# Source: code-202604_v2/data_gen/build_dpo_dataset.py (snapshot 20260424-2142, commit 0a4deb9)
#!/usr/bin/env python3
from __future__ import annotations
import argparse
import asyncio
import json
import os
import random
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Optional, Set, Tuple
from openai import AsyncOpenAI
# Candidate user questions. One is sampled per original video (see
# process_one_original), so the three rows derived from that video all share
# the same prompt.
PROMPT_CHOICES = [
    "Are the audio and video tracks synchronized in this video?",
    "Identify the exact timing of the visual impact and its corresponding sound. Do they align?",
    "Is there any noticeable audio delay or temporal manipulation in this clip?",
]
# Exact JSON keys the LLM must return: one correct and one hallucinated
# response for each of the three samples (sample1 = unmodified clip,
# sample2 = audio-delayed clip, sample3 = audio-early clip).
RESPONSE_KEYS = [
    "resp_sample1_correct",
    "resp_sample1_hallucinated",
    "resp_sample2_correct",
    "resp_sample2_hallucinated",
    "resp_sample3_correct",
    "resp_sample3_hallucinated",
]
def parse_args() -> argparse.Namespace:
    """Parse CLI options: dataset paths, model name, and concurrency/retry tuning."""
    parser = argparse.ArgumentParser(
        description="Build KTO preference dataset from original + shifted videos with GPT-5.4 text generation."
    )
    # Path-valued options share the same shape; register them table-driven.
    path_defaults = [
        ("--labels-jsonl", "/home/ubuntu/video_source/gemini_label_result/timestamps_results_normalized.jsonl"),
        ("--shift-meta-jsonl", "/home/ubuntu/video_source/random_shift_video/shift_metadata.jsonl"),
        ("--output-jsonl", "/home/ubuntu/video_source/kto_training_data.jsonl"),
    ]
    for flag, default_path in path_defaults:
        parser.add_argument(flag, type=Path, default=Path(default_path))
    parser.add_argument("--model", type=str, default="gpt-5.4")
    parser.add_argument("--max-concurrency", type=int, default=8)
    parser.add_argument("--max-retries", type=int, default=5)
    parser.add_argument("--base-backoff-sec", type=float, default=2.0)
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--ffprobe-timeout-sec", type=int, default=30)
    return parser.parse_args()
def read_jsonl(path: Path) -> List[Dict[str, Any]]:
    """Load a JSONL file, parsing every non-blank line; blank lines are skipped."""
    with path.open("r", encoding="utf-8") as handle:
        return [json.loads(stripped) for raw in handle if (stripped := raw.strip())]
def append_jsonl(path: Path, row: Dict[str, Any]) -> None:
    """Append one object as a single JSON line, creating parent directories as needed."""
    path.parent.mkdir(parents=True, exist_ok=True)
    serialized = json.dumps(row, ensure_ascii=False)
    with path.open("a", encoding="utf-8") as handle:
        handle.write(serialized + "\n")
def original_rel_from_label_row(row: Dict[str, Any]) -> str:
    """Turn an absolute original-video path into its path relative to the
    first ".../original/" component; fall back to the bare filename when no
    "original" component exists."""
    full_path = Path(str(row["video_path"]))
    components = full_path.parts
    try:
        anchor = components.index("original")
    except ValueError:
        return str(full_path.name)
    return str(Path(*components[anchor + 1 :]))
def build_annotations_map(rows: List[Dict[str, Any]]) -> Dict[str, Dict[str, Any]]:
    """Index label rows by their original-relative video path.

    On duplicate keys the last row wins, matching plain dict assignment.
    """
    return {original_rel_from_label_row(row): row for row in rows}
def build_shift_map(rows: List[Dict[str, Any]]) -> Dict[str, Dict[str, Dict[str, Any]]]:
    """Group shift-metadata rows as original_video -> direction -> row.

    Rows without an original video or with an unknown direction are dropped;
    for duplicate (original, direction) pairs the first occurrence is kept.
    """
    valid_directions = {"delayed", "early"}
    mapping: Dict[str, Dict[str, Dict[str, Any]]] = defaultdict(dict)
    for record in rows:
        source = str(record.get("original_video", "")).strip()
        side = str(record.get("shift_direction", "")).strip()
        if not source or side not in valid_directions:
            continue
        # setdefault keeps the first row seen for this (source, side) pair.
        mapping[source].setdefault(side, record)
    return mapping
async def ffprobe_duration_seconds(video_path: str, timeout_sec: int) -> float:
    """Return the container duration of *video_path* in seconds via ffprobe.

    Raises RuntimeError when ffprobe exceeds the timeout (the process is
    killed first) or exits non-zero.
    """
    command = [
        "ffprobe",
        "-v",
        "error",
        "-show_entries",
        "format=duration",
        "-of",
        "default=noprint_wrappers=1:nokey=1",
        video_path,
    ]
    proc = await asyncio.create_subprocess_exec(
        *command,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
    )
    try:
        stdout, stderr = await asyncio.wait_for(proc.communicate(), timeout=float(timeout_sec))
    except asyncio.TimeoutError:
        # Reap the stuck process before surfacing the timeout.
        proc.kill()
        await proc.wait()
        raise RuntimeError(f"ffprobe timeout: {video_path}")
    if proc.returncode != 0:
        tail = (stderr or b"").decode(errors="ignore")[-200:]
        raise RuntimeError(f"ffprobe failed for {video_path}: {tail}")
    duration_text = (stdout or b"").decode().strip()
    return float(duration_text)
def make_system_prompt() -> str:
    """Build the system message: JSON-only output with exactly the six
    RESPONSE_KEYS fields, and no label-leaking vocabulary."""
    key_list = ", ".join(RESPONSE_KEYS)
    fragments = [
        "You generate ONLY JSON. Return EXACTLY six string fields and no extras: ",
        key_list,
        ". ",
        "Do not perform arithmetic beyond using provided facts. Python already computed all numbers. ",
        "Important style rule: the three clips are anonymous. ",
        "Never infer or mention dataset labels/split names/source names or paths. ",
        "Do not use words such as original, delayed, early, manipulated, shifted.",
    ]
    return "".join(fragments)
def make_user_payload(facts: Dict[str, Any]) -> str:
    """Render the user message: the precomputed facts as pretty-printed JSON
    followed by the fixed six-field instruction text."""
    facts_block = json.dumps(facts, ensure_ascii=False, indent=2)
    instruction_parts = [
        "Return a JSON object with exactly these 6 string values:\n",
        "1) resp_sample1_correct: confirms synchronization for sample_1. Integrate event_v and event_a. ",
        "If sample1_gap <= 0.4s, explicitly mention negligible natural tolerance of sample1_gap.\n",
        "2) resp_sample1_hallucinated: incorrectly claims a noticeable mismatch for sample_1.\n",
        "3) resp_sample2_correct: expert explanation for sample_2, integrate event_v/event_a, ",
        "must cite t_visual and t_audio_sample2, and use natural estimation language with sample2_gap.\n",
        "4) resp_sample2_hallucinated: incorrectly claims sample_2 is perfectly synchronized at t_visual.\n",
        "5) resp_sample3_correct: expert explanation for sample_3, integrate event_v/event_a, ",
        "must cite t_visual and t_audio_sample3, and use natural estimation language with sample3_gap.\n",
        "6) resp_sample3_hallucinated: incorrectly claims sample_3 is perfectly synchronized.\n",
        "Hard constraint: clip labels are anonymous. Do not use words such as original/delayed/early/manipulated.\n",
        "All six values must be concise, natural, and label-neutral.",
    ]
    return (
        "Facts (ground truth computed by Python):\n"
        + facts_block
        + "\n\nInstruction:\n"
        + "".join(instruction_parts)
    )
async def call_llm_with_backoff(
    client: AsyncOpenAI,
    model: str,
    facts: Dict[str, Any],
    max_retries: int,
    base_backoff_sec: float,
) -> Dict[str, str]:
    """Ask the chat model for the six response texts, retrying with
    exponential backoff (base * 2**attempt) on any failure, including
    malformed/incomplete JSON replies.

    Returns a dict containing exactly the RESPONSE_KEYS strings.
    Raises RuntimeError once all attempts are exhausted.
    """
    system_message = make_system_prompt()
    user_message = make_user_payload(facts)
    failure: Optional[Exception] = None
    for attempt in range(max_retries):
        try:
            resp = await client.chat.completions.create(
                model=model,
                response_format={"type": "json_object"},
                messages=[
                    {"role": "system", "content": system_message},
                    {"role": "user", "content": user_message},
                ],
            )
            raw = (resp.choices[0].message.content or "").strip()
            parsed = json.loads(raw)
            # Validate the reply shape before accepting it.
            missing = [k for k in RESPONSE_KEYS if k not in parsed or not isinstance(parsed[k], str)]
            if missing:
                raise ValueError(f"Missing/invalid response keys: {missing}")
            return {key: parsed[key] for key in RESPONSE_KEYS}
        except Exception as exc:  # deliberately broad: validation errors retry too
            failure = exc
            if attempt + 1 == max_retries:
                break
            await asyncio.sleep(base_backoff_sec * (2 ** attempt))
    raise RuntimeError(f"LLM generation failed after retries: {failure}")
def build_existing_completion_state(
    output_jsonl: Path,
    shifted_to_original: Dict[str, str],
) -> Dict[str, Set[str]]:
    """Scan an existing output file for resume support.

    Returns mapping: original_rel -> set of row types already written
    {"orig", "delayed", "early"}. Blank and unparseable lines are ignored.
    """
    state: Dict[str, Set[str]] = defaultdict(set)
    if not output_jsonl.exists():
        return state
    with output_jsonl.open("r", encoding="utf-8") as handle:
        for raw in handle:
            stripped = raw.strip()
            if not stripped:
                continue
            try:
                record = json.loads(stripped)
            except json.JSONDecodeError:
                continue
            video = str(record.get("video", "")).strip()
            if not video:
                continue
            original = shifted_to_original.get(video)
            if original is None:
                # Not a known shifted clip: treat as an original-video row.
                state[video].add("orig")
                continue
            clip_name = Path(video).name
            if "_delay_" in clip_name:
                state[original].add("delayed")
            elif "_early_" in clip_name:
                state[original].add("early")
            # A shifted path with neither marker contributes nothing,
            # matching the original behavior.
    return state
async def process_one_original(
    original_rel: str,
    ann_row: Dict[str, Any],
    shift_row_delayed: Dict[str, Any],
    shift_row_early: Dict[str, Any],
    args: argparse.Namespace,
    client: AsyncOpenAI,
    sem: asyncio.Semaphore,
    write_lock: asyncio.Lock,
    completion_state: Dict[str, Set[str]],
    prompt_rng: random.Random,
    counters: Dict[str, int],
) -> None:
    """Generate and append up to three preference rows (original / delayed / early)
    for one labeled original video.

    Probes the original clip's duration with ffprobe, computes the ground-truth
    audio/visual gaps for all three clips, asks the LLM for the six
    chosen/rejected texts, and appends only the row types not already present
    in the output file (resume support). Exceptions from ffprobe or the LLM
    propagate to the caller, which counts them as failed originals.
    """
    existing_types = completion_state.get(original_rel, set())
    # Fully completed originals are skipped before acquiring a concurrency slot.
    if existing_types == {"orig", "delayed", "early"}:
        counters["skipped_completed"] += 1
        return
    async with sem:
        original_video_path = str(ann_row["video_path"])
        video_duration = await ffprobe_duration_seconds(original_video_path, args.ffprobe_timeout_sec)
        t_visual = float(ann_row["t_visual"])
        t_audio = float(ann_row["t_audio"])
        event_v = str(ann_row.get("event_description", "")).strip()
        event_a = str(ann_row.get("sound_description", "")).strip()
        delayed_shift_seconds = float(shift_row_delayed["exact_shift_seconds"])
        early_shift_seconds = float(shift_row_early["exact_shift_seconds"])
        # Gap already present in the unmodified clip (labeling tolerance).
        original_delta = abs(t_visual - t_audio)
        # Shifted audio timestamps, clamped to the clip's [0, duration] range.
        t_audio_delayed = min(t_audio + delayed_shift_seconds, video_duration)
        t_audio_early = max(t_audio - early_shift_seconds, 0.0)
        true_delayed_gap = round(abs(t_audio_delayed - t_visual), 2)
        true_early_gap = round(abs(t_audio_early - t_visual), 2)
        # All arithmetic happens here so the LLM only verbalizes these numbers
        # (the system prompt forbids it from computing anything itself).
        facts = {
            "video_duration": round(video_duration, 3),
            "t_visual": round(t_visual, 3),
            "t_audio_sample1": round(t_audio, 3),
            "event_v": event_v,
            "event_a": event_a,
            "shift_seconds_sample2": round(delayed_shift_seconds, 6),
            "shift_seconds_sample3": round(early_shift_seconds, 6),
            "t_audio_sample2": round(t_audio_delayed, 3),
            "t_audio_sample3": round(t_audio_early, 3),
            "sample1_gap": round(original_delta, 3),
            "sample2_gap": true_delayed_gap,
            "sample3_gap": true_early_gap,
        }
        # NOTE(review): the LLM is called even when only a subset of the three
        # rows is missing, so partially-resumed originals still cost one
        # generation each.
        texts = await call_llm_with_backoff(
            client=client,
            model=args.model,
            facts=facts,
            max_retries=args.max_retries,
            base_backoff_sec=args.base_backoff_sec,
        )
        # One prompt is sampled per original and shared by all three rows.
        selected_prompt = prompt_rng.choice(PROMPT_CHOICES)
        rows = [
            {
                "video": original_rel,
                "prompt": selected_prompt,
                "chosen": texts["resp_sample1_correct"],
                "rejected": texts["resp_sample1_hallucinated"],
            },
            {
                "video": str(shift_row_delayed["shifted_video_path"]),
                "prompt": selected_prompt,
                "chosen": texts["resp_sample2_correct"],
                "rejected": texts["resp_sample2_hallucinated"],
            },
            {
                "video": str(shift_row_early["shifted_video_path"]),
                "prompt": selected_prompt,
                "chosen": texts["resp_sample3_correct"],
                "rejected": texts["resp_sample3_hallucinated"],
            },
        ]
        # Only write the row types the resume scan did not find.
        missing_rows: List[Dict[str, Any]] = []
        if "orig" not in existing_types:
            missing_rows.append(rows[0])
        if "delayed" not in existing_types:
            missing_rows.append(rows[1])
        if "early" not in existing_types:
            missing_rows.append(rows[2])
        # Serialize appends + state/counter updates so concurrent tasks
        # cannot interleave writes to the shared output file.
        async with write_lock:
            for r in missing_rows:
                append_jsonl(args.output_jsonl, r)
            completion_state[original_rel] = {"orig", "delayed", "early"}
            counters["processed_originals"] += 1
            counters["written_rows"] += len(missing_rows)
async def main_async() -> None:
    """Orchestrate the full build: load inputs, resume from existing output,
    fan out per-original tasks under a concurrency limit, and print a summary.

    Raises RuntimeError if OPENAI_API_KEY is not set.
    """
    args = parse_args()
    random.seed(args.seed)
    if not os.getenv("OPENAI_API_KEY"):
        raise RuntimeError("OPENAI_API_KEY is required.")
    client = AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    labels = read_jsonl(args.labels_jsonl)
    shift_meta = read_jsonl(args.shift_meta_jsonl)
    ann_map = build_annotations_map(labels)
    shift_map = build_shift_map(shift_meta)
    # Build reverse map for resume detection from existing output rows.
    shifted_to_original: Dict[str, str] = {}
    for orig, both in shift_map.items():
        for direction in ("delayed", "early"):
            item = both.get(direction)
            if item:
                shifted_to_original[str(item["shifted_video_path"])] = orig
    completion_state = build_existing_completion_state(args.output_jsonl, shifted_to_original)
    # Process only originals present in labels (1420) and that are included in shift set (1589 superset).
    # An original needs BOTH a delayed and an early shifted variant to qualify.
    candidates: List[Tuple[str, Dict[str, Any], Dict[str, Any], Dict[str, Any]]] = []
    missing_shift = 0
    for original_rel, ann_row in ann_map.items():
        sides = shift_map.get(original_rel, {})
        delayed = sides.get("delayed")
        early = sides.get("early")
        if delayed is None or early is None:
            missing_shift += 1
            continue
        candidates.append((original_rel, ann_row, delayed, early))
    # Deterministic processing order (combined with the seeded RNG below,
    # prompt selection is reproducible across runs).
    candidates.sort(key=lambda x: x[0])
    sem = asyncio.Semaphore(max(1, args.max_concurrency))
    write_lock = asyncio.Lock()
    prompt_rng = random.Random(args.seed)
    counters = {
        "processed_originals": 0,
        "written_rows": 0,
        "skipped_completed": 0,
        "failed_originals": 0,
    }

    async def runner(item: Tuple[str, Dict[str, Any], Dict[str, Any], Dict[str, Any]]) -> None:
        # Wrapper so one failed original is logged and counted without
        # aborting the rest of the batch.
        orig, ann, drow, erow = item
        try:
            await process_one_original(
                original_rel=orig,
                ann_row=ann,
                shift_row_delayed=drow,
                shift_row_early=erow,
                args=args,
                client=client,
                sem=sem,
                write_lock=write_lock,
                completion_state=completion_state,
                prompt_rng=prompt_rng,
                counters=counters,
            )
        except Exception as exc:
            counters["failed_originals"] += 1
            print(f"[error] {orig}: {exc}")

    tasks = [asyncio.create_task(runner(it)) for it in candidates]
    # as_completed yields in finish order; runner swallows exceptions, so
    # these awaits only drive progress reporting.
    for i, t in enumerate(asyncio.as_completed(tasks), start=1):
        await t
        if i % 50 == 0:
            print(
                f"[progress] done={i}/{len(candidates)} "
                f"processed={counters['processed_originals']} "
                f"written_rows={counters['written_rows']} "
                f"skipped={counters['skipped_completed']} "
                f"failed={counters['failed_originals']}"
            )
    print("========== KTO Build Summary ==========")
    print(f"Labels rows : {len(labels)}")
    print(f"Unique labeled originals : {len(ann_map)}")
    print(f"Unique shifted originals : {len(shift_map)}")
    print(f"Labeled but missing shift pairs : {missing_shift}")
    print(f"Candidate originals : {len(candidates)}")
    print(f"Processed originals : {counters['processed_originals']}")
    print(f"Skipped completed originals : {counters['skipped_completed']}")
    print(f"Failed originals : {counters['failed_originals']}")
    print(f"Rows appended to output : {counters['written_rows']}")
    print(f"Output JSONL : {args.output_jsonl}")
    print("=======================================")
def main() -> None:
    """Synchronous entry point: run the async pipeline to completion."""
    asyncio.run(main_async())


if __name__ == "__main__":
    main()