"""Prompt templates and configuration for VLM evaluation and judge training.

Supports loading prompt configs from YAML files for A/B comparison. A default
config is loaded at import time for backward compatibility.

Usage:
    from evals.prompts import PromptConfig

    cfg = PromptConfig.load("subq+human.yaml")
    prompt = cfg.build_eval_prompt(caption, "SA")
"""
from __future__ import annotations

from dataclasses import dataclass
from pathlib import Path
from typing import Any

import yaml

from evals.physics_criteria import SUB_QUESTIONS, SubQuestion

# Directory holding this module's YAML prompt configs.
_PROMPTS_DIR = Path(__file__).parent

# ──────────────────────────────────────────────────────────────────────────────
# PromptConfig
# ──────────────────────────────────────────────────────────────────────────────
@dataclass
class PromptConfig:
"""A prompt template set loaded from YAML."""
name: str
scheme: str
system_prompt: str
general_keys: list[str]
eval_prompts: dict[str, str]
training_prompts: dict[str, str]
physical_template: str
physical_sub_questions: bool = False
sub_questions: dict[str, str] | None = None

    @classmethod
    def load(cls, name: str) -> PromptConfig:
        """Load evals/prompts/{name}; name must be a bare YAML filename."""
        raw_path = Path(name)
        if raw_path.name != name or raw_path.suffix != ".yaml":
            raise ValueError(
                "prompt config must be a YAML filename under evals/prompts, "
                f"got {name!r}"
            )
        path = _PROMPTS_DIR / name
        with open(path) as f:
            data = yaml.safe_load(f)
        # safe_load returns None for an empty file; treat that as an empty config.
        return cls._from_dict(data or {})

    @classmethod
    def _from_dict(cls, data: dict[str, Any]) -> PromptConfig:
        def _resolve(raw: str | dict) -> str:
            # A prompt entry is either a bare template string or a mapping
            # with a "template" and an optional "considerations" list that
            # is numbered and substituted for the {considerations} placeholder.
            if isinstance(raw, str):
                return raw
            template = raw["template"]
            considerations = raw.get("considerations")
            if considerations and "{considerations}" in template:
                block = "\n".join(
                    f"{i}. {c}" for i, c in enumerate(considerations, 1)
                )
                template = template.replace("{considerations}", block)
            return template
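
        # Illustrative config shape accepted by _resolve. The entry keys
        # ("SA", the template text, the consideration items) are assumptions
        # for illustration; "template"/"considerations" are the only keys
        # this code actually reads:
        #
        #   eval_prompts:
        #     SA:
        #       template: "Evaluate {prompt}. Consider:\n{considerations}"
        #       considerations:
        #         - object presence
        #         - action fidelity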
        return cls(
            name=data.get("name", ""),
            scheme=data.get("scheme", "plain"),
            system_prompt=data.get(
                "system_prompt", "You are a strict video evaluation model."
            ),
            general_keys=data.get("general_keys", ["SA", "PTV", "persistence"]),
            eval_prompts={
                k: _resolve(v) for k, v in data.get("eval_prompts", {}).items()
            },
            training_prompts={
                k: _resolve(v) for k, v in data.get("training_prompts", {}).items()
            },
            # Accept "physical_training_template" as a fallback key.
            physical_template=data.get(
                "physical_template", data.get("physical_training_template", "")
            ),
            physical_sub_questions=bool(data.get("physical_sub_questions", False)),
            sub_questions=data.get("sub_questions"),
        )

    @property
    def _answer_format(self) -> str | None:
        """Answer-format key from the sub_questions config, if any."""
        if self.sub_questions:
            return self.sub_questions.get("answer_format")
        return None

    def build_eval_prompt(
        self, prompt_text: str, metric: str, *, answers_block: str = "",
    ) -> str:
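        """Render the evaluation prompt for *metric*, filling in the caption
        and the generated sub-question block."""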
        questions_block, question_keys_str = build_general_questions_block(
            metric, self._answer_format,
        )
        return self.eval_prompts[metric].format(
            prompt=prompt_text,
            questions_block=questions_block,
            question_keys_str=question_keys_str,
            answers_block=answers_block,
        )

    def build_training_prompt(
        self, prompt_text: str, dim: str, *, answers_block: str = "",
    ) -> str:
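        """Render the training prompt for *dim*, falling back to the eval
        prompts when no training prompts are configured."""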
        prompts = self.training_prompts or self.eval_prompts
        questions_block, question_keys_str = build_general_questions_block(
            dim, self._answer_format,
        )
        return prompts[dim].format(
            prompt=prompt_text,
            questions_block=questions_block,
            question_keys_str=question_keys_str,
            answers_block=answers_block,
        )

    def build_physical_prompt(
        self, prompt_text: str, law: str, criteria: str,
        *, answers_block: str = "",
    ) -> str:
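        """Render the physical-law prompt; the per-law sub-question block is
        included only when the config enables physical_sub_questions."""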
        if self.physical_sub_questions:
            questions_block, question_keys_str = build_physical_questions_block(
                law, criteria, self._answer_format,
            )
        else:
            questions_block = ""
            question_keys_str = ""
        return self.physical_template.format(
            prompt=prompt_text,
            law=law,
            criteria=criteria,
            questions_block=questions_block,
            question_keys_str=question_keys_str,
            answers_block=answers_block,
        )


# ──────────────────────────────────────────────────────────────────────────────
# Scoring dimension keys
# ──────────────────────────────────────────────────────────────────────────────
GENERAL_KEYS = ["SA", "PTV", "persistence"]
GENERAL_DIMS = GENERAL_KEYS  # Alias kept for backward compatibility.

GENERAL_SUB_QUESTIONS: dict[str, list[str]] = {
    "SA": [
        "Are the main objects in the caption present in the video?",
        "Are the key actions or interactions from the caption visible?",
        "Are important scene attributes and relationships preserved?",
        "Does the video avoid major contradictions to the caption?",
    ],
    "PTV": [
        "Do causes appear before their effects?",
        "Do physical events unfold in a plausible temporal order?",
        "Are motion transitions continuous rather than abrupt jumps or loops?",
        "Does the sequence avoid impossible reversals or repeated resets?",
    ],
    "persistence": [
        "Do objects maintain consistent existence throughout the video?",
        "Do objects keep a stable shape, size, color, and texture?",
        "Do objects avoid disappearing, appearing, or transforming unexpectedly?",
        "Do objects preserve identity through motion and brief occlusion?",
    ],
}

# Maps an answer_format key to the instruction appended to the question block.
_ANSWER_FORMAT_SUFFIX = {
    "answer": "Answer each with yes/no/uncertain.",
}


def _format_questions_block(
    questions: list[str], answer_format: str | None = None,
) -> tuple[str, str]:
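    """Number *questions* as q1..qN.

    Returns ``(questions_block, question_keys_str)``: one ``qN: <question>``
    line per question, plus an answer-format instruction when one is
    configured, and the joined key list, e.g. ``"q1, q2"``.
    """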
    keys: list[str] = []
    lines: list[str] = []
    for i, question in enumerate(questions, 1):
        key = f"q{i}"
        keys.append(key)
        lines.append(f"{key}: {question}")
    suffix = _ANSWER_FORMAT_SUFFIX.get(answer_format or "")
    if suffix:
        lines.append(suffix)
    return "\n".join(lines), ", ".join(keys)


def build_general_questions_block(
    dim: str, answer_format: str | None = None,
) -> tuple[str, str]:
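    """Build the sub-question block for a general scoring dimension,
    falling back to a single generic question for unknown dims."""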
    questions = GENERAL_SUB_QUESTIONS.get(dim)
    if not questions:
        questions = [f"Does the video satisfy the {dim} criterion?"]
    return _format_questions_block(questions, answer_format)


def build_physical_questions_block(
    law: str, criteria: str, answer_format: str | None = None,
) -> tuple[str, str]:
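    """Build the sub-question block for a physical law, falling back to a
    single question built from *criteria* when the law has no entry in
    SUB_QUESTIONS."""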
    sub_qs = SUB_QUESTIONS.get(law)
    if not sub_qs:
        sub_qs = [SubQuestion(f"{law}_q1", criteria, violation="no")]
    return _format_questions_block([sq.question for sq in sub_qs], answer_format)