"""Map source-timeline ASR words to per-clip subtitle timings (t=0 at clip in-point)."""
from __future__ import annotations
from humeo_core.schemas import Clip, ClipSubtitleWords, RenderTheme, TranscriptWord
# Whisper / WhisperX / OpenAI-normalized segment shapes
# Cue-shaping limits: a cue never exceeds this many words...
_MAX_WORDS_PER_CUE = 8
# ...nor this many seconds of screen time.
_MAX_CUE_SEC = 4.0
# Trailing characters that mark a natural place to end a cue.
_PUNCTUATION_BREAK_CHARS = (".", "?", "!", ";", ":")
# Capitalized words that commonly open a new sentence; used by the
# sentence-restart heuristic when ASR output lacks terminal punctuation.
_SENTENCE_RESTART_WORDS = frozenset(
    {
        "And",
        "But",
        "Did",
        "Now",
        "So",
        "That",
        "Then",
        "This",
        "Those",
        "What",
        "When",
        "Where",
        "Why",
    }
)
def _iter_words_from_segments(transcript: dict) -> list[TranscriptWord]:
    """Flatten a normalized ASR transcript into timed word tokens.

    Segments that carry a ``words`` list yield one token per word (with the
    word's own timestamps). Segments without word-level timings yield a
    single token spanning the whole segment.
    """
    collected: list[TranscriptWord] = []
    for segment in transcript.get("segments", []) or []:
        word_entries = segment.get("words") or []
        if word_entries:
            for entry in word_entries:
                token = str(entry.get("word", "")).strip()
                if not token:
                    continue
                collected.append(
                    TranscriptWord(
                        word=token,
                        start_time=float(entry["start"]),
                        end_time=float(entry["end"]),
                    )
                )
        else:
            # Segment-level only: the whole segment text becomes one token.
            segment_text = str(segment.get("text", "")).strip()
            if segment_text:
                collected.append(
                    TranscriptWord(
                        word=segment_text,
                        start_time=float(segment.get("start", 0.0)),
                        end_time=float(segment.get("end", 0.0)),
                    )
                )
    return collected
def clip_subtitle_words(transcript: dict, clip: Clip) -> ClipSubtitleWords:
    """Words overlapping ``clip`` with times shifted to start at 0 (clip-local)."""
    window_start = clip.start_time_sec
    window_end = clip.end_time_sec
    shifted: list[TranscriptWord] = []
    for word in _iter_words_from_segments(transcript):
        # Keep only words that overlap the clip window.
        overlaps = word.start_time < window_end and word.end_time > window_start
        if not overlaps:
            continue
        # Clamp to the window, then shift so t=0 is the clip in-point.
        local_start = max(word.start_time, window_start) - window_start
        local_end = min(word.end_time, window_end) - window_start
        if local_end > local_start:
            shifted.append(
                TranscriptWord(
                    word=word.word, start_time=local_start, end_time=local_end
                )
            )
    if not shifted:
        # No word timestamps available: spread the clip transcript evenly.
        return ClipSubtitleWords(words=_fallback_even_words(clip))
    return ClipSubtitleWords(words=shifted)
def _fallback_even_words(clip: Clip) -> list[TranscriptWord]:
    """Distribute the clip transcript's words evenly over its duration."""
    tokens = (clip.transcript or "").split()
    if not tokens:
        return []
    duration = clip.duration_sec
    step = duration / len(tokens)
    last = len(tokens) - 1
    # The final word is pinned to the exact clip end so float accumulation
    # never leaves the last cue short of the duration.
    return [
        TranscriptWord(
            word=token,
            start_time=idx * step,
            end_time=duration if idx == last else (idx + 1) * step,
        )
        for idx, token in enumerate(tokens)
    ]
def _looks_like_sentence_restart(prev_word: str, next_word: str) -> bool:
    """Heuristic: does ``next_word`` plausibly begin a new sentence?"""
    # Strip wrapping quote/bracket characters before inspecting the words.
    tail = prev_word.rstrip("\"')]}")
    head = next_word.lstrip("\"'([{")
    if not tail or not head:
        return False
    if head in _SENTENCE_RESTART_WORDS:
        return True
    # A number ending the previous word followed by a capitalized word often
    # marks a sentence boundary whose punctuation the ASR dropped.
    prev_has_digit = any(c.isdigit() for c in tail)
    return prev_has_digit and head[0].isupper()
def clip_words_to_srt_lines(
    words: list[TranscriptWord],
    *,
    max_words_per_cue: int = _MAX_WORDS_PER_CUE,
    max_cue_sec: float = _MAX_CUE_SEC,
    prefer_break_on_punctuation: bool = False,
    min_words_before_break: int = 1,
) -> list[tuple[float, float, str]]:
    """Group words into SRT cues: max N words and max duration per cue."""
    cues: list[tuple[float, float, str]] = []
    grouped = group_words_to_cue_chunks(
        words,
        max_words_per_cue=max_words_per_cue,
        max_cue_sec=max_cue_sec,
        prefer_break_on_punctuation=prefer_break_on_punctuation,
        min_words_before_break=min_words_before_break,
    )
    for chunk in grouped:
        # Cue spans from the first word's start to the last word's end.
        text = " ".join(w.word for w in chunk)
        cues.append((chunk[0].start_time, chunk[-1].end_time, text))
    return cues
def group_words_to_cue_chunks(
    words: list[TranscriptWord],
    *,
    max_words_per_cue: int = _MAX_WORDS_PER_CUE,
    max_cue_sec: float = _MAX_CUE_SEC,
    prefer_break_on_punctuation: bool = False,
    min_words_before_break: int = 1,
) -> list[list[TranscriptWord]]:
    """Group words into timed cue chunks while preserving per-word timings.

    A chunk closes when it hits ``max_words_per_cue`` words, when the next
    word would start more than ``max_cue_sec`` after the chunk start, or —
    with ``prefer_break_on_punctuation`` — when a sentence restart is
    detected or a just-appended word ends in break punctuation (after at
    least ``min_words_before_break`` words).
    """
    if not words:
        return []
    # Sanitize caller-supplied limits so the loops below always terminate.
    max_words_per_cue = max(1, int(max_words_per_cue))
    max_cue_sec = max(0.2, float(max_cue_sec))
    min_words_before_break = max(1, int(min_words_before_break))
    chunks_out: list[list[TranscriptWord]] = []
    i = 0
    n = len(words)
    while i < n:
        # Each chunk starts with the next unconsumed word.
        chunk: list[TranscriptWord] = [words[i]]
        t0 = words[i].start_time
        end_t = words[i].end_time
        j = i + 1
        while j < n:
            w = words[j]
            if len(chunk) >= max_words_per_cue:
                break
            if w.start_time - t0 > max_cue_sec:
                break
            # Break BEFORE appending when the next word looks like a new
            # sentence — but only once the chunk has some substance
            # (>= 2 words, or >= 0.45s of speech already covered).
            if (
                prefer_break_on_punctuation
                and (len(chunk) >= 2 or end_t - t0 >= 0.45)
                and _looks_like_sentence_restart(chunk[-1].word, w.word)
            ):
                break
            chunk.append(w)
            end_t = w.end_time
            j += 1
            # Break AFTER appending when the word just added ends in break
            # punctuation (ignoring trailing quotes/brackets). NOTE(review):
            # the chunk's first word is never punctuation-checked here —
            # presumably intentional to avoid degenerate cues; confirm.
            if (
                prefer_break_on_punctuation
                and len(chunk) >= min_words_before_break
                and chunk[-1].word.rstrip("\"')]}").endswith(_PUNCTUATION_BREAK_CHARS)
            ):
                break
        chunks_out.append(chunk)
        # Resume scanning at the first word not consumed by this chunk.
        i = j
    return chunks_out
def format_srt(lines: list[tuple[float, float, str]]) -> str:
    """Serialize cues as an SRT document: numbered blocks of time-range + text."""
    rendered = [
        f"{number}\n{_fmt_time(start)} --> {_fmt_time(end)}\n{text}\n"
        for number, (start, end, text) in enumerate(lines, start=1)
    ]
    # Blocks are separated by a blank line (each block already ends with \n).
    return "\n".join(rendered)
def _fmt_time(seconds: float) -> str:
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
secs = int(seconds % 60)
millis = int(round((seconds % 1) * 1000))
if millis >= 1000:
millis = 999
return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"
# ---------------------------------------------------------------------------
# ASS / SubStation Alpha output (the format libass natively renders)
# ---------------------------------------------------------------------------
def _fmt_ass_time(seconds: float) -> str:
"""ASS time format: ``H:MM:SS.cs`` (centiseconds)."""
seconds = max(0.0, seconds)
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
secs = seconds % 60
whole = int(secs)
cs = int(round((secs - whole) * 100))
if cs >= 100:
cs = 99
return f"{hours:d}:{minutes:02d}:{whole:02d}.{cs:02d}"
def _escape_ass_text(text: str) -> str:
"""Escape characters that are significant to the ASS dialogue parser."""
return (
text.replace("\\", r"\\")
.replace("{", r"\{")
.replace("}", r"\}")
.replace("\n", r"\N")
)
def format_ass(
    lines: list[tuple[float, float, str]],
    *,
    play_res_x: int,
    play_res_y: int,
    font_size: int,
    margin_v: int,
    margin_h: int = 60,
    font_name: str = "Arial",
    render_theme: RenderTheme = RenderTheme.LEGACY,
) -> str:
    """Serialize caption cues as an ASS script sized to the output video.

    libass multiplies font sizes and pixel-ish margins by
    ``video_height / PlayResY``; with the default ``PlayResY=288`` a
    ``FontSize=48`` ballooned to ~320 output pixels and ``MarginV`` landed
    mid-frame. Pinning PlayResX/PlayResY to the real output resolution makes
    the scale factor exactly 1.0, so ``font_size`` / ``margin_v`` /
    ``margin_h`` are honest output-pixel values.
    """
    # The two themes differ only in BackColour, Spacing, BorderStyle, Outline.
    if render_theme == RenderTheme.REFERENCE_LOWER_THIRD:
        back_colour, spacing, border_style, outline = "&H00000000", -1, 1, 3
    else:
        back_colour, spacing, border_style, outline = "&H70000000", 0, 4, 0
    style_line = (
        f"Style: Default,{font_name},{font_size},&H00FFFFFF,&H000000FF,"
        f"&H00000000,{back_colour},-1,0,0,0,100,100,{spacing},0,"
        f"{border_style},{outline},0,2,{margin_h},{margin_h},{margin_v},0\n"
    )
    script_info = "\n".join(
        (
            "[Script Info]",
            "ScriptType: v4.00+",
            f"PlayResX: {play_res_x}",
            f"PlayResY: {play_res_y}",
            "WrapStyle: 0",
            "ScaledBorderAndShadow: yes",
            "YCbCr Matrix: None",
            "",
            "[V4+ Styles]",
            "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, "
            "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, "
            "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, "
            "Alignment, MarginL, MarginR, MarginV, Encoding",
        )
    )
    header = (
        script_info
        + "\n"
        + style_line
        + "\n[Events]\n"
        "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n"
    )
    dialogue = "".join(
        f"Dialogue: 0,{_fmt_ass_time(start)},{_fmt_ass_time(end)},Default,,"
        f"0,0,0,,{_escape_ass_text(text)}\n"
        for start, end, text in lines
    )
    return header + dialogue