Spaces:
Sleeping
Sleeping
File size: 7,407 Bytes
"""Map source-timeline ASR words to per-clip subtitle timings (t=0 at clip in-point)."""
from __future__ import annotations
from humeo_core.schemas import Clip, ClipSubtitleWords, TranscriptWord
# Whisper / WhisperX / OpenAI-normalized segment shapes
_MAX_WORDS_PER_CUE = 8
_MAX_CUE_SEC = 4.0
def _iter_words_from_segments(transcript: dict) -> list[TranscriptWord]:
    """Flatten a Whisper/WhisperX-style transcript into a flat word list.

    Prefers per-word timestamps when a segment carries a ``words`` list;
    otherwise falls back to emitting the whole segment text as a single
    token spanning the segment's time range.

    :param transcript: normalized ASR output with a ``segments`` list.
    :return: words in transcript order, in source-timeline seconds.
    """
    out: list[TranscriptWord] = []
    for seg in transcript.get("segments", []) or []:
        words = seg.get("words") or []
        if words:
            for raw in words:
                w = str(raw.get("word", "")).strip()
                if not w:
                    continue
                # WhisperX sometimes emits words without timestamps
                # (e.g. numerals); skip them instead of raising KeyError,
                # mirroring the defensive .get() used on the segment path.
                start = raw.get("start")
                end = raw.get("end")
                if start is None or end is None:
                    continue
                out.append(
                    TranscriptWord(
                        word=w,
                        start_time=float(start),
                        end_time=float(end),
                    )
                )
            continue
        # Segment-level only (no word list): treat whole segment as one token
        text = str(seg.get("text", "")).strip()
        if text:
            out.append(
                TranscriptWord(
                    word=text,
                    start_time=float(seg.get("start", 0.0)),
                    end_time=float(seg.get("end", 0.0)),
                )
            )
    return out
def clip_subtitle_words(transcript: dict, clip: Clip) -> ClipSubtitleWords:
    """Words overlapping ``clip`` with times shifted to start at 0 (clip-local)."""
    lo = clip.start_time_sec
    hi = clip.end_time_sec
    shifted: list[TranscriptWord] = []
    for word in _iter_words_from_segments(transcript):
        # Keep only words intersecting (lo, hi); clamp partial overlaps
        # to the clip edges before shifting to clip-local time.
        if word.start_time < hi and word.end_time > lo:
            begin = max(word.start_time, lo) - lo
            finish = min(word.end_time, hi) - lo
            if finish > begin:
                shifted.append(
                    TranscriptWord(word=word.word, start_time=begin, end_time=finish)
                )
    if not shifted:
        # No timestamped words in range: spread the clip transcript evenly.
        return ClipSubtitleWords(words=_fallback_even_words(clip))
    return ClipSubtitleWords(words=shifted)
def _fallback_even_words(clip: Clip) -> list[TranscriptWord]:
    """Even split over clip duration when no word timestamps exist."""
    tokens = (clip.transcript or "").split()
    if not tokens:
        return []
    total = clip.duration_sec
    slot = total / len(tokens)
    last = len(tokens) - 1
    return [
        TranscriptWord(
            word=tok,
            start_time=idx * slot,
            # Pin the final word's end to the exact clip duration so float
            # accumulation can never overshoot the clip.
            end_time=total if idx == last else (idx + 1) * slot,
        )
        for idx, tok in enumerate(tokens)
    ]
def clip_words_to_srt_lines(
    words: list[TranscriptWord],
    *,
    max_words_per_cue: int = _MAX_WORDS_PER_CUE,
    max_cue_sec: float = _MAX_CUE_SEC,
) -> list[tuple[float, float, str]]:
    """Greedily pack words into SRT cues capped by word count and duration.

    :param words: clip-local words in time order.
    :param max_words_per_cue: hard cap on words per cue (clamped to >= 1).
    :param max_cue_sec: max span from cue start to a candidate word's start
        (clamped to >= 0.2s).
    :return: ``(start_sec, end_sec, text)`` triples; each cue ends at the
        end time of its last word.
    """
    if not words:
        return []
    # Clamp limits to sane minimums so a bad caller can't produce empty cues.
    word_cap = max(1, int(max_words_per_cue))
    span_cap = max(0.2, float(max_cue_sec))
    cues: list[tuple[float, float, str]] = []
    total = len(words)
    start = 0
    while start < total:
        cue_start = words[start].start_time
        cue_end = words[start].end_time
        stop = start + 1
        # Extend the cue while both caps still allow another word.
        while stop < total and stop - start < word_cap:
            candidate = words[stop]
            if candidate.start_time - cue_start > span_cap:
                break
            cue_end = candidate.end_time
            stop += 1
        cues.append(
            (cue_start, cue_end, " ".join(w.word for w in words[start:stop]))
        )
        start = stop
    return cues
def format_srt(lines: list[tuple[float, float, str]]) -> str:
    """Serialize ``(start, end, text)`` cues as an SRT document string."""
    blocks = [
        f"{n}\n{_fmt_time(t0)} --> {_fmt_time(t1)}\n{body}\n"
        for n, (t0, t1, body) in enumerate(lines, start=1)
    ]
    # Blocks are separated by a blank line, per the SRT convention.
    return "\n".join(blocks)
def _fmt_time(seconds: float) -> str:
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
secs = int(seconds % 60)
millis = int(round((seconds % 1) * 1000))
if millis >= 1000:
millis = 999
return f"{hours:02d}:{minutes:02d}:{secs:02d},{millis:03d}"
# ---------------------------------------------------------------------------
# ASS / SubStation Alpha output (the format libass natively renders)
# ---------------------------------------------------------------------------
def _fmt_ass_time(seconds: float) -> str:
"""ASS time format: ``H:MM:SS.cs`` (centiseconds)."""
seconds = max(0.0, seconds)
hours = int(seconds // 3600)
minutes = int((seconds % 3600) // 60)
secs = seconds % 60
whole = int(secs)
cs = int(round((secs - whole) * 100))
if cs >= 100:
cs = 99
return f"{hours:d}:{minutes:02d}:{whole:02d}.{cs:02d}"
def _escape_ass_text(text: str) -> str:
"""Escape characters that are significant to the ASS dialogue parser."""
return (
text.replace("\\", r"\\")
.replace("{", r"\{")
.replace("}", r"\}")
.replace("\n", r"\N")
)
def format_ass(
    lines: list[tuple[float, float, str]],
    *,
    play_res_x: int,
    play_res_y: int,
    font_size: int,
    margin_v: int,
    margin_h: int = 60,
    font_name: str = "Arial",
) -> str:
    """Render captions as an ASS script whose PlayRes matches the output video.

    Why this exists: libass' font/margin scaling multiplies every pixel-ish
    value by ``video_height / PlayResY``. The default ``PlayResY=288`` blew
    ``FontSize=48`` up to ~320 output pixels and pushed ``MarginV`` to the
    middle of the frame. Pinning ``PlayResY`` to the actual output height
    makes that scale factor exactly 1.0, so ``font_size`` and ``margin_v``
    below are honest output pixel values.
    """
    script_info = (
        "[Script Info]\n"
        "ScriptType: v4.00+\n"
        f"PlayResX: {play_res_x}\n"
        f"PlayResY: {play_res_y}\n"
        "WrapStyle: 0\n"
        "ScaledBorderAndShadow: yes\n"
        "YCbCr Matrix: None\n"
    )
    styles = (
        "[V4+ Styles]\n"
        "Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, "
        "OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, "
        "ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, "
        "Alignment, MarginL, MarginR, MarginV, Encoding\n"
        # Bold=-1, Italic=0, ScaleX/Y=100, BorderStyle=4 (opaque box),
        # Outline=0, Shadow=0, Alignment=2 (bottom-center).
        f"Style: Default,{font_name},{font_size},&H00FFFFFF,&H000000FF,"
        f"&H00000000,&H70000000,-1,0,0,0,100,100,0,0,4,0,0,2,"
        f"{margin_h},{margin_h},{margin_v},0\n"
    )
    events_header = (
        "[Events]\n"
        "Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n"
    )
    dialogue = [
        f"Dialogue: 0,{_fmt_ass_time(t0)},{_fmt_ass_time(t1)},Default,,"
        f"0,0,0,,{_escape_ass_text(body)}"
        for t0, t1, body in lines
    ]
    script = script_info + "\n" + styles + "\n" + events_header
    if not dialogue:
        return script
    # One Dialogue line per cue; trailing newline closes the last event.
    return script + "\n".join(dialogue) + "\n"
|