| from copy import deepcopy |
| import pathlib |
| import srt |
| import typer |
| import re |
| import orjson |
|
|
# Typer application object; the functions below register themselves as CLI
# subcommands via @app.command().
app = typer.Typer()
|
|
# Regex fragments used by split_into_sentences() to protect periods that do
# not terminate a sentence (abbreviations, decimals, acronyms, websites) and
# to recognize likely sentence starters.
alphabets = "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov|edu|me)"
digits = "([0-9])"
multiple_dots = r"\.{2,}"




def split_into_sentences(text: str) -> list[str]:
    """
    Break *text* into a list of sentences.

    Non-terminating periods (abbreviations, decimals, acronyms, URLs,
    ellipses) are first encoded as the marker "<prd>", true sentence
    boundaries are marked with "<stop>", and the text is finally split on
    "<stop>".  Consequently, input that already contains the literal
    substrings "<prd>" or "<stop>" will be split incorrectly.

    :param text: text to be split into sentences
    :type text: str

    :return: list of sentences
    :rtype: list[str]
    """
    text = f" {text} ".replace("\n", " ")
    # Protect periods belonging to titles, websites and decimal numbers.
    text = re.sub(prefixes, "\\1<prd>", text)
    text = re.sub(websites, "<prd>\\1", text)
    text = re.sub(digits + "[.]" + digits, "\\1<prd>\\2", text)
    # An ellipsis ends a sentence, but its dots must survive the split.
    text = re.sub(
        multiple_dots, lambda m: "<prd>" * len(m.group(0)) + "<stop>", text
    )
    text = text.replace("Ph.D.", "Ph<prd>D<prd>") if "Ph.D" in text else text
    # Single-letter abbreviations ("J. Smith"), acronyms and suffixes.
    text = re.sub(r"\s" + alphabets + "[.] ", " \\1<prd> ", text)
    text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
    three_letter = alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]"
    text = re.sub(three_letter, "\\1<prd>\\2<prd>\\3<prd>", text)
    text = re.sub(alphabets + "[.]" + alphabets + "[.]", "\\1<prd>\\2<prd>", text)
    text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
    text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
    text = re.sub(" " + alphabets + "[.]", " \\1<prd>", text)
    # Move terminators outside closing quotes so the quote stays attached
    # to its sentence.
    for inside_quote, outside_quote in (('.”', '”.'), ('."', '".'), ('!"', '"!'), ('?"', '"?')):
        text = text.replace(inside_quote, outside_quote)
    # Mark every remaining true terminator as a boundary, then restore the
    # protected periods.
    for terminator in (".", "?", "!"):
        text = text.replace(terminator, terminator + "<stop>")
    text = text.replace("<prd>", ".")
    pieces = [chunk.strip() for chunk in text.split("<stop>")]
    if pieces and pieces[-1] == "":
        pieces.pop()
    return pieces
|
|
|
|
@app.command()
def srt_folder(folder: pathlib.Path, output_file: pathlib.Path):
    """
    Convert every .srt file under *folder* into one JSON-lines output file.

    For each subtitle file, the list of "things" covered is parsed out of the
    file name: the segment between the first "-" and the "(576p"/"(1080p"
    resolution tag, split on ";".  Files whose name lacks a resolution tag
    are reported and skipped.

    :param folder: directory searched recursively for ``*.srt`` files
    :param output_file: JSON-lines file to create (one object per subtitle file)
    """
    with open(output_file, "wb") as f:
        for file in folder.rglob("*.srt"):
            if "(576p" in file.stem:
                resolution_tag = "(576p"
            elif "(1080p" in file.stem:
                resolution_tag = "(1080p"
            else:
                print(file.stem, "Missing trailing?")
                # BUG FIX: the original fell through here with
                # things_string unassigned, raising NameError on the first
                # file or silently reusing the previous iteration's value.
                continue
            things_string = (
                "_".join(file.stem.split("_")[:-1]).split("-")[1].split(resolution_tag)[0]
            )
            things = [thing.strip() for thing in things_string.split(";")]
            dict_content = srt_file(file, None, as_dict=True)
            dict_content["meta"]["things"] = things
            # Delete then re-insert "text" so it serializes after "meta",
            # and replace the joined text with the sentence list.
            del dict_content["text"]
            dict_content["text"] = dict_content["meta"]["list_sentences"]
            del dict_content["meta"]["list_sentences"]
            f.write(orjson.dumps(dict_content) + b"\n")
|
|
|
|
@app.command()
def srt_file(file: pathlib.Path, output_file: pathlib.Path, as_dict: bool = False):
    """
    Convert one .srt subtitle file into a JSON document of cleaned sentences.

    Reads the subtitle cues, drops caption credits, strips narrator/speaker
    markers, joins the cues into running text, splits that into sentences,
    and removes boilerplate (bracketed sound cues, "drop us a line", ...).

    :param file: path to the .srt file to read
    :param output_file: destination for the JSON output; may be None when
        ``as_dict`` is True (ignored in that case)
    :param as_dict: return the payload instead of writing it to disk
    :return: ``{"text": "...", "meta": {"list_sentences": [...]}}`` when
        ``as_dict`` is True, otherwise None
    """
    data = file.read_text(encoding="utf-8")
    raw_content = ""
    for sub in srt.parse(data):
        sub_content = sub.content.lower()
        # Skip caption-credit cues entirely.
        if "captions by" in sub_content or "captions paid for" in sub_content:
            continue
        # Strip speaker markers.  BUG FIX: the original tested
        # startswith("narrator") but split on "narrator:", which raised
        # IndexError for cues like "narrator speaking" (no colon).
        if sub_content.startswith("narrator:"):
            sub_content = sub_content.split("narrator:", 1)[1].strip()
        if sub_content.startswith(">> narrator:"):
            sub_content = sub_content.split(">> narrator:", 1)[1].strip()
        if sub_content.startswith(">>"):
            sub_content = sub_content[2:].strip()
        raw_content += sub_content.replace("\\N", " ").replace("  ", " ") + " "
    raw_content = raw_content.replace(" --", "-- ").replace("♪", "").replace("  ", " ")
    sents = split_into_sentences(raw_content)
    # Capitalize the first letter of each sentence.  BUG FIX: guard against
    # empty strings, on which the original s[0] raised IndexError.
    sents = [s[0].upper() + s[1:] for s in sents if s]
    # Drop bracketed sound cues and boilerplate sentences.  This replaces
    # the original deepcopy/append/slice dance with a plain filtered rebuild.
    boilerplate = (
        "have any comments about the show",
        "have any comments,",
        "have any questions about the show",
        "drop us a line at",
    )
    cleaned = []
    for sent in sents:
        sent = re.sub(r"\[ .*? \]", "", sent).strip()
        if not sent.strip("."):
            continue  # nothing left but dots/whitespace
        if any(phrase in sent for phrase in boilerplate):
            continue
        cleaned.append(sent)
    sents = cleaned
    payload = {"text": " ".join(sents), "meta": {"list_sentences": sents}}
    if as_dict:
        return payload
    output_file.write_bytes(orjson.dumps(payload))
|
|
|
|
if __name__ == "__main__":
    # Run the Typer CLI (srt-folder / srt-file subcommands) when executed
    # as a script.
    app()
|
|