| """ |
| Parse all paragraphs from all *.fb2 files in the input directory, create a Huggingface Dataset and push it |
| to the Hub as `vldsavelyev/murakami`. |
| """ |
|
|
|
|
| import os |
| from pathlib import Path |
| from lxml import etree |
| import datasets |
|
|
# Make the `datasets` library report progress while the dataset is generated.
datasets.logging.set_verbosity_info()




# Shown as the dataset description on the Hugging Face Hub dataset card.
_DESCRIPTION = """\
Russian translations of Murakami novels, to fine-tune a generative language model. Source is FB2 files
from http://flibusta.is/a/8570.
"""
|
|
|
|
class Builder(datasets.GeneratorBasedBuilder):
    """Murakami novels, translated to Russian."""

    VERSION = datasets.Version("1.1.0")

    # Chapters shorter than this many characters are dropped: they are
    # typically front-matter, epigraphs, or TOC fragments rather than prose.
    MIN_CHAPTER_SIZE = 500

    def _info(self):
        """Return dataset metadata: every example is a single `text` string."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({"text": datasets.Value("string")}),
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Download and extract `data.zip`, collect the FB2 files, and hold
        out the smallest book as the test split (cheapest held-out set)."""
        fb2_dir = dl_manager.download_and_extract("data.zip")
        fb2_paths = list(Path(fb2_dir).glob("**/*.fb2"))
        if not fb2_paths:
            raise ValueError(f"No fb2 files found in {fb2_dir}")
        print(f"Found {len(fb2_paths)} fb2 files")

        # NOTE: this book goes into the TEST split, not TRAIN — the original
        # message claiming "training example" was incorrect.
        smallest_path = min(fb2_paths, key=os.path.getsize)
        print(f"Using smallest title as a test example: {smallest_path}")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepaths": [p for p in fb2_paths if p != smallest_path],
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepaths": [smallest_path],
                },
            ),
        ]

    def _generate_examples(self, filepaths):
        """Yield one example per chapter, keyed `"<book title> <chapter idx>"`."""
        for fileidx, filepath in enumerate(filepaths):
            title, chapters = self._extract_text_from_fb2(filepath, fileidx)
            for i, chapter in enumerate(chapters):
                yield f"{title} {i}", {"text": chapter}

    @staticmethod
    def _extract_text_from_fb2(filepath: Path, fileidx: int) -> tuple[str, list[str]]:
        """
        Parse a FB2 file and return book chapters, along with the book title.

        Args:
            filepath: path to the ``.fb2`` (FictionBook 2) XML file.
            fileidx: index of the file in the split (currently unused; kept
                for interface compatibility).

        Returns:
            ``(title, chapters)`` where ``chapters`` contains only chapters
            at least ``MIN_CHAPTER_SIZE`` characters long.

        Raises:
            IndexError: if the file has no ``book-title`` element.
            lxml.etree.XMLSyntaxError: if the file is not well-formed XML.
        """
        root = etree.fromstring(filepath.read_bytes())

        title = root.xpath(
            "//fb:title-info/fb:book-title",
            namespaces={"fb": "http://www.gribuser.ru/xml/fictionbook/2.0"},
        )[0].text

        chapters: list[str] = []

        def _add_chapter(text: str) -> None:
            # Keep only non-empty chapters of sufficient length.
            if not text:
                return
            if (
                Builder.MIN_CHAPTER_SIZE is not None
                and len(text) < Builder.MIN_CHAPTER_SIZE
            ):
                return
            chapters.append(text)

        # Walk the whole tree: paragraph (<p>) text accumulates into the
        # current chapter; each <section> boundary flushes it.
        chapter = ""
        for e in root.iter():
            if e.tag.endswith("}p"):
                # itertext() also collects text nested in inline markup
                # (<emphasis>, <strong>, ...), which bare `e.text` would drop.
                chapter += "".join(e.itertext()) + (e.tail or "")
            elif e.tag.endswith("}section"):
                _add_chapter(chapter)
                chapter = ""
        _add_chapter(chapter)  # flush the trailing chapter

        print(f'{filepath}: "{title}", found {len(chapters)} chapters')

        return title, chapters
|
|