import gzip
import json
import os
import re
from pathlib import Path
from time import sleep

import datasets
import numpy as np
import requests
from tqdm import tqdm
|
|
| logger = datasets.logging.get_logger(__name__) |
|
|
| _DESCRIPTION = """\ |
Libriheavy is a labeled version of the Libri-Light corpus.
This (unofficial) Hugging Face dataset contains the medium (~4,500 hours) split of Libriheavy, together with word and phone alignments and precomputed mel spectrograms.
| """ |
|
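# A minimal usage sketch. The repository id below is a placeholder for wherever
# this script is hosted; recent versions of `datasets` may also require
# `trust_remote_code=True` for script-based datasets:
#
#     from datasets import load_dataset
#     ds = load_dataset("user/libriheavy", "libriheavy", split="train", streaming=True)
#     example = next(iter(ds))
#
# Streaming is advisable: each example carries a full mel spectrogram.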
|
| _URL = """\ |
| https://github.com/k2-fsa/libriheavy |
| """ |
|
|
| _CITATION = """\ |
| @article{kang2023libriheavy, |
  title={Libriheavy: a 50,000 hours ASR corpus with punctuation casing and context},
| author={Kang, Wei and Yang, Xiaoyu and Yao, Zengwei and Kuang, Fangjun and Yang, Yifan and Guo, Liyong and Lin, Long and Povey, Daniel}, |
| journal={arXiv preprint arXiv:2309.08105}, |
| year={2023} |
| } |
| """ |
|
|
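# Relative root of the data files; dl_manager resolves such paths against the
# location of this script (e.g. the dataset repository on the Hub).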
| PATH = "./medium_data" |
|
|
| class LibriheavyConfig(datasets.BuilderConfig): |
| """BuilderConfig for Libriheavy.""" |
|
|
| def __init__(self, **kwargs): |
| """BuilderConfig for Libriheavy. |
| Args: |
| **kwargs: keyword arguments forwarded to super. |
| """ |
        super().__init__(**kwargs)
|
|
|
|
| class Libriheavy(datasets.GeneratorBasedBuilder): |
| """Libriheavy dataset.""" |
|
|
| BUILDER_CONFIGS = [ |
| LibriheavyConfig(name="libriheavy", version=datasets.Version("1.0.0"), description="Libriheavy dataset."), |
| ] |
|
|
| def _info(self): |
| return datasets.DatasetInfo( |
| description=_DESCRIPTION, |
| features=datasets.Features( |
| { |
| "id": datasets.Value("string"), |
| "speaker_id": datasets.Value("string"), |
| "speaker_name": datasets.Value("string"), |
| "speaker_vec": datasets.Sequence(datasets.Value("float32")), |
| "audio": datasets.Value("string"), |
| "text": datasets.Value("string"), |
| "word_segments": datasets.Sequence( |
| { |
| "start": datasets.Value("float32"), |
| "end": datasets.Value("float32"), |
| "word": datasets.Value("string"), |
| } |
| ), |
| "phone_segments": datasets.Sequence( |
| { |
| "start": datasets.Value("float32"), |
| "end": datasets.Value("float32"), |
| "phone": datasets.Value("string"), |
| } |
| ), |
| "mel_spectrogram": datasets.Sequence(datasets.Sequence(datasets.Value("float32"))), |
| "attributes": datasets.Features( |
| { |
| "pitch": datasets.Sequence(datasets.Value("float32")), |
| "energy": datasets.Sequence(datasets.Value("float32")), |
| "snr": datasets.Sequence(datasets.Value("float32")), |
| "srmr": datasets.Sequence(datasets.Value("float32")), |
| } |
| ), |
| "overall_attributes": datasets.Features( |
| { |
| "pitch": datasets.Value("float32"), |
| "energy": datasets.Value("float32"), |
| "snr": datasets.Value("float32"), |
| "srmr": datasets.Value("float32"), |
| } |
| ), |
| } |
| ), |
| supervised_keys=None, |
| homepage=_URL, |
| citation=_CITATION, |
| ) |
|
|
| def _split_generators(self, dl_manager): |
| """Returns SplitGenerators.""" |
| |
        speaker_list_path = dl_manager.download_and_extract(f"{PATH}/speaker_list.json")
        with open(speaker_list_path, "r") as f:
            speaker_list = json.load(f)
| |
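        # Per-speaker metadata (including the scraped speaker name) is cached under
        # HF_HOME so that reruns skip the download and the librivox lookup.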
        speaker_metadata = {}
        hf_home = os.path.expanduser(os.environ.get("HF_HOME", "~/.cache/huggingface"))
        metadata_cache = f"{hf_home}/libriheavy_metadata"
        for speaker_id, metadata_path in tqdm(speaker_list.items()):
| |
| if os.path.exists(f"{metadata_cache}/{speaker_id}.json"): |
| with open(f"{metadata_cache}/{speaker_id}.json", "r") as f: |
| speaker_metadata[speaker_id] = json.load(f) |
| else: |
| Path(metadata_cache).mkdir(parents=True, exist_ok=True) |
| metadata_path = f"{PATH}/{speaker_id}/{metadata_path}" |
| metadata_path = dl_manager.download_and_extract(metadata_path) |
| with open(metadata_path, "r") as f: |
| speaker_metadata[speaker_id] = json.load(f) |
                # Scrape the speaker's display name from librivox.org; fall back to
                # "None" if the page is unavailable or has no <h1> heading.
                try:
                    page = requests.get(f"https://librivox.org/reader/{speaker_id}", timeout=30).text
                    speaker_name = re.findall("<h1>([^<>]+)</h1>", page)[0]
                    sleep(0.5)  # be polite to librivox.org
                except (IndexError, requests.RequestException):
                    print(f"No name found for speaker with id {speaker_id}")
                    speaker_name = "None"
| speaker_metadata[speaker_id]["name"] = speaker_name |
| with open(f"{metadata_cache}/{speaker_id}.json", "w") as f: |
| json.dump(speaker_metadata[speaker_id], f) |
|
|
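        # Build one entry per chunk, and partition chunks by chunk-id parity;
        # the "even"/"odd" splits below draw from these partitions.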
| speaker_chunks = [] |
| even_speaker_chunks = [] |
| odd_speaker_chunks = [] |
| for speaker_id, metadata in speaker_metadata.items(): |
| for chunk_id, chunk in metadata["chunks"].items(): |
| chunk_dict = { |
| "speaker_id": speaker_id, |
| "speaker_name": metadata["name"], |
| "id": f"{speaker_id}_{chunk_id}", |
| "audio": dl_manager.download(f"{PATH}/{speaker_id}/{chunk['npz'].replace('.gz', '')}"), |
| "text": dl_manager.download(f"{PATH}/{speaker_id}/{chunk['json']}"), |
| } |
| speaker_chunks.append(chunk_dict) |
| if int(chunk_id) % 2 == 0: |
| even_speaker_chunks.append(chunk_dict) |
| else: |
| odd_speaker_chunks.append(chunk_dict) |
| |
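        # Fixed seed: shuffling is deterministic, so streaming order is reproducible.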
| np.random.seed(42) |
| np.random.shuffle(speaker_chunks) |
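        # The "even{N}"/"odd{N}" splits stream at most N hours of audio (see _generate_examples).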
| return [ |
| datasets.SplitGenerator( |
| name="train", |
| gen_kwargs={"speaker_chunks": speaker_chunks, "split": "train"} |
| ), |
| datasets.SplitGenerator( |
| name="validation", |
| gen_kwargs={"speaker_chunks": speaker_chunks, "split": "validation"} |
| ), |
| datasets.SplitGenerator( |
| name="even", |
| gen_kwargs={"speaker_chunks": even_speaker_chunks, "split": "even"} |
| ), |
| datasets.SplitGenerator( |
| name="odd", |
| gen_kwargs={"speaker_chunks": odd_speaker_chunks, "split": "odd"} |
| ), |
| datasets.SplitGenerator( |
| name="even100", |
| gen_kwargs={"speaker_chunks": even_speaker_chunks, "split": "even", "hours": 100} |
| ), |
| datasets.SplitGenerator( |
| name="odd100", |
| gen_kwargs={"speaker_chunks": odd_speaker_chunks, "split": "odd", "hours": 100} |
| ), |
| datasets.SplitGenerator( |
| name="even500", |
| gen_kwargs={"speaker_chunks": even_speaker_chunks, "split": "even", "hours": 500} |
| ), |
| datasets.SplitGenerator( |
| name="odd500", |
| gen_kwargs={"speaker_chunks": odd_speaker_chunks, "split": "odd", "hours": 500} |
| ), |
| datasets.SplitGenerator( |
| name="even1000", |
| gen_kwargs={"speaker_chunks": even_speaker_chunks, "split": "even", "hours": 1000} |
| ), |
| datasets.SplitGenerator( |
| name="odd1000", |
| gen_kwargs={"speaker_chunks": odd_speaker_chunks, "split": "odd", "hours": 1000} |
| ), |
| ] |
|
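    @staticmethod
    def _build_example(chunk, utterance_id, utterance, npz_item):
        """Assemble one example dict; shared by the train and validation branches."""
        return {
            "id": chunk["speaker_id"] + "_" + utterance_id,
            "speaker_id": chunk["speaker_id"],
            "speaker_name": chunk["speaker_name"],
            "speaker_vec": npz_item["d_vector"][0],
            "audio": chunk["audio"],
            # Skip special "<...>" tokens when assembling the plain transcript.
            "text": " ".join(segment[2] for segment in utterance["word_segments"] if "<" not in segment[2]),
            "word_segments": [
                {"start": segment[0], "end": segment[1], "word": segment[2]} for segment in utterance["word_segments"]
            ],
            "phone_segments": [
                {"start": segment[0], "end": segment[1], "phone": segment[2]} for segment in utterance["phone_segments"]
            ],
            "mel_spectrogram": npz_item["mel"][0][0],
            "attributes": {
                "pitch": npz_item["pitch"][0],
                "energy": npz_item["energy"][0],
                "snr": npz_item["snr"][0],
                "srmr": npz_item["srmr"][0],
            },
            "overall_attributes": {
                "pitch": npz_item["overall_pitch"],
                "energy": npz_item["overall_energy"],
                "snr": npz_item["overall_snr"],
                "srmr": npz_item["overall_srmr"],
            },
        }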
|
    def _generate_examples(self, speaker_chunks, split, hours=None):
        """Yields examples.

        For "train", "even" and "odd", every utterance of a chunk except the
        last one is yielded; the held-out last utterances form the
        "validation" split. If ``hours`` is set, generation stops once that
        many hours of audio have been streamed.
        """
        hours_streamed = 0
        finish_stream = False
        if hours is None:
            hours = float("inf")
| for chunk in speaker_chunks: |
| if finish_stream: |
| break |
            # Loading the npz from the Hub occasionally fails; retry a few times before giving up.
            npz = None
            for _ in range(10):
                try:
                    npz = dict(np.load(chunk["audio"], allow_pickle=True))
                    break
                except Exception as e:
                    print(e, "retrying in 60s")
                    sleep(60)
            if npz is None:
                raise RuntimeError(f"Failed to load {chunk['audio']} after 10 retries.")
            # chunk["text"] is a gzipped JSON file mapping utterance ids to alignments.
            with gzip.open(chunk["text"], "rt") as f:
                text = json.load(f)
            # The last utterance (by sorted id) of each chunk is held out for validation.
            last_utterance_id = sorted(text.keys())[-1]
            if split in ["train", "even", "odd"]:
                for utterance_id, utterance in text.items():
                    if utterance_id == last_utterance_id:
                        continue
                    npz_item = npz[str(utterance_id)].item()
                    result = self._build_example(chunk, utterance_id, utterance, npz_item)
                    # Approximate the utterance duration from its first and last word timestamps.
                    hours_streamed += (utterance["word_segments"][-1][1] - utterance["word_segments"][0][0]) / 3600
| yield chunk["speaker_id"] + "_" + utterance_id, result |
| if hours_streamed >= hours: |
| finish_stream = True |
| break |
            else:
                # Validation: yield only the held-out last utterance of the chunk.
                utterance_id = last_utterance_id
                utterance = text[utterance_id]
                npz_item = npz[str(utterance_id)].item()
                result = self._build_example(chunk, utterance_id, utterance, npz_item)
| hours_streamed += (utterance["word_segments"][-1][1] - utterance["word_segments"][0][0]) / 3600 |
| yield chunk["speaker_id"] + "_" + utterance_id, result |
| if hours_streamed >= hours: |
| finish_stream = True |
| break |