| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| """Simple sentences Dataset - contains 90 mins of speech data""" |
|
|
| import csv |
| import json |
| import os |
|
|
| import datasets |
|
|
# BibTeX entries to credit when using this dataset.
_CITATION = """\
@misc{simpledata_1,
title = {Whisper model for tamil-to-eng translation},
publisher = {Achitha},
year = {2022},
}
@misc{simpledata_2,
title = {Fine-tuning whisper model},
publisher = {Achitha},
year = {2022},
}
"""
# One-paragraph summary shown on the dataset card.
_DESCRIPTION = """\
The data contains roughly one and half hours of audio and transcripts in Tamil language.
"""


# Dataset homepage (none published).
_HOMEPAGE = ""


_LICENSE = "MIT"


# Per-split JSONL transcript files, paths relative to the dataset repository root.
_METADATA_URLS = {
    "train": "data/train.jsonl",
    "test": "data/test.jsonl"
}
# Per-split tar archives containing the audio clips.
_URLS = {
    "train": "data/train.tar.gz",
    "test": "data/test.tar.gz",
}
|
|
class simple_data(datasets.GeneratorBasedBuilder):
    """Builder for the "simple sentences" Tamil speech dataset (~90 minutes of audio).

    Each example pairs an audio clip (16 kHz) with its transcript sentence,
    the clip's path inside the archive, and its duration in seconds.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata (features, citation, license, homepage)."""
        features = datasets.Features(
            {
                "audio": datasets.Audio(sampling_rate=16_000),
                "path": datasets.Value("string"),
                "sentence": datasets.Value("string"),
                "length": datasets.Value("float"),
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            # There is no "label" feature in this dataset, so the previous
            # ("sentence", "label") pair was invalid; speech->text has no
            # meaningful supervised (input, target) column pair here.
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download metadata/audio archives and declare the train/test splits."""
        metadata_paths = dl_manager.download(_METADATA_URLS)
        train_archive = dl_manager.download(_URLS["train"])
        test_archive = dl_manager.download(_URLS["test"])
        # In streaming mode the archives are iterated directly via
        # iter_archive; otherwise extract them so examples can carry
        # real local file paths.
        local_extracted_train_archive = (
            dl_manager.extract(train_archive) if not dl_manager.is_streaming else None
        )
        local_extracted_test_archive = (
            dl_manager.extract(test_archive) if not dl_manager.is_streaming else None
        )
        # Directory prefix of the clips inside each archive.
        train_dir = "train"
        test_dir = "test"

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "metadata_path": metadata_paths["train"],
                    "local_extracted_archive": local_extracted_train_archive,
                    "path_to_clips": train_dir,
                    "audio_files": dl_manager.iter_archive(train_archive),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "metadata_path": metadata_paths["test"],
                    "local_extracted_archive": local_extracted_test_archive,
                    "path_to_clips": test_dir,
                    "audio_files": dl_manager.iter_archive(test_archive),
                },
            ),
        ]

    def _generate_examples(self, metadata_path, local_extracted_archive, path_to_clips, audio_files):
        """Yield (key, example) tuples by joining JSONL metadata with archive audio.

        Args:
            metadata_path: Local path to the split's JSONL transcript file;
                each row must contain at least a "path" key naming an
                archive member.
            local_extracted_archive: Extraction root when not streaming,
                else None (audio bytes are read straight from the archive).
            path_to_clips: Directory prefix of this split's clips inside
                the archive.
            audio_files: Iterator of (member_path, file_object) pairs from
                dl_manager.iter_archive.
        """
        # Index metadata rows by their in-archive audio path for O(1) joins.
        examples = {}
        with open(metadata_path, encoding="utf-8") as f:
            for row in f:
                data = json.loads(row)
                examples[data["path"]] = data

        inside_clips_dir = False
        id_ = 0
        for path, audio_file in audio_files:
            if path.startswith(path_to_clips):
                inside_clips_dir = True
                if path in examples:
                    result = examples[path]
                    # Prefer an on-disk path when the archive was extracted.
                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
                    result["audio"] = {"path": path, "bytes": audio_file.read()}
                    result["path"] = path
                    yield id_, result
                    id_ += 1
            elif inside_clips_dir:
                # Archive members are grouped by directory, so once we have
                # left the clips directory no further matches can follow.
                break