| import json |
| import tarfile |
| import time |
| from contextlib import closing |
| from io import BytesIO |
| from pathlib import Path |
|
|
| from pydub import AudioSegment |
| from tqdm import tqdm |
|
|
|
|
# Microphone channels present in each recording: (position label, filename suffix index).
# The suffix index maps to the "-1.wav"/"-2.wav" endings of the raw audio files.
MIC_TYPES = (("close", 1), ("distant", 2))
|
|
def convert_to_mp3(split, audio_dir, metadata_dir, output_dir, prefix=None):
    """Convert paired close/distant WAV recordings of *split* to MP3.

    Reads per-speaker JSON metadata files from ``metadata_dir``, locates the
    matching ``-1.wav``/``-2.wav`` files under ``audio_dir/<pid>/``, and
    exports MP3s to ``output_dir/<split>/<mic_position>/``.

    Side effects (written to the current working directory):
      * ``<prefix><split>.jsonl`` — one flattened metadata record per recording
        for which BOTH microphone channels were found.
      * ``<prefix><split>.log``   — one line per missing audio file.

    Args:
        split: Dataset split name, e.g. ``"train"`` or ``"test"``.
        audio_dir: Root directory containing per-speaker WAV subdirectories.
        metadata_dir: Directory containing the ``*.json`` metadata files.
        output_dir: Root directory for the exported MP3 tree.
        prefix: Optional filename prefix; a trailing ``"_"`` is appended
            automatically when missing. Falsy values mean no prefix.
    """
    if prefix and not prefix.endswith("_"):
        prefix += "_"
    else:
        prefix = ""
    paths = Path(metadata_dir).glob("*.json")
    audio_dir = Path(audio_dir)
    output_dir = Path(output_dir)
    with (open(f"{prefix}{split}.jsonl", "w") as metadata_jsonlines,
          open(f"{prefix}{split}.log", "w") as metadata_log):
        for path in tqdm(paths):
            # Close the metadata file deterministically instead of relying on GC.
            with path.open() as metadata_file:
                metadata = json.load(metadata_file)
            for recording in tqdm(metadata.get("val_recordings", [])):
                # Flattened record for this recording; it is identical for both
                # mic channels, so build it once outside the mic loop.
                metadata_jsonl = {
                    "pid": metadata["pid"],
                    **metadata["info"],
                    **metadata["session"],
                    **metadata["system"],
                    **recording
                }
                mic_segments = [None, None]
                for mic_position, mic_index in MIC_TYPES:
                    audio_filename = audio_dir / metadata["pid"] / f'{metadata["pid"]}_{recording["file"].replace(".wav", f"-{mic_index}.wav")}'
                    try:
                        segment = AudioSegment.from_file(audio_filename.as_posix())
                        mic_segments[mic_index - 1] = segment
                    except FileNotFoundError:
                        message = f"File {audio_filename.as_posix()} ({mic_position}) not found for split {split}."
                        print(message)
                        metadata_log.write(message + "\n")
                # Only export recordings where both channels were found, so the
                # close/distant MP3 trees stay perfectly parallel.
                if all(mic_segments):
                    metadata_jsonlines.write(json.dumps(metadata_jsonl) + "\n")
                    for idx, segment in enumerate(mic_segments):
                        mic_position = MIC_TYPES[idx][0]
                        export_filename = output_dir / split / mic_position / f"{metadata_jsonl['pid']}_{metadata_jsonl['file'].replace('.wav', '.mp3')}"
                        export_filename.parent.mkdir(parents=True, exist_ok=True)
                        segment.export(export_filename.as_posix(), format="mp3")
|
|
|
|
def human_size(bytes, units=(' bytes', 'KB', 'MB', 'GB', 'TB', 'PB', 'EB')):
    """Return a human-readable string representation of *bytes*.

    Divides by 1024 (via right-shift, so values are truncated to whole
    units: 1535 -> "1KB") and recurses down *units*. Once the final unit
    is reached it is used regardless of magnitude, instead of raising
    IndexError as the previous implementation did for values >= 1024 EB.

    The parameter is named ``bytes`` (shadowing the builtin) to preserve
    the existing call signature.
    """
    if bytes < 1024 or len(units) == 1:
        return str(bytes) + units[0]
    return human_size(bytes >> 10, units[1:])
|
|
|
|
def create_shards(split, mp3_dir, shard_dir, size, prefix=None):
    """Pack the MP3s of *split* into ~*size*-byte gzipped tar shards.

    For each mic position, streams the ``<prefix><split>.jsonl`` metadata
    file, adds each referenced MP3 from ``mp3_dir/<split>/<mic_position>/``
    to the current shard until the accumulated file size reaches ``size``,
    then embeds the shard's metadata as ``metadata.json`` inside the tar and
    writes a sibling ``.json`` copy next to it. Finally renames all shards
    to the ``-NNNN-of-MMMM`` convention once the total shard count is known.

    Args:
        split: Dataset split name ("train"/"test").
        mp3_dir: Root of the MP3 tree written by ``convert_to_mp3``.
        shard_dir: Output root; shards land in ``shard_dir/<split>/``.
        size: Target shard payload size in bytes (sum of MP3 file sizes).
        prefix: Optional filename prefix; trailing "_" appended when missing.
    """
    if prefix and not prefix.endswith("_"):
        prefix += "_"
    else:
        prefix = ""
    mp3_dir = Path(mp3_dir)
    shard_dir = Path(shard_dir)
    for mic_position, _ in MIC_TYPES:
        # The jsonl is re-read from the start for each mic position; only the
        # mic subdirectory of the MP3 tree differs between passes.
        with open(f"{prefix}{split}.jsonl", "r") as metadata_jsonlines_file:
            metadata_jsonlines = iter(metadata_jsonlines_file)
            shard_count = 0
            shard_jsonlines = []  # JSON strings for the current shard's metadata.json
            shard_size = 0
            # `metadata` doubles as the loop sentinel: True to enter, the last
            # parsed record while running, None once the jsonl is exhausted.
            metadata = True
            while metadata:
                shard_name = f"{prefix}{split}_{mic_position}-{(shard_count + 1):04d}.tar.gz"
                print(f"Shard {(shard_dir / split / shard_name).as_posix()}")
                (shard_dir / split).mkdir(parents=True, exist_ok=True)
                # NOTE(review): if the jsonl ends exactly at a shard boundary,
                # the next iteration still opens a new (empty) tar here and
                # increments shard_count without writing its .json, which would
                # make the rename loop below fail — confirm against real data.
                with tarfile.open(shard_dir / split / shard_name, "w:gz") as shard:
                    pbar = tqdm()
                    while shard_size < size:
                        try:
                            metadata = json.loads(next(metadata_jsonlines))
                        except StopIteration:
                            # Input exhausted: clear the sentinel and force the
                            # size condition so the inner loop exits.
                            metadata = None
                            shard_size = size
                        if metadata:
                            audio_file = mp3_dir / split / mic_position / f"{metadata['pid']}_{metadata['file'].replace('.wav', '.mp3')}"
                            shard.add(audio_file.as_posix(), arcname=".")
                            # Shard size is measured by on-disk MP3 bytes, not
                            # the compressed tar size.
                            shard_size += audio_file.stat().st_size
                            shard_jsonlines.append(json.dumps(metadata))
                            pbar.set_description(f"Processing {audio_file.as_posix()} [{human_size(shard_size)}]")
                            pbar.update(1)
                    if shard_jsonlines:
                        pbar.set_description(f"Processing metadata [{len(shard_jsonlines)} entries]")
                        pbar.update(1)
                        shard_jsonlines_str = "\n".join(shard_jsonlines)
                        # Embed the shard's metadata inside the tar from an
                        # in-memory buffer (no temp file needed).
                        with closing(BytesIO(shard_jsonlines_str.encode('utf-8'))) as metadata_file:
                            tarinfo = tarfile.TarInfo("metadata.json")
                            tarinfo.size = len(metadata_file.getvalue())
                            tarinfo.mtime = time.time()
                            shard.addfile(tarinfo, fileobj=metadata_file)
                        # Also keep a plain-text copy of the metadata next to
                        # the shard for inspection without untarring.
                        with open(shard_dir / split / shard_name.replace(".tar.gz", ".json"), "w") as metadata_file:
                            metadata_file.write(shard_jsonlines_str)
                # Reset accumulators for the next shard.
                shard_size = 0
                shard_jsonlines = []
                shard_count += 1
        print("Renaming...")
        # Second pass: now that the total is known, rename every shard (and its
        # metadata copy) to the NNNN-of-MMMM convention.
        for shard_index in range(shard_count):
            shard_name = f"{prefix}{split}_{mic_position}-{(shard_index + 1):04d}.tar.gz"
            (shard_dir / split / shard_name).rename(shard_dir / split / shard_name.replace(".tar.gz", f"-of-{shard_count:04d}.tar.gz"))
            shard_json = f"{prefix}{split}_{mic_position}-{(shard_index + 1):04d}.json"
            (shard_dir / split / shard_json).rename(shard_dir / split / shard_json.replace(".json", f"-of-{shard_count:04d}.json"))
|
|
|
|
|
|
def main():
    """Drive the pipeline: WAV->MP3 conversion, manual dedup step, sharding.

    Converts both splits to MP3, then pauses for a manual shell-based dedup
    of train/test leakage (commands are printed), then packs the MP3s into
    ~500 MiB tar.gz shards.
    """
    audio_dir = "audio/no"
    mp3_dir = "dataset_mp3"
    shard_dir = "dataset_shards"
    split_paths = {
        "train": "metadata/ADB_NOR_0463",
        "test": "metadata/ADB_NOR_0464",
    }
    prefix_dataset = "dataset"
    for split, metadata_dir in split_paths.items():
        # Fix: the positional call previously swapped audio_dir and
        # metadata_dir (signature is (split, audio_dir, metadata_dir, ...)).
        # Keyword arguments make the mapping explicit.
        convert_to_mp3(split, audio_dir=audio_dir, metadata_dir=metadata_dir,
                       output_dir=mp3_dir, prefix=prefix_dataset)

    prefix = "nst_no"
    print(f"""Dedup leaked files
$ comm -23 <(sort {prefix_dataset}_train.jsonl) <(sort {prefix_dataset}_test.jsonl) | shuf > {prefix}_train.jsonl
$ shuf {prefix_dataset}_test.jsonl -o {prefix}_test.jsonl
""")
    input("Enter [Enter] when dedupe is done.")
    shard_size = 500 * 1024 * 1024  # 500 MiB target per shard; loop-invariant
    for split in split_paths:
        # Use the `prefix` variable instead of re-hardcoding "nst_no".
        create_shards(split, mp3_dir, shard_dir, shard_size, prefix=prefix)
|
|
|
|