| |
| """Convert paired metadata.tsv -> metadata_shard.jsonl (s-s training format). |
| |
| Input TSV (header required, tab-separated): |
| zh_path en_path zh_text en_text category |
| |
| Output JSONL: one JSON object per line, schema: |
| { |
| "task_type": "s-s", |
| "conversation": [ |
| {"role":"user", "message_type":"text", "content": <prompt>}, |
| {"role":"user", "message_type":"audio", "content": <src_wav_path>}, |
| {"role":"assistant", "message_type":"audio-text", |
| "content": [<tgt_wav_path>, <tgt_text>]} |
| ] |
| } |
| |
| Each TSV row produces 2 conversations by default (en->zh and zh->en), |
| matching the original metadata_shard distribution. |
| |
| Usage: |
| python tsv_to_metadata_shard.py metadata.tsv -o metadata_shard.jsonl |
| python tsv_to_metadata_shard.py metadata.tsv --directions en2zh # only one |
| python tsv_to_metadata_shard.py metadata.tsv --shard-size 50000 # split |
| """ |
| import argparse |
| import csv |
| import json |
| import os |
| import sys |
|
|
# Instruction prompts used as the first (text) user turn of every generated
# conversation — one per translation direction.  These strings are part of
# the emitted training data; do not reword them casually.
EN2ZH_PROMPT = ("Translate the given English speech into Chinese "
                "while preserving its expressiveness.")
ZH2EN_PROMPT = ("Translate the given Chinese speech into English "
                "while preserving its expressiveness.")
|
|
|
|
def make_conversation(prompt: str, src: str, tgt: str, tgt_text: str) -> dict:
    """Build one s-s training sample as a three-turn conversation.

    Turns: a text instruction (*prompt*), the source-language audio path
    (*src*), then the assistant's paired target audio path and transcript
    (*tgt*, *tgt_text*).
    """
    user_turns = [
        {"role": "user", "message_type": "text", "content": prompt},
        {"role": "user", "message_type": "audio", "content": src},
    ]
    assistant_turn = {
        "role": "assistant",
        "message_type": "audio-text",
        "content": [tgt, tgt_text],
    }
    return {"task_type": "s-s", "conversation": user_turns + [assistant_turn]}
|
|
|
|
def open_writer(out: str, shard_size: int):
    """Return a ``(write, close)`` pair of callables for JSONL output.

    ``write(obj)`` serializes *obj* as one JSON line (non-ASCII preserved);
    ``close()`` releases the underlying file handle.

    When ``shard_size <= 0`` everything goes to a single stream: stdout if
    *out* is ``'-'``, otherwise one file at *out*.  When ``shard_size > 0``
    output is split into ``<base>_NNN<ext>`` files holding at most
    ``shard_size`` lines each (``'-'`` falls back to the default base name,
    since sharding to stdout makes no sense).
    """
    if shard_size <= 0:
        sink = sys.stdout if out == "-" else open(out, "w", encoding="utf-8")

        def write(obj):
            sink.write(json.dumps(obj, ensure_ascii=False) + "\n")

        def close():
            # Never close the interpreter's stdout.
            if sink is not sys.stdout:
                sink.close()

        return write, close

    base, ext = os.path.splitext(out if out != "-" else "metadata_shard.jsonl")
    ext = ext or ".jsonl"

    shard_idx = 0
    lines_in_shard = 0
    fh = None

    def _next_shard():
        # Close the current shard (if any) and open the next numbered one.
        nonlocal fh, shard_idx, lines_in_shard
        if fh is not None:
            fh.close()
        fh = open(f"{base}_{shard_idx:03d}{ext}", "w", encoding="utf-8")
        lines_in_shard = 0
        shard_idx += 1

    # Open shard 000 eagerly so even an empty input yields one (empty) file,
    # matching the original behavior.
    _next_shard()

    def write(obj):
        nonlocal lines_in_shard
        if lines_in_shard >= shard_size:
            _next_shard()
        fh.write(json.dumps(obj, ensure_ascii=False) + "\n")
        lines_in_shard += 1

    def close():
        if fh is not None:
            fh.close()

    return write, close
|
|
|
|
def main():
    """CLI entry point: convert a paired TSV into s-s JSONL conversations."""
    ap = argparse.ArgumentParser()
    ap.add_argument("tsv",
        help="metadata.tsv (header: zh_path,en_path,zh_text,en_text,category)")
    ap.add_argument("-o", "--out", default="metadata_shard.jsonl",
        help="output path; '-' for stdout (default: metadata_shard.jsonl)")
    ap.add_argument("--directions", default="both",
        choices=["both", "en2zh", "zh2en"])
    ap.add_argument("--shard-size", type=int, default=0,
        help="if >0, split output into <out>_NNN.jsonl shards of this many lines")
    args = ap.parse_args()

    write, close = open_writer(args.out, args.shard_size)

    # Each spec is (prompt, source-path column, target-path column,
    # target-text column); "both" emits en2zh first, then zh2en, per row.
    specs = []
    if args.directions != "zh2en":
        specs.append((EN2ZH_PROMPT, "en_path", "zh_path", "zh_text"))
    if args.directions != "en2zh":
        specs.append((ZH2EN_PROMPT, "zh_path", "en_path", "en_text"))

    required = {"zh_path", "en_path", "zh_text", "en_text", "category"}
    n = 0
    with open(args.tsv, encoding="utf-8", newline="") as f:
        reader = csv.DictReader(f, delimiter="\t")
        missing = required - set(reader.fieldnames or [])
        if missing:
            sys.exit(f"missing TSV columns: {missing}")

        for row in reader:
            for prompt, src_col, tgt_col, text_col in specs:
                write(make_conversation(
                    prompt, row[src_col], row[tgt_col], row[text_col]))
                n += 1

    close()
    print(f"wrote {n:,} conversations", file=sys.stderr)
|
|
|
|
| if __name__ == "__main__": |
| main() |
|
|