#!/usr/bin/env python3
"""Convert paired metadata.tsv -> metadata_shard.jsonl (s-s training format).
Input TSV (header required, tab-separated):
zh_path en_path zh_text en_text category
Output JSONL: one JSON object per line, schema:
{
"task_type": "s-s",
"conversation": [
{"role":"user", "message_type":"text", "content": <prompt>},
{"role":"user", "message_type":"audio", "content": <src_wav_path>},
{"role":"assistant", "message_type":"audio-text",
"content": [<tgt_wav_path>, <tgt_text>]}
]
}
Each TSV row produces 2 conversations by default (en->zh and zh->en),
matching the original metadata_shard distribution.
Usage:
python tsv_to_metadata_shard.py metadata.tsv -o metadata_shard.jsonl
python tsv_to_metadata_shard.py metadata.tsv --directions en2zh # only one
python tsv_to_metadata_shard.py metadata.tsv --shard-size 50000 # split
"""
import argparse
import csv
import json
import os
import sys
EN2ZH_PROMPT = ("Translate the given English speech into Chinese "
"while preserving its expressiveness.")
ZH2EN_PROMPT = ("Translate the given Chinese speech into English "
"while preserving its expressiveness.")
def make_conversation(prompt: str, src: str, tgt: str, tgt_text: str) -> dict:
    """Assemble one s-s training conversation record.

    prompt   -- translation instruction shown as the first user turn
    src      -- source-language wav path (user audio turn)
    tgt      -- target-language wav path (assistant audio half)
    tgt_text -- target-language transcript paired with tgt
    """
    user_turns = [
        {"role": "user", "message_type": "text", "content": prompt},
        {"role": "user", "message_type": "audio", "content": src},
    ]
    assistant_turn = {
        "role": "assistant",
        "message_type": "audio-text",
        "content": [tgt, tgt_text],
    }
    return {"task_type": "s-s", "conversation": user_turns + [assistant_turn]}
def open_writer(out: str, shard_size: int):
    """Return a (write, close) pair of callables for JSONL output.

    When shard_size <= 0 every record goes to a single stream (stdout if
    out == "-").  When shard_size > 0 output is split into numbered files
    <base>_NNN<ext>, each holding at most shard_size lines; the first
    shard is opened eagerly.
    """
    def dump(fh, obj):
        # One JSON object per line; keep non-ASCII text readable.
        fh.write(json.dumps(obj, ensure_ascii=False) + "\n")

    if shard_size <= 0:
        stream = sys.stdout if out == "-" else open(out, "w", encoding="utf-8")

        def write(obj):
            dump(stream, obj)

        def close():
            # Never close stdout — it is owned by the interpreter.
            if stream is not sys.stdout:
                stream.close()

        return write, close

    # Sharded mode: "-" cannot be split, so fall back to a default name.
    base, ext = os.path.splitext(out if out != "-" else "metadata_shard.jsonl")
    ext = ext or ".jsonl"
    state = {"idx": 0, "count": 0, "f": None}

    def _next_shard():
        # Close the previous shard (if any) and open <base>_NNN<ext>.
        if state["f"] is not None:
            state["f"].close()
        shard_path = f"{base}_{state['idx']:03d}{ext}"
        state["f"] = open(shard_path, "w", encoding="utf-8")
        state["count"] = 0
        state["idx"] += 1

    _next_shard()

    def write(obj):
        # Rotate lazily, just before the line that would overflow the shard.
        if state["count"] >= shard_size:
            _next_shard()
        dump(state["f"], obj)
        state["count"] += 1

    def close():
        if state["f"] is not None:
            state["f"].close()

    return write, close
def main():
    """CLI entry point: convert a paired TSV into s-s JSONL conversations.

    Validates the TSV header, then emits up to two conversations per row
    (en->zh and/or zh->en) through the writer returned by open_writer.
    Exits with an error message if required columns are missing.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("tsv",
        help="metadata.tsv (header: zh_path,en_path,zh_text,en_text,category)")
    parser.add_argument("-o", "--out", default="metadata_shard.jsonl",
        help="output path; '-' for stdout (default: metadata_shard.jsonl)")
    parser.add_argument("--directions", default="both",
        choices=["both", "en2zh", "zh2en"])
    parser.add_argument("--shard-size", type=int, default=0,
        help="if >0, split output into <out>_NNN.jsonl shards of this many lines")
    opts = parser.parse_args()

    write, close = open_writer(opts.out, opts.shard_size)
    required = {"zh_path", "en_path", "zh_text", "en_text", "category"}
    total = 0
    with open(opts.tsv, encoding="utf-8", newline="") as fh:
        rows = csv.DictReader(fh, delimiter="\t")
        missing = required - set(rows.fieldnames or [])
        if missing:
            sys.exit(f"missing TSV columns: {missing}")
        # argparse's choices= restricts directions to these three values,
        # so "not the opposite direction" is equivalent to the original
        # membership tests.
        want_en2zh = opts.directions != "zh2en"
        want_zh2en = opts.directions != "en2zh"
        for row in rows:
            if want_en2zh:
                write(make_conversation(
                    EN2ZH_PROMPT, row["en_path"], row["zh_path"], row["zh_text"]))
                total += 1
            if want_zh2en:
                write(make_conversation(
                    ZH2EN_PROMPT, row["zh_path"], row["en_path"], row["en_text"]))
                total += 1
    close()
    # Count goes to stderr so stdout stays clean when out == "-".
    print(f"wrote {total:,} conversations", file=sys.stderr)


if __name__ == "__main__":
    main()