Add tsv_to_metadata.py (TSV -> Kimi-Audio training JSONL)
Browse files- scripts/tsv_to_metadata.py +123 -0
scripts/tsv_to_metadata.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Convert paired metadata.tsv -> metadata_shard.jsonl (s-s training format).

Input TSV (header required, tab-separated):
    zh_path en_path zh_text en_text category

Output JSONL: one JSON object per line, schema:
  {
    "task_type": "s-s",
    "conversation": [
      {"role":"user", "message_type":"text", "content": <prompt>},
      {"role":"user", "message_type":"audio", "content": <src_wav_path>},
      {"role":"assistant", "message_type":"audio-text",
       "content": [<tgt_wav_path>, <tgt_text>]}
    ]
  }

Each TSV row produces 2 conversations by default (en->zh and zh->en),
matching the original metadata_shard distribution.

Usage:
  python tsv_to_metadata.py metadata.tsv -o metadata_shard.jsonl
  python tsv_to_metadata.py metadata.tsv --directions en2zh   # only one
  python tsv_to_metadata.py metadata.tsv --shard-size 50000   # split
"""
import argparse
import csv
import json
import os
import sys

# Instruction text used as the first (text) user turn of every conversation,
# one prompt per translation direction.
EN2ZH_PROMPT = ("Translate the given English speech into Chinese "
                "while preserving its expressiveness.")
ZH2EN_PROMPT = ("Translate the given Chinese speech into English "
                "while preserving its expressiveness.")
| 37 |
+
|
| 38 |
+
def make_conversation(prompt: str, src: str, tgt: str, tgt_text: str) -> dict:
    """Build one "s-s" training sample.

    Two user turns (the text instruction, then the source wav path) are
    followed by one assistant turn holding the target wav path and its
    transcript.
    """
    user_turns = [
        {"role": "user", "message_type": "text", "content": prompt},
        {"role": "user", "message_type": "audio", "content": src},
    ]
    assistant_turn = {
        "role": "assistant",
        "message_type": "audio-text",
        "content": [tgt, tgt_text],
    }
    return {"task_type": "s-s", "conversation": user_turns + [assistant_turn]}
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def open_writer(out: str, shard_size: int):
    """Return a ``(write, close)`` pair of callables for JSONL output.

    When ``shard_size <= 0`` everything goes to a single destination:
    ``out``, or stdout when ``out`` is ``"-"``.  Otherwise output is split
    into files named ``<base>_NNN<ext>`` of at most ``shard_size`` lines
    each (``"-"`` falls back to the default ``metadata_shard`` base name).
    """
    def _dump(fh, obj):
        # One compact JSON object per line; keep non-ASCII text readable.
        fh.write(json.dumps(obj, ensure_ascii=False) + "\n")

    if shard_size <= 0:
        stream = sys.stdout if out == "-" else open(out, "w", encoding="utf-8")

        def write(obj):
            _dump(stream, obj)

        def close():
            if stream is not sys.stdout:
                stream.close()

        return write, close

    base, ext = os.path.splitext(out if out != "-" else "metadata_shard.jsonl")
    ext = ext or ".jsonl"
    current = None
    shard_idx = 0
    lines_in_shard = 0

    def _open_next():
        # Close the active shard (if any) and start the next numbered one.
        nonlocal current, shard_idx, lines_in_shard
        if current is not None:
            current.close()
        current = open(f"{base}_{shard_idx:03d}{ext}", "w", encoding="utf-8")
        lines_in_shard = 0
        shard_idx += 1

    _open_next()

    def write(obj):
        nonlocal lines_in_shard
        if lines_in_shard >= shard_size:
            _open_next()
        _dump(current, obj)
        lines_in_shard += 1

    def close():
        if current is not None:
            current.close()

    return write, close
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def main():
    """CLI entry point: read the paired TSV and emit s-s conversation JSONL.

    Exits with an error message if the TSV header is missing any of the
    required columns.  Writes a summary count to stderr on success.
    """
    ap = argparse.ArgumentParser(
        description="Convert paired metadata.tsv to s-s training JSONL.")
    ap.add_argument("tsv",
        help="metadata.tsv (header: zh_path,en_path,zh_text,en_text,category)")
    ap.add_argument("-o", "--out", default="metadata_shard.jsonl",
        help="output path; '-' for stdout (default: metadata_shard.jsonl)")
    ap.add_argument("--directions", default="both",
        choices=["both", "en2zh", "zh2en"])
    ap.add_argument("--shard-size", type=int, default=0,
        help="if >0, split output into <out>_NNN.jsonl shards of this many lines")
    args = ap.parse_args()

    write, close = open_writer(args.out, args.shard_size)

    required = {"zh_path", "en_path", "zh_text", "en_text", "category"}
    n = 0
    try:
        with open(args.tsv, encoding="utf-8", newline="") as f:
            reader = csv.DictReader(f, delimiter="\t")
            missing = required - set(reader.fieldnames or [])
            if missing:
                sys.exit(f"missing TSV columns: {missing}")

            for row in reader:
                # Each row yields up to two mirror-image conversations.
                if args.directions in ("both", "en2zh"):
                    write(make_conversation(
                        EN2ZH_PROMPT, row["en_path"], row["zh_path"], row["zh_text"]))
                    n += 1
                if args.directions in ("both", "zh2en"):
                    write(make_conversation(
                        ZH2EN_PROMPT, row["zh_path"], row["en_path"], row["en_text"]))
                    n += 1
    finally:
        # Always close so open shard files are flushed even when the column
        # check sys.exit()s or a row raises mid-stream (the original leaked
        # the handle and could drop buffered lines in sharded mode).
        close()
    print(f"wrote {n:,} conversations", file=sys.stderr)
|
| 120 |
+
|
| 121 |
+
|
| 122 |
+
# Run the CLI only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()
|