# human-chats-worldwide / build_dataset.py
"""Build real human conversations dataset from multiple sources."""
import os, json, gc
from datetime import datetime
import pandas as pd
from datasets import load_dataset
OUTPUT_DIR = "./output"
os.makedirs(OUTPUT_DIR, exist_ok=True)
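# Per-source row cap: each Hub dataset below is streamed and truncated at this count.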
MAX_ROWS = 500_000
def log(msg): print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}", flush=True)
def save(df, name):
    """Write a DataFrame to OUTPUT_DIR/<name>.parquet with zstd compression and log its size."""
    path = os.path.join(OUTPUT_DIR, f"{name}.parquet")
    df.to_parquet(path, index=False, compression="zstd")
    mb = os.path.getsize(path) / (1024 * 1024)
    log(f"Saved {name}: {len(df)} rows, {mb:.1f} MB")
SOURCES = [
("discord", "mookiezi/Discord-Dialogues", "train", "en", "discord_chat",
lambda ex: {"text": ex.get("text",""), "turns": ex.get("turns",0), "metadata": json.dumps({"tokens":ex.get("tokens",0)})}),
("reddit_comments", "HuggingFaceGECLM/REDDIT_comments", "AskReddit", "en", "reddit_qa",
lambda ex: {"text": ex.get("body",""), "turns": 1, "metadata": json.dumps({"score":ex.get("score",0)})}),
("reddit_confessions", "SocialGrep/one-million-reddit-confessions", "train", "en", "reddit_confession",
lambda ex: {"text": f"{ex.get('title','')}\n\n{ex.get('selftext','')}", "turns": 1, "metadata": json.dumps({"score":ex.get("score",0)})}),
("russian", "Den4ikAI/russian_dialogues_2", "train", "ru", "telegram_chat",
lambda ex: {"text": "\n".join(str(s) for s in ex.get("sample",[]) if s), "turns": len(ex.get("sample",[])), "metadata": "{}"}),
("italian_usenet", "mii-community/UsenetArchiveIT-conversations", "train", "it", "usenet_forum",
lambda ex: {"text": "\n\n".join(m.get("content","").strip() for m in ex.get("messages",[]) if m.get("content","").strip()), "turns": len([m for m in ex.get("messages",[]) if m.get("content","").strip()]), "metadata": json.dumps({"newsgroup":ex.get("newsgroup","")})}),
("twitch", "lparkourer10/twitch_chat", "train", "en", "live_stream_chat",
lambda ex: {"text": ex.get("Message",""), "turns": 1, "metadata": "{}"}),
("mental_health", "Amod/mental_health_counseling_conversations", "train", "en", "therapy_dialogue",
lambda ex: {"text": f"Patient: {ex.get('Context','')}\n\nCounselor: {ex.get('Response','')}", "turns": 2, "metadata": "{}"}),
("japanese_speech", "japanese-asr/whisper_transcriptions.reazon_speech_all", "subset_0", "ja", "speech_transcription",
lambda ex: {"text": ex.get("transcription",""), "turns": 1, "metadata": "{}"}),
("korean_chat", "jojo0217/korean_safe_conversation", "train", "ko", "everyday_chat",
lambda ex: {"text": f"User: {ex.get('instruction','')}\n\nAssistant: {ex.get('output','')}", "turns": 2, "metadata": "{}"}),
("reddit_youtube_mix", "fsteig/conversations-30gb", "train", "en", "reddit_youtube",
lambda ex: {"text": ex.get("body",""), "turns": 1, "metadata": json.dumps({"source":ex.get("source","reddit")})}),
]
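# Stream each source, extract and filter rows, save one parquet per source, then combine.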
all_dfs = []
for name, ds_name, config, lang, domain, extractor in SOURCES:
    log(f"Processing {name}...")
    try:
        # The third SOURCES field is the config/subset name; the placeholder "train"
        # means the default config, so no config name is passed in that case.
        ds = load_dataset(ds_name, config if config != "train" else None, split="train", streaming=True)
        rows = []
        for i, ex in enumerate(ds):
            if i >= MAX_ROWS:
                break
            try:
                r = extractor(ex)
                # Drop empty or near-empty text and deleted/removed Reddit bodies.
                if not r["text"] or len(r["text"]) < 10:
                    continue
                if r["text"] in ("[deleted]", "[removed]"):
                    continue
                rows.append({**r, "source": name, "language": lang, "domain": domain})
            except Exception:
                pass  # skip malformed examples
        if rows:
            df = pd.DataFrame(rows)
            save(df, name)
            all_dfs.append(df)
            log(f"{name}: {len(rows)} rows")
    except Exception as e:
        log(f"ERROR {name}: {e}")
    gc.collect()  # free per-source buffers before streaming the next dataset
if all_dfs:
    combined = pd.concat(all_dfs, ignore_index=True)
    save(combined, "all_conversations")
    log(f"TOTAL: {len(combined)} rows")
else:
    log("No data collected")