# prepare_mds_ed_dataset.py (MIMIC_PROGNOSIS)
"""Prepare the UniPACT MDS-ED prompted dataset.
Reads the MDS-ED tabular splits and emits, for each (split, task family),
a JSON array of prompted Yes/No conversation samples paired with an ECG
reference. Optionally merges the per-task arrays of one split into a single
``<split>.json``.
"""
from __future__ import annotations
import argparse
import gc
import json
import os
from pathlib import Path
from typing import Iterable, Iterator
import pandas as pd
# Binary deterioration-event targets: CSV label column -> event phrase spliced
# into the "deterioration" prompt template.
CONDITIONS_DETERIORATION = {
    "deterioration_severe_hypoxemia": "experience severe hypoxemia",
    "deterioration_ecmo": "require ECMO (extracorporeal membrane oxygenation)",
    "deterioration_vasopressors": "require vasopressors",
    "deterioration_inotropes": "require inotropes",
    "deterioration_mechanical_ventilation": "require mechanical ventilation",
    "deterioration_cardiac_arrest": "experience cardiac arrest",
}
# ICU-admission targets for the "icu" prompt template.
CONDITIONS_ICU = {
    "deterioration_icu_24h": "require ICU admission within the next 24 hours",
    "deterioration_icu_stay": "require ICU admission during this hospital stay",
}
# Mortality-horizon targets for the "mortality" prompt template.
CONDITIONS_MORTALITY = {
    "deterioration_mortality_1d": "die within 24 hours",
    "deterioration_mortality_7d": "die within 7 days",
    "deterioration_mortality_28d": "die within 28 days",
    "deterioration_mortality_90d": "die within 90 days",
    "deterioration_mortality_180d": "die within 180 days",
    "deterioration_mortality_365d": "die within 365 days",
    "deterioration_mortality_stay": "die during the hospital stay",
}
# One Yes/No prompt template per task family. Each template has {EHR} and
# {condition} placeholders filled in by generate_task_json.
TEMPLATES = {
    "deterioration": (
        'You are a cardiologist. Your task is "to predict whether a patient will experience clinical deterioration" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "icu": (
        'You are a cardiologist. Your task is "to predict whether a patient will require ICU admission" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "mortality": (
        'You are a cardiologist. Your task is "to predict whether a patient will experience mortality" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "diagnose": (
        'You are a cardiologist. Your task is "to predict the correct ICD-10 diagnosis code" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient be diagnosed with "{condition}"? Answer strictly with Yes or No.'
    ),
}
# Decodes the integer-coded gender column into readable text.
GENDER_MAP = {0: "female", 1: "male"}
# Float columns rounded to one decimal place at load time (keeps prompts short).
DECIMAL_COLUMNS = [
    "biometrics_bmi", "biometrics_weight", "biometrics_height",
    "vitals_temperature_mean", "vitals_heartrate_mean", "vitals_resprate_mean",
    "vitals_o2sat_mean", "vitals_sbp_mean", "vitals_dbp_mean",
]
# Vital-sign columns rendered into the EHR paragraph by render_ehr_text.
VITAL_FIELDS = (
    "vitals_temperature_mean", "vitals_heartrate_mean",
    "vitals_resprate_mean", "vitals_o2sat_mean",
    "vitals_sbp_mean", "vitals_dbp_mean", "vitals_acuity",
)
# Biometric columns rendered into the EHR paragraph by render_ehr_text.
BIOMETRIC_FIELDS = ("biometrics_bmi", "biometrics_weight", "biometrics_height")
# Recognized dataset splits and task families (also the CLI choices).
SPLITS = ("train", "val", "test")
TASKS = ("deterioration", "icu", "mortality", "diagnose")
# Seed for all pandas sampling so generated datasets are reproducible.
RANDOM_STATE = 42
def load_split(csv_path: Path) -> pd.DataFrame:
    """Read one MDS-ED split CSV and normalize it for prompting.

    The integer-coded gender column is decoded to text via ``GENDER_MAP``
    and every column listed in ``DECIMAL_COLUMNS`` is rounded to one
    decimal place.
    """
    frame = pd.read_csv(csv_path, low_memory=False)
    frame["demographics_gender"] = frame["demographics_gender"].map(GENDER_MAP)
    frame[DECIMAL_COLUMNS] = frame[DECIMAL_COLUMNS].round(1)
    return frame
def render_ehr_text(row: pd.Series) -> str:
    """Compose a one-paragraph EHR description from a single dataset row.

    Up to three sentences are produced (demographics, biometrics, vitals);
    a sentence is omitted entirely when none of its fields are present.
    """
    demographics: list[str] = []
    age = row.get("demographics_age")
    if not pd.isna(age):
        demographics.append(f"{age} year-old")
    race = row.get("general_race")
    if not pd.isna(race):
        demographics.append(race.replace("/", " ").replace(" - ", " "))
    gender = row.get("demographics_gender")
    if not pd.isna(gender):
        demographics.append(gender)

    biometrics: list[str] = []
    for col in BIOMETRIC_FIELDS:
        if pd.isna(row.get(col)):
            continue
        # e.g. "biometrics_bmi" -> "bmi 23.4"
        biometrics.append(f"{col.replace('biometrics_', '').replace('_', ' ')} {row[col]}")

    vitals: list[str] = []
    for col in VITAL_FIELDS:
        if pd.isna(row.get(col)):
            continue
        # e.g. "vitals_heartrate_mean" -> "heartrate 72.0"
        vitals.append(
            f"{col.replace('vitals_', '').replace('_mean', '').replace('_', ' ')} {row[col]}"
        )

    parts: list[str] = []
    if demographics:
        parts.append("The demographics information, " + ", ".join(demographics) + ".")
    if biometrics:
        parts.append("The biometrics information, " + ", ".join(biometrics) + ".")
    if vitals:
        parts.append("The vital parameters, " + ", ".join(vitals) + ".")
    return " ".join(parts).strip()
def generate_task_json(
    df: pd.DataFrame,
    conditions: dict[str, str],
    template: str,
    output_dir: Path,
    suffix: str,
    balanced: bool,
) -> None:
    """Emit one task-family JSON (``all_tasks_<suffix>.json``) for one split.

    For each label column in ``conditions``, every kept row becomes one
    Yes/No conversation sample pairing the prompt built from ``template``
    with the row's ECG file reference.

    If ``balanced`` is True, the majority class is downsampled per condition
    so that Yes and No counts match. Otherwise both classes are kept intact.
    """
    out_records: list[dict] = []
    for col, desc in conditions.items():
        # Rows with NaN labels match neither filter and are dropped.
        yes_rows = df[df[col] == 1]
        no_rows = df[df[col] == 0]
        if balanced:
            # Downsample the majority class to the minority-class size.
            n = min(len(yes_rows), len(no_rows))
            yes_sample = yes_rows.sample(n=n, random_state=RANDOM_STATE) if len(yes_rows) > n else yes_rows
            no_sample = no_rows.sample(n=n, random_state=RANDOM_STATE) if len(no_rows) > n else no_rows
        else:
            yes_sample, no_sample = yes_rows, no_rows
        # Deterministic shuffle so Yes and No samples are interleaved.
        chunk = (
            pd.concat([yes_sample, no_sample])
            .sample(frac=1, random_state=RANDOM_STATE)
            .reset_index(drop=True)
        )
        n_yes = n_no = 0
        for idx, row in chunk.iterrows():
            answer = "Yes" if row[col] == 1 else "No"
            n_yes += answer == "Yes"
            n_no += answer == "No"
            question = template.format(condition=desc, EHR=render_ehr_text(row))
            # Guard against a missing ECG reference: a NaN here would be
            # serialized by json.dump as the bare token ``NaN``, which is not
            # valid JSON and breaks strict parsers (e.g. ijson in merge_split).
            ecg = row.get("general_file_name", "")
            if pd.isna(ecg):
                ecg = ""
            out_records.append({
                "id": f"{col}_{idx}",
                "ecg": ecg,
                "conversations": [
                    {"from": "human", "value": f"<ecg> {question}"},
                    {"from": "gpt", "value": answer},
                ],
            })
        print(f" {col}: Yes={n_yes}, No={n_no}", flush=True)
    out_path = output_dir / f"all_tasks_{suffix}.json"
    print(f" -> {out_path}", flush=True)
    with out_path.open("w", encoding="utf-8") as f:
        json.dump(out_records, f, separators=(",", ":"))
    gc.collect()
def stream_records(path: Path) -> Iterator[dict]:
    """Yield the elements of a JSON-array file one at a time.

    Streams incrementally via ``ijson`` when that package is installed;
    otherwise falls back to loading the whole array with ``json.load``.
    """
    try:
        import ijson  # type: ignore
    except ModuleNotFoundError:
        ijson = None
    if ijson is None:
        # Fallback: materialize the entire array in memory, then yield.
        with path.open("r", encoding="utf-8") as handle:
            for item in json.load(handle):
                yield item
    else:
        with path.open("rb") as handle:
            yield from ijson.items(handle, "item")
def merge_split(output_dir: Path, split: str, tasks: Iterable[str]) -> None:
    """Stream-concatenate the per-task JSONs of one split into ``<split>.json``.

    Missing per-task files are skipped with a notice; records are re-dumped
    compactly so the merged file is a single JSON array.
    """
    out_path = output_dir / f"{split}.json"
    print(f"merging {split} -> {out_path}", flush=True)
    total = 0
    with out_path.open("w", encoding="utf-8") as sink:
        sink.write("[")
        wrote_any = False
        for task in tasks:
            part = output_dir / f"all_tasks_{split}_predictions_{task}.json"
            if not part.exists():
                print(f" skip missing {part}", flush=True)
                continue
            for record in stream_records(part):
                # Comma-separate every record after the first.
                if wrote_any:
                    sink.write(",")
                sink.write(json.dumps(record, separators=(",", ":")))
                wrote_any = True
                total += 1
        sink.write("]")
    print(f" merged {total} samples", flush=True)
def build_arg_parser() -> argparse.ArgumentParser:
    """Build the command-line interface for this script."""
    parser = argparse.ArgumentParser(description=__doc__.splitlines()[0])
    parser.add_argument(
        "--data-dir", type=Path, required=True,
        help="Directory containing mds_ed_{train,val,test}.csv.",
    )
    parser.add_argument(
        "--output-dir", type=Path, required=True,
        help="Output directory for per-task and merged JSONs.",
    )
    parser.add_argument(
        "--icd-xlsx", type=Path, required=True,
        help="Excel file with columns 'Original Column' and 'Description'.",
    )
    parser.add_argument(
        "--splits", nargs="+", choices=SPLITS, default=list(SPLITS),
        help="Which splits to process. Default: all.",
    )
    parser.add_argument(
        "--tasks", nargs="+", choices=TASKS, default=list(TASKS),
        help="Which task families to generate. Default: all.",
    )
    parser.add_argument(
        "--balanced-splits", nargs="*", choices=SPLITS, default=["val"],
        help=(
            "Splits whose majority class should be downsampled to "
            "match the minority class per condition. Splits not "
            "listed here keep the original prevalence. "
            "Default: val (train and test kept unbalanced)."
        ),
    )
    parser.add_argument(
        "--merge", action="store_true",
        help="After generation, merge per-task JSONs into <split>.json.",
    )
    parser.add_argument(
        "--no-generate", action="store_true",
        help="Skip generation; only run the merge step on existing parts.",
    )
    return parser
def main() -> None:
    """Entry point: generate per-task JSONs per split, then optionally merge."""
    args = build_arg_parser().parse_args()
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # Map each ICD-10 label column to its human-readable description; this
    # becomes the condition set of the "diagnose" task family.
    icd_frame = pd.read_excel(args.icd_xlsx)
    icd_mapping = dict(zip(icd_frame["Original Column"], icd_frame["Description"]))

    task_specs = {
        "deterioration": (CONDITIONS_DETERIORATION, TEMPLATES["deterioration"]),
        "icu": (CONDITIONS_ICU, TEMPLATES["icu"]),
        "mortality": (CONDITIONS_MORTALITY, TEMPLATES["mortality"]),
        "diagnose": (icd_mapping, TEMPLATES["diagnose"]),
    }
    balance_these = set(args.balanced_splits)

    if not args.no_generate:
        for split in args.splits:
            csv_path = args.data_dir / f"mds_ed_{split}.csv"
            print(f"loading {csv_path}", flush=True)
            split_frame = load_split(csv_path)
            balanced = split in balance_these
            print(f"split={split} balanced={balanced}", flush=True)
            for task in args.tasks:
                conditions, template = task_specs[task]
                print(f"task={task}", flush=True)
                generate_task_json(
                    split_frame,
                    conditions,
                    template,
                    args.output_dir,
                    suffix=f"{split}_predictions_{task}",
                    balanced=balanced,
                )
            # Drop the split frame before loading the next split.
            del split_frame
            gc.collect()

    if args.merge:
        for split in args.splits:
            merge_split(args.output_dir, split, args.tasks)
# Allow importing this module without side effects; run only as a script.
if __name__ == "__main__":
    main()