jialucode committed on
Commit
8877bb1
·
verified ·
1 Parent(s): 0614d84

Upload prepare_mds_ed_dataset.py

Browse files
Files changed (1) hide show
  1. prepare_mds_ed_dataset.py +275 -0
prepare_mds_ed_dataset.py ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Prepare the UniPACT MDS-ED prompted dataset.
2
+
3
+ Reads the MDS-ED tabular splits and emits, for each (split, task family),
4
+ a JSON array of prompted Yes/No conversation samples paired with an ECG
5
+ reference. Optionally merges the per-task arrays of one split into a single
6
+ ``<split>.json``.
7
+ """
8
+
9
+ from __future__ import annotations
10
+
11
+ import argparse
12
+ import gc
13
+ import json
14
+ import os
15
+ from pathlib import Path
16
+ from typing import Iterable, Iterator
17
+
18
+ import pandas as pd
19
+
20
+
21
# Binary deterioration-event labels: CSV column name -> verb phrase that is
# interpolated into the "{condition}" slot of the matching prompt template.
CONDITIONS_DETERIORATION = {
    "deterioration_severe_hypoxemia": "experience severe hypoxemia",
    "deterioration_ecmo": "require ECMO (extracorporeal membrane oxygenation)",
    "deterioration_vasopressors": "require vasopressors",
    "deterioration_inotropes": "require inotropes",
    "deterioration_mechanical_ventilation": "require mechanical ventilation",
    "deterioration_cardiac_arrest": "experience cardiac arrest",
}

# ICU-admission labels: a 24-hour horizon and a whole-stay horizon.
CONDITIONS_ICU = {
    "deterioration_icu_24h": "require ICU admission within the next 24 hours",
    "deterioration_icu_stay": "require ICU admission during this hospital stay",
}

# Mortality labels at increasing day horizons, plus in-hospital mortality.
CONDITIONS_MORTALITY = {
    "deterioration_mortality_1d": "die within 24 hours",
    "deterioration_mortality_7d": "die within 7 days",
    "deterioration_mortality_28d": "die within 28 days",
    "deterioration_mortality_90d": "die within 90 days",
    "deterioration_mortality_180d": "die within 180 days",
    "deterioration_mortality_365d": "die within 365 days",
    "deterioration_mortality_stay": "die during the hospital stay",
}

# Prompt templates keyed by task family. Each template has an {EHR} slot
# (filled by render_ehr_text) and a {condition} slot (filled from the
# condition dictionaries above, or from the ICD description spreadsheet
# for the "diagnose" task). All four ask for a strict Yes/No answer.
TEMPLATES = {
    "deterioration": (
        'You are a cardiologist. Your task is "to predict whether a patient will experience clinical deterioration" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "icu": (
        'You are a cardiologist. Your task is "to predict whether a patient will require ICU admission" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "mortality": (
        'You are a cardiologist. Your task is "to predict whether a patient will experience mortality" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient "{condition}"? Answer strictly with Yes or No.'
    ),
    "diagnose": (
        'You are a cardiologist. Your task is "to predict the correct ICD-10 diagnosis code" '
        "based on the provided ECG and Electronic Health Record (EHR) data. "
        "{EHR} "
        'Will the patient be diagnosed with "{condition}"? Answer strictly with Yes or No.'
    ),
}

# The raw CSV encodes gender as an integer code; decoded to words for prompts.
GENDER_MAP = {0: "female", 1: "male"}

# Floating-point columns rounded to one decimal place by load_split so the
# prompt text does not carry long float tails.
DECIMAL_COLUMNS = [
    "biometrics_bmi", "biometrics_weight", "biometrics_height",
    "vitals_temperature_mean", "vitals_heartrate_mean", "vitals_resprate_mean",
    "vitals_o2sat_mean", "vitals_sbp_mean", "vitals_dbp_mean",
]

# Columns rendered into the "vital parameters" sentence of the EHR paragraph.
VITAL_FIELDS = (
    "vitals_temperature_mean", "vitals_heartrate_mean",
    "vitals_resprate_mean", "vitals_o2sat_mean",
    "vitals_sbp_mean", "vitals_dbp_mean", "vitals_acuity",
)

# Columns rendered into the "biometrics" sentence of the EHR paragraph.
BIOMETRIC_FIELDS = ("biometrics_bmi", "biometrics_weight", "biometrics_height")

SPLITS = ("train", "val", "test")  # recognised dataset splits
TASKS = ("deterioration", "icu", "mortality", "diagnose")  # task families
RANDOM_STATE = 42  # seed for every pandas sample() call -> reproducible output
91
+
92
+
93
def load_split(csv_path: Path) -> pd.DataFrame:
    """Read one MDS-ED split CSV and normalise it for prompt rendering.

    The integer gender code is decoded to "female"/"male" via ``GENDER_MAP``
    and every column listed in ``DECIMAL_COLUMNS`` is rounded to one decimal
    place so rendered prompts stay compact.
    """
    frame = pd.read_csv(csv_path, low_memory=False)
    decoded = frame["demographics_gender"].map(GENDER_MAP)
    frame["demographics_gender"] = decoded
    rounded = frame[DECIMAL_COLUMNS].round(1)
    frame[DECIMAL_COLUMNS] = rounded
    return frame
99
+
100
+
101
def render_ehr_text(row: pd.Series) -> str:
    """Compose a single-paragraph EHR description from one row.

    Builds up to three sentences (demographics, biometrics, vitals);
    a group whose fields are all missing contributes no sentence.
    """
    demographics: list[str] = []
    age = row.get("demographics_age")
    if not pd.isna(age):
        demographics.append(f"{row['demographics_age']} year-old")
    race = row.get("general_race")
    if not pd.isna(race):
        demographics.append(row["general_race"].replace("/", " ").replace(" - ", " "))
    gender = row.get("demographics_gender")
    if not pd.isna(gender):
        demographics.append(row["demographics_gender"])

    biometrics: list[str] = []
    for col in BIOMETRIC_FIELDS:
        if pd.isna(row.get(col)):
            continue
        label = col.replace("biometrics_", "").replace("_", " ")
        biometrics.append(f"{label} {row[col]}")

    vitals: list[str] = []
    for col in VITAL_FIELDS:
        if pd.isna(row.get(col)):
            continue
        label = col.replace("vitals_", "").replace("_mean", "").replace("_", " ")
        vitals.append(f"{label} {row[col]}")

    parts: list[str] = []
    if demographics:
        parts.append("The demographics information, " + ", ".join(demographics) + ".")
    if biometrics:
        parts.append("The biometrics information, " + ", ".join(biometrics) + ".")
    if vitals:
        parts.append("The vital parameters, " + ", ".join(vitals) + ".")
    return " ".join(parts).strip()
124
+
125
+
126
def generate_task_json(
    df: pd.DataFrame,
    conditions: dict[str, str],
    template: str,
    output_dir: Path,
    suffix: str,
    balanced: bool,
) -> None:
    """Emit one task-family JSON for one split.

    For each condition column, the 1/0 labels become Yes/No answers to a
    templated question, paired with the row's ECG file reference. The result
    is written to ``output_dir / f"all_tasks_{suffix}.json"``.

    If ``balanced`` is True, the majority class is downsampled per condition
    so that Yes and No counts match. Otherwise both classes are kept intact.
    """
    out_records: list[dict] = []
    for col, desc in conditions.items():
        yes_rows = df[df[col] == 1]
        no_rows = df[df[col] == 0]
        if balanced:
            # Downsample whichever class is larger to the minority-class size;
            # fixed random_state keeps the selection reproducible.
            n = min(len(yes_rows), len(no_rows))
            yes_sample = yes_rows.sample(n=n, random_state=RANDOM_STATE) if len(yes_rows) > n else yes_rows
            no_sample = no_rows.sample(n=n, random_state=RANDOM_STATE) if len(no_rows) > n else no_rows
        else:
            yes_sample, no_sample = yes_rows, no_rows
        # Shuffle the combined classes so Yes/No samples are interleaved,
        # then reindex so `idx` below is a dense 0..k counter per condition.
        chunk = (
            pd.concat([yes_sample, no_sample])
            .sample(frac=1, random_state=RANDOM_STATE)
            .reset_index(drop=True)
        )
        n_yes = n_no = 0
        for idx, row in chunk.iterrows():
            answer = "Yes" if row[col] == 1 else "No"
            n_yes += answer == "Yes"  # bool adds as 0/1
            n_no += answer == "No"
            question = template.format(condition=desc, EHR=render_ehr_text(row))
            out_records.append({
                # Unique within this file: condition column + row position.
                "id": f"{col}_{idx}",
                "ecg": row.get("general_file_name", ""),
                "conversations": [
                    {"from": "human", "value": f"<ecg> {question}"},
                    {"from": "gpt", "value": answer},
                ],
            })
        print(f" {col}: Yes={n_yes}, No={n_no}", flush=True)

    out_path = output_dir / f"all_tasks_{suffix}.json"
    print(f" -> {out_path}", flush=True)
    with out_path.open("w", encoding="utf-8") as f:
        # Compact separators: these arrays can be large.
        json.dump(out_records, f, separators=(",", ":"))
    gc.collect()
175
+
176
+
177
def stream_records(path: Path) -> Iterator[dict]:
    """Yield items from a JSON-array file. Uses ijson if available.

    With ``ijson`` installed the file is parsed incrementally; otherwise the
    whole array is loaded with the stdlib ``json`` module and re-yielded.
    """
    try:
        import ijson  # type: ignore
    except ModuleNotFoundError:
        ijson = None

    if ijson is None:
        # Fallback: materialise the whole array, then iterate it.
        with path.open("r", encoding="utf-8") as handle:
            for record in json.load(handle):
                yield record
    else:
        with path.open("rb") as handle:
            for record in ijson.items(handle, "item"):
                yield record
187
+
188
+
189
def merge_split(output_dir: Path, split: str, tasks: Iterable[str]) -> None:
    """Concatenate per-task JSONs of one split into ``<split>.json`` (streamed).

    Records are copied part-by-part via ``stream_records`` so the merged
    array never has to be held in memory; missing parts are skipped.
    """
    out_path = output_dir / f"{split}.json"
    print(f"merging {split} -> {out_path}", flush=True)
    n = 0
    with out_path.open("w", encoding="utf-8") as sink:
        sink.write("[")
        for task in tasks:
            part = output_dir / f"all_tasks_{split}_predictions_{task}.json"
            if not part.exists():
                print(f" skip missing {part}", flush=True)
                continue
            for record in stream_records(part):
                # Comma before every record except the very first.
                if n:
                    sink.write(",")
                sink.write(json.dumps(record, separators=(",", ":")))
                n += 1
        sink.write("]")
    print(f" merged {n} samples", flush=True)
210
+
211
+
212
def build_arg_parser() -> argparse.ArgumentParser:
    """Build the command-line parser for the dataset-preparation script."""
    # __doc__ is None when Python strips docstrings (-OO); fall back to a
    # static summary instead of crashing on None.splitlines().
    summary = (__doc__ or "Prepare the UniPACT MDS-ED prompted dataset.").splitlines()[0]
    p = argparse.ArgumentParser(description=summary)
    p.add_argument("--data-dir", type=Path, required=True,
                   help="Directory containing mds_ed_{train,val,test}.csv.")
    p.add_argument("--output-dir", type=Path, required=True,
                   help="Output directory for per-task and merged JSONs.")
    p.add_argument("--icd-xlsx", type=Path, required=True,
                   help="Excel file with columns 'Original Column' and 'Description'.")
    p.add_argument("--splits", nargs="+", choices=SPLITS, default=list(SPLITS),
                   help="Which splits to process. Default: all.")
    p.add_argument("--tasks", nargs="+", choices=TASKS, default=list(TASKS),
                   help="Which task families to generate. Default: all.")
    p.add_argument("--balanced-splits", nargs="*", choices=SPLITS,
                   default=["train", "val"],
                   help="Splits whose majority class should be downsampled to "
                        "match the minority class per condition. Splits not "
                        "listed here keep the original prevalence. "
                        "Default: train val (test kept unbalanced).")
    p.add_argument("--merge", action="store_true",
                   help="After generation, merge per-task JSONs into <split>.json.")
    p.add_argument("--no-generate", action="store_true",
                   help="Skip generation; only run the merge step on existing parts.")
    return p
235
+
236
+
237
def main() -> None:
    """CLI entry point: generate per-task JSONs, then optionally merge them."""
    args = build_arg_parser().parse_args()
    args.output_dir.mkdir(parents=True, exist_ok=True)

    # ICD-10 diagnosis targets come from the spreadsheet:
    # label column name -> human-readable description used in the prompt.
    icd_df = pd.read_excel(args.icd_xlsx)
    icd_mapping = dict(zip(icd_df["Original Column"], icd_df["Description"]))

    # Task family -> (condition mapping, prompt template).
    task_specs = {
        "deterioration": (CONDITIONS_DETERIORATION, TEMPLATES["deterioration"]),
        "icu": (CONDITIONS_ICU, TEMPLATES["icu"]),
        "mortality": (CONDITIONS_MORTALITY, TEMPLATES["mortality"]),
        "diagnose": (icd_mapping, TEMPLATES["diagnose"]),
    }
    balanced_set = set(args.balanced_splits)

    if not args.no_generate:
        for split in args.splits:
            csv_path = args.data_dir / f"mds_ed_{split}.csv"
            print(f"loading {csv_path}", flush=True)
            df = load_split(csv_path)
            balanced = split in balanced_set
            print(f"split={split} balanced={balanced}", flush=True)
            for task in args.tasks:
                conditions, template = task_specs[task]
                print(f"task={task}", flush=True)
                generate_task_json(
                    df, conditions, template, args.output_dir,
                    suffix=f"{split}_predictions_{task}", balanced=balanced,
                )
            # Drop the split DataFrame before loading the next one to keep
            # peak memory bounded.
            del df
            gc.collect()

    if args.merge:
        for split in args.splits:
            merge_split(args.output_dir, split, args.tasks)
268
+
269
+ if args.merge:
270
+ for split in args.splits:
271
+ merge_split(args.output_dir, split, args.tasks)
272
+
273
+
274
+ if __name__ == "__main__":
275
+ main()