# Sample-CSV / make_acc_splits.py
# Builds case-level train/val/test splits for TCGA DX feature files.
# (Uploaded by Yfeng0216 via huggingface_hub; revision a8a75c9, verified.)
import argparse
import os
import re
import random
# Captures a TCGA case barcode, e.g. "TCGA-2H-A9GF" (project, tissue-source site,
# participant). Matched anywhere inside a filename or directory name.
TCGA_ID_RE = re.compile(r"(TCGA-[A-Z0-9]{2}-[A-Z0-9]+)")
# Matches a "DX" slide token (e.g. "DX1") that starts the string or follows '-',
# and is terminated by '-', '.', or end-of-string.
DX_TOKEN_RE = re.compile(r"(?:^|-)DX\d*(?=[\-.]|$)")
# Matches a "TS" slide token under the same delimiters; names carrying a TS token
# are excluded (presumably tissue/frozen slides rather than diagnostic — TODO confirm).
TS_TOKEN_RE = re.compile(r"(?:^|-)TS[A-Za-z0-9]*(?=[\-.]|$)")
def extract_case_id(name):
    """Return the TCGA case barcode found in *name*, or None if absent."""
    match = TCGA_ID_RE.search(name)
    if match is None:
        return None
    return match.group(1)
def is_dx_item(name):
    """Return True when *name* carries a DX slide token and no TS token."""
    has_ts_token = TS_TOKEN_RE.search(name) is not None
    has_dx_token = DX_TOKEN_RE.search(name) is not None
    # TS takes precedence: a name with both tokens is rejected.
    return has_dx_token and not has_ts_token
def collect_features(path):
    """Group DX '.pt' feature files found directly under *path* by case id.

    Hidden files, non-'.pt' files, non-DX items, and names with no TCGA
    barcode are skipped. Each case's file list is returned sorted.
    """
    grouped = {}
    for entry in os.listdir(path):
        if entry.startswith('.') or not entry.endswith('.pt'):
            continue
        if not is_dx_item(entry):
            continue
        case = extract_case_id(entry)
        if case:
            grouped.setdefault(case, []).append(entry)
    # Deterministic ordering per case, regardless of listdir order.
    return {case: sorted(files) for case, files in grouped.items()}
def collect_label_cases(path):
    """Return the set of TCGA case ids among the subdirectories of *path*.

    Only directory entries are considered; hidden entries and names
    without a TCGA barcode are ignored.
    """
    cases = set()
    for entry in os.listdir(path):
        if entry.startswith('.'):
            continue
        full_path = os.path.join(path, entry)
        if not os.path.isdir(full_path):
            continue
        cid = extract_case_id(entry)
        if cid:
            cases.add(cid)
    return cases
def write_csv(train_items, val_items, test_items, out_path):
    """Write a 4-column CSV (index, train, val, test) of file stems.

    Each item has its extension stripped; shorter columns are padded
    with empty cells so every row has the same width.
    """
    columns = (train_items, val_items, test_items)
    row_count = max(len(col) for col in columns)
    with open(out_path, 'w', encoding='utf-8') as out:
        out.write(',train,val,test\n')
        for idx in range(row_count):
            stems = [
                os.path.splitext(col[idx])[0] if idx < len(col) else ''
                for col in columns
            ]
            out.write(f"{idx},{stems[0]},{stems[1]},{stems[2]}\n")
def main():
    """Build a deterministic 80/10/10 case-level split CSV.

    Cases are the intersection of the feature directory (DX '.pt' files)
    and the label directory (per-case subdirectories). The split is done
    at the case level so all slides of one case land in the same split,
    then expanded back to per-file items and written via write_csv.
    """
    ap = argparse.ArgumentParser()
    ap.add_argument('--feature_source', type=str, default='/mnt/datadisk0/TCGA_pt/MESO_UNI')
    ap.add_argument('--label_source', type=str, default='/mnt/datadisk0/datasets/TCGA-MESO')
    ap.add_argument('--output_csv', type=str, default='/mnt/datadisk0/BiGen/ocr/dataset_csv/splits_MESO.csv')
    ap.add_argument('--seed', type=int, default=42)
    args = ap.parse_args()

    features_map = collect_features(args.feature_source)
    label_cases = collect_label_cases(args.label_source)

    # Sort before the seeded shuffle so the split is reproducible
    # regardless of filesystem enumeration order.
    common_cases = sorted(set(features_map) & label_cases)
    random.seed(args.seed)
    random.shuffle(common_cases)

    total = len(common_cases)
    n_train = int(total * 0.8)
    n_val = int(total * 0.1)
    # The test split is simply the remainder after train/val truncation
    # (the original's unused `n_test` local has been removed).
    train_cases = common_cases[:n_train]
    val_cases = common_cases[n_train:n_train + n_val]
    test_cases = common_cases[n_train + n_val:]

    train_items = [f for c in train_cases for f in features_map.get(c, [])]
    val_items = [f for c in val_cases for f in features_map.get(c, [])]
    test_items = [f for c in test_cases for f in features_map.get(c, [])]

    write_csv(train_items, val_items, test_items, args.output_csv)
    print('Feature source:', args.feature_source)
    print('Label source:', args.label_source)
    print('Common cases:', total)
    print('Train cases:', len(train_cases))
    print('Val cases:', len(val_cases))
    print('Test cases:', len(test_cases))
    print('Train items:', len(train_items))
    print('Val items:', len(val_items))
    print('Test items:', len(test_items))
    print('Output CSV:', args.output_csv)


if __name__ == '__main__':
    main()