# MedRCube / MedRCube.py
# NOTE: Hugging Face Hub page residue converted to comments so the file parses:
#   uploader: Flmc — "Add files using upload-large-folder tool" — commit d0059a3 (verified)
import json
import os
from typing import Any, Dict, Iterator, Tuple
import datasets
from datasets import Features, Image, Value
# Human-readable dataset card text, surfaced via `datasets.DatasetInfo.description`.
_DESCRIPTION = """MedRCube: A Multidimensional Framework for Fine-Grained and In-Depth Evaluation of MLLMs in Medical Imaging.
This loader discovers all `test.json` files under the dataset directory and emits a single `test` split.
Notes:
- Each example is multiple-choice VQA with 4 options (A-D).
- For license-restricted sources, images are not redistributed; those examples have `restricted=True` and `image=None`.
"""
class MedRCube(datasets.GeneratorBasedBuilder):
    """Builder that discovers every ``test.json`` under the repo root and
    emits a single flat ``test`` split of multiple-choice (A-D) VQA examples.

    License-restricted items carry ``restricted=True`` and ``image=None``;
    their image files are expected to be absent from the snapshot.
    """

    VERSION = datasets.Version("1.0.0")

    # String-valued feature keys copied verbatim from each JSON item.
    # Missing keys default to "" so one incomplete item cannot break the
    # Arrow schema for the whole split.
    _STR_KEYS = (
        "id", "dataset", "original_task", "task", "question",
        "option_A", "option_B", "option_C", "option_D",
        "gt_answer", "parts", "modality",
    )

    def _info(self) -> datasets.DatasetInfo:
        """Declare the fixed feature schema every emitted example must match."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=Features(
                {
                    "id": Value("string"),
                    "dataset": Value("string"),
                    "image": Image(),
                    "image_path": Value("string"),
                    "restricted": Value("bool"),
                    "original_task": Value("string"),
                    "task": Value("string"),
                    "question": Value("string"),
                    "option_A": Value("string"),
                    "option_B": Value("string"),
                    "option_C": Value("string"),
                    "option_D": Value("string"),
                    "gt_answer": Value("string"),
                    "correct_index": Value("int32"),
                    "parts": Value("string"),
                    "modality": Value("string"),
                    # Kept as an unstructured JSON string because the metadata
                    # shape varies across source datasets.
                    "metadata": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        """Return the single ``test`` split rooted at the snapshot directory."""
        # In an HF snapshot, the data files live in the same repo root.
        # NOTE(review): joining ".." resolves to the PARENT of this script's
        # directory — correct only if the script sits in a subfolder of the
        # repo. If the script is placed at the repo root itself (as the
        # original comment suggested), this walks one level too high; confirm
        # the intended layout.
        repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"data_dir": repo_root},
            )
        ]

    def _generate_examples(self, data_dir: str) -> Iterator[Tuple[int, Dict[str, Any]]]:
        """Walk ``data_dir``, load every ``test.json``, and yield schema-conforming examples.

        Fixes over yielding ``dict(item)`` directly:
        - items missing a declared feature key no longer crash Arrow encoding
          (string features default to ``""``, ``correct_index`` to ``None``);
        - extra, undeclared keys in an item are dropped instead of producing
          a schema mismatch;
        - directories are walked in sorted order so example indices are
          reproducible across filesystems.
        """
        idx = 0
        for root, dirs, files in os.walk(data_dir):
            dirs.sort()  # deterministic traversal order across platforms
            if "test.json" not in files:
                continue
            json_path = os.path.join(root, "test.json")
            with open(json_path, "r", encoding="utf-8") as f:
                items = json.load(f)
            if not isinstance(items, list):
                continue
            for item in items:
                image_path = item.get("image_path") or ""
                restricted = bool(item.get("restricted", False))
                abs_image_path = ""
                if image_path:
                    abs_image_path = (
                        image_path
                        if os.path.isabs(image_path)
                        else os.path.normpath(os.path.join(root, image_path))
                    )
                # Attach the image only when it is redistributable and the
                # file actually exists in this snapshot.
                image = None
                if abs_image_path and not restricted and os.path.exists(abs_image_path):
                    image = abs_image_path
                ex: Dict[str, Any] = {key: item.get(key, "") for key in self._STR_KEYS}
                ex["image_path"] = abs_image_path
                ex["restricted"] = restricted
                ex["image"] = image
                ex["correct_index"] = item.get("correct_index")
                ex["metadata"] = json.dumps(item.get("metadata", {}), ensure_ascii=False)
                yield idx, ex
                idx += 1