from __future__ import annotations
import json
from pathlib import Path
from datasets import Features, Sequence, Value
ROOT = Path(__file__).parent

# Schema of one per-status vote tally nested inside each example.
_STATUS_COUNT_FEATURES = Features(
    {
        "status": Value("string"),
        "count": Value("int32"),
    }
)

# Column schema shared by every dataset configuration below.
FEATURES = Features(
    {
        "pair_id": Value("string"),
        "task_family": Value("string"),
        "task_id": Value("string"),
        "option_id": Value("string"),
        "option_description": Value("string"),
        "natural_image": Value("string"),
        "tactile_image": Value("string"),
        "votes_total": Value("int32"),
        "positives": Value("int32"),
        "negatives": Value("int32"),
        "vote_fraction": Value("float32"),
        "label": Value("int32"),
        "source_assignments": Value("int32"),
        "status_counts": Sequence(_STATUS_COUNT_FEATURES),
        "used_consensus": Value("bool"),
    }
)
# Named configurations: the "full" config plus one per task family.
# Values map split name -> JSONL path relative to ROOT.
CONFIGS = {
    "full": {
        "description": "All families and options with the official splits.",
        "data_files": {
            "train": "processed/splits/train.jsonl",
            "validation": "processed/splits/val.jsonl",
            "test": "processed/splits/test.jsonl",
        },
    },
}

# Family-specific configs reuse the same split layout under family_splits/.
for _family in ("F1", "F2", "F3", "F4", "F5", "F6"):
    _base = f"processed/family_splits/{_family}"
    CONFIGS[f"family_{_family.lower()}"] = {
        "description": f"Family {_family} only.",
        "data_files": {
            "train": f"{_base}/train.jsonl",
            "validation": f"{_base}/val.jsonl",
            "test": f"{_base}/test.jsonl",
        },
    }
def count_examples(path: Path) -> int:
    """Return the number of lines (i.e. JSONL records) in *path*."""
    total = 0
    with path.open("r", encoding="utf-8") as handle:
        for _ in handle:
            total += 1
    return total
def build_split_info(rel_path: str) -> tuple[int, int]:
    """Measure one split file.

    *rel_path* is resolved against ROOT; returns ``(num_examples, num_bytes)``
    taken from the file's line count and on-disk size.
    """
    split_file = ROOT / rel_path
    size_in_bytes = split_file.stat().st_size
    example_count = count_examples(split_file)
    return example_count, size_in_bytes
def build_config_entry(name: str, data_files: dict) -> dict:
    """Assemble the ``dataset_infos.json`` entry for one configuration.

    *data_files* maps split name -> path relative to ROOT; example counts and
    byte sizes are measured from the files on disk. ``download_size`` equals
    ``dataset_size`` because the data ships with the repository.
    """
    split_entries: dict = {}
    total_bytes = 0
    for split, rel_path in data_files.items():
        examples, size = build_split_info(rel_path)
        total_bytes += size
        split_entries[split] = {
            "name": split,
            "num_examples": examples,
            "num_bytes": size,
        }
    entry = {
        "description": CONFIGS[name]["description"],
        "citation": """@misc{khan2026tactileevalstepautomatedfinegrained, title={TactileEval: A Step Towards Automated Fine-Grained Evaluation and Editing of Tactile Graphics}, author={Adnan Khan and Abbas Akkasi and Majid Komeili}, year={2026}, eprint={2604.19829}, archivePrefix={arXiv}, primaryClass={cs.CV}, url={https://arxiv.org/abs/2604.19829}}""",
        "homepage": "https://tactileeval.github.io/",
        "license": "mit",
        "features": FEATURES.to_dict(),
        "splits": split_entries,
        "download_size": total_bytes,
        "dataset_size": total_bytes,
        "config_name": name,
        "data_files": {
            split: [{"filename": rel_path}]
            for split, rel_path in data_files.items()
        },
    }
    # Only advertise a default validation split when one actually exists.
    entry["default_validation_split"] = (
        "validation" if "validation" in data_files else None
    )
    return entry
def main() -> None:
    """Measure every configuration and write ROOT/dataset_infos.json."""
    info: dict = {}
    for config_name, config in CONFIGS.items():
        info[config_name] = build_config_entry(config_name, config["data_files"])
    output_path = ROOT / "dataset_infos.json"
    with output_path.open("w", encoding="utf-8") as handle:
        json.dump(info, handle, indent=2)
    print(f"Wrote {output_path}")


if __name__ == "__main__":
    main()