import json
import os

import datasets
import numpy as np
|
|
class STARDataset(datasets.GeneratorBasedBuilder):
    """STAR super-resolution dataset builder.

    Yields metadata records (integer id plus high-/low-resolution file
    paths and the split name) read from JSON-lines files. NOTE(review):
    despite the original "NPY file loading" description, this builder
    only emits the *paths* — presumably to .npy arrays — and never loads
    the arrays itself; loading is left to the consumer.
    """

    VERSION = datasets.Version("1.0.0")

    def _info(self):
        """Declare the schema of one example: id, file paths, and split."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "id": datasets.Value("int32"),
                "hr_path": datasets.Value("string"),  # path to high-resolution data file
                "lr_path": datasets.Value("string"),  # path to low-resolution data file
                "split": datasets.Value("string"),
            })
        )

    def _split_generators(self, dl_manager):
        """Map each split to its JSON-lines metadata file (relative paths)."""
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": "sampled_data/x2/train_metadata.jsonl"},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"filepath": "sampled_data/x2/validation_metadata.jsonl"},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield (key, example) pairs from a JSON-lines metadata file.

        Each non-empty line is parsed as one JSON object and yielded
        as-is with its line index as the key. Blank lines (e.g. a
        trailing newline) are skipped so they cannot raise a
        JSONDecodeError. The file is decoded explicitly as UTF-8
        rather than the platform default.
        """
        with open(filepath, "r", encoding="utf-8") as f:
            for idx, line in enumerate(f):
                line = line.strip()
                if not line:
                    continue  # tolerate blank/trailing lines in the JSONL
                yield idx, json.loads(line)