import os
import json
from datetime import datetime

import datasets
import rasterio
from tqdm import tqdm


class SSL4EOEUForest(datasets.GeneratorBasedBuilder):
    """SSL4EO-EU Forest dataset loader.

    This loader supports both directory-based scanning and prebuilt index
    streaming via JSONL.  It yields one sample at a time, making it
    compatible with Hugging Face's streaming mode.

    Each sample includes:
      - A list of image paths (one per timestamp)
      - A single mask path
      - Start and end timestamps for each image
      - Sentinel tile IDs
      - Bounding box metadata

    Bounding boxes are stored as a dictionary of parallel arrays::

        {"minx": [...], "maxx": [...], "miny": [...], "maxy": [...]}

    This avoids redundancy and simplifies downstream parsing.
    """

    def _info(self):
        """Declare the dataset schema, citation, homepage and license."""
        return datasets.DatasetInfo(
            description=(
                "SSL4EO-EU Forest dataset with grouped timestamps and "
                "bounding box metadata."
            ),
            features=datasets.Features({
                "image_paths": datasets.Sequence(datasets.Value("string")),
                "mask_path": datasets.Value("string"),
                "start_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "end_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "sentinel_tile_ids": datasets.Sequence(datasets.Value("string")),
                # Nested feature groups are declared with a plain dict per the
                # `datasets` convention (not a nested Features instance).
                "bboxes": {
                    "minx": datasets.Sequence(datasets.Value("float32")),
                    "maxx": datasets.Sequence(datasets.Value("float32")),
                    "miny": datasets.Sequence(datasets.Value("float32")),
                    "maxy": datasets.Sequence(datasets.Value("float32")),
                },
            }),
            citation="""@misc{ssl4eo_eu_forest, author = {Nassim Ait Ali Braham and Conrad M Albrecht}, title = {SSL4EO-EU Forest Dataset}, year = {2025}, howpublished = {https://huggingface.co/datasets/dm4eo/ssl4eo-eu-forest}, note = {Funded by the EvoLand project under EU Horizon Europe grant No. 
101082130.} }""",
            homepage="https://www.evo-land.eu",
            license="CC-BY-4.0",
        )

    def _split_generators(self, dl_manager):
        """Select index-streaming or directory-scan mode via env vars.

        ``HF_DATASET_USE_INDEX=true`` streams samples from a prebuilt
        ``index.jsonl``; ``HF_DATASET_LOCAL=true`` reads from the current
        working directory instead of going through the download manager.
        """
        use_index = os.environ.get("HF_DATASET_USE_INDEX", "false").lower() == "true"
        use_local = os.environ.get("HF_DATASET_LOCAL", "false").lower() == "true"
        root = os.path.abspath(".") if use_local else dl_manager.download_and_extract(".")

        if use_index:
            index_path = os.path.join(root, "index.jsonl")
            return [
                datasets.SplitGenerator(
                    name="all",
                    gen_kwargs={"index_path": index_path},
                )
            ]

        return [
            datasets.SplitGenerator(
                name="all",
                gen_kwargs={
                    "images_dir": os.path.join(root, "images"),
                    "masks_dir": os.path.join(root, "masks"),
                },
            )
        ]

    def _generate_examples(self, index_path=None, images_dir=None, masks_dir=None):
        """Yield ``(key, sample)`` pairs.

        Reads from ``index.jsonl`` when ``index_path`` is given (streaming
        mode); otherwise scans the ``images``/``masks`` directory tree and
        parses each sample on the fly.
        """
        if index_path:
            with open(index_path, "r") as f:
                for key, line in enumerate(f):
                    entry = json.loads(line)
                    # generate_index() records a bookkeeping "sample_id" per
                    # line; it is not part of the declared feature schema, so
                    # drop it before yielding to avoid a casting error.
                    entry.pop("sample_id", None)
                    # Timestamps are serialized as ISO strings in the index;
                    # restore datetime objects to match timestamp[ms] features.
                    entry["start_timestamps"] = [
                        datetime.fromisoformat(ts) for ts in entry["start_timestamps"]
                    ]
                    entry["end_timestamps"] = [
                        datetime.fromisoformat(ts) for ts in entry["end_timestamps"]
                    ]
                    yield key, entry
        else:
            key = 0
            for sample_id in sorted(os.listdir(images_dir)):
                sample = self._parse_sample(sample_id, images_dir, masks_dir)
                if sample:
                    yield key, sample
                    key += 1

    @staticmethod
    def _parse_sample(sample_id, images_dir, masks_dir):
        """Parse a single sample directory into a metadata dictionary.

        Expects each timestamp subdirectory to be named
        ``<start>_<end>_<tileid>`` with timestamps in ``%Y%m%dT%H%M%S``
        format, containing an ``all_bands.tif``.  Malformed or incomplete
        entries are silently skipped; returns ``None`` if the sample has no
        usable timestamp at all (or is missing its mask).
        """
        sample_path = os.path.join(images_dir, sample_id)
        if not os.path.isdir(sample_path):
            return None

        mask_path = os.path.join(masks_dir, sample_id, "mask.tif")
        if not os.path.exists(mask_path):
            return None

        image_paths, start_ts, end_ts, tile_ids = [], [], [], []
        minx_list, maxx_list, miny_list, maxy_list = [], [], [], []

        for ts in sorted(os.listdir(sample_path)):
            parts = ts.split("_")
            if len(parts) != 3:
                continue
            try:
                start = datetime.strptime(parts[0], "%Y%m%dT%H%M%S")
                end = datetime.strptime(parts[1], "%Y%m%dT%H%M%S")
            except ValueError:
                continue
            tile_id = parts[2]

            image_path = os.path.join(sample_path, ts, "all_bands.tif")
            if not os.path.exists(image_path):
                continue
            # Best-effort read of the georeferenced bounds; an unreadable
            # raster just drops this timestamp rather than failing the sample.
            try:
                with rasterio.open(image_path) as src:
                    bounds = src.bounds
            except Exception:
                continue

            image_paths.append(image_path)
            start_ts.append(start)
            end_ts.append(end)
            tile_ids.append(tile_id)
            minx_list.append(bounds.left)
            maxx_list.append(bounds.right)
            miny_list.append(bounds.bottom)
            maxy_list.append(bounds.top)

        if not image_paths:
            return None

        return {
            "image_paths": image_paths,
            "mask_path": mask_path,
            "start_timestamps": start_ts,
            "end_timestamps": end_ts,
            "sentinel_tile_ids": tile_ids,
            "bboxes": {
                "minx": minx_list,
                "maxx": maxx_list,
                "miny": miny_list,
                "maxy": maxy_list,
            },
        }

    @classmethod
    def generate_index(cls, dataset_dir, output_path="index.jsonl"):
        """Scan ``dataset_dir`` and write a streaming-friendly index.jsonl.

        Each line is one complete sample serialized as JSON, with timestamps
        stored as ISO-8601 strings and the originating ``sample_id`` recorded
        for traceability.
        """
        images_dir = os.path.join(dataset_dir, "images")
        masks_dir = os.path.join(dataset_dir, "masks")
        sample_ids = sorted(os.listdir(images_dir))

        with open(output_path, "w") as f:
            for sample_id in tqdm(sample_ids, desc="Generating index", unit="sample"):
                sample = cls._parse_sample(sample_id, images_dir, masks_dir)
                if sample:
                    # datetime objects are not JSON-serializable; store ISO
                    # strings and let _generate_examples() parse them back.
                    sample["start_timestamps"] = [
                        ts.isoformat() for ts in sample["start_timestamps"]
                    ]
                    sample["end_timestamps"] = [
                        ts.isoformat() for ts in sample["end_timestamps"]
                    ]
                    sample["sample_id"] = sample_id
                    f.write(json.dumps(sample) + "\n")

        print(f"✅ Index written to {output_path}")