Datasets:

Modalities:
Geospatial
Languages:
English
DOI:
Libraries:
License:
File size: 7,077 Bytes
a248601
73c6f98
7dcdc23
73c6f98
 
7dcdc23
 
73c6f98
 
7dcdc23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73c6f98
 
7dcdc23
73c6f98
a248601
73c6f98
a248601
 
 
7dcdc23
 
 
 
 
a248601
73c6f98
 
 
 
 
 
7dcdc23
73c6f98
 
 
 
 
 
7dcdc23
73c6f98
7dcdc23
73c6f98
7dcdc23
 
 
73c6f98
7dcdc23
 
 
73c6f98
 
7dcdc23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
73c6f98
 
7dcdc23
 
 
73c6f98
 
7dcdc23
 
 
 
 
a248601
7dcdc23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177

import os
import json
import datasets
import rasterio
from datetime import datetime
from tqdm import tqdm

class SSL4EOEUForest(datasets.GeneratorBasedBuilder):
    """SSL4EO-EU Forest dataset loader.

    Supports two loading modes, selected via environment variables in
    ``_split_generators``:

    - directory scanning: walks ``images/`` and ``masks/`` and parses each
      sample folder on the fly;
    - prebuilt index streaming: reads one JSON sample per line from
      ``index.jsonl`` (written by :meth:`generate_index`), yielding one
      sample at a time — compatible with Hugging Face's streaming mode.

    Each sample includes:

    - a list of image paths (one per timestamp)
    - a single mask path
    - start and end timestamps for each image
    - Sentinel tile IDs
    - bounding box metadata

    Bounding boxes are stored as a dictionary of parallel arrays::

        {"minx": [...], "maxx": [...], "miny": [...], "maxy": [...]}

    This avoids redundancy and simplifies downstream parsing.
    """

    def _info(self):
        """Return dataset metadata and the feature schema for one sample."""
        return datasets.DatasetInfo(
            description="SSL4EO-EU Forest dataset with grouped timestamps and bounding box metadata.",
            features=datasets.Features({
                "image_paths": datasets.Sequence(datasets.Value("string")),
                "mask_path": datasets.Value("string"),
                "start_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "end_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "sentinel_tile_ids": datasets.Sequence(datasets.Value("string")),
                # Nested sub-features are declared with a plain dict — the
                # conventional form in `datasets` (Features is itself a dict
                # subclass, so behavior is unchanged).
                "bboxes": {
                    "minx": datasets.Sequence(datasets.Value("float32")),
                    "maxx": datasets.Sequence(datasets.Value("float32")),
                    "miny": datasets.Sequence(datasets.Value("float32")),
                    "maxy": datasets.Sequence(datasets.Value("float32")),
                },
            }),
            citation="""@misc{ssl4eo_eu_forest,

  author = {Nassim Ait Ali Braham and Conrad M Albrecht},

  title = {SSL4EO-EU Forest Dataset},

  year = {2025},

  howpublished = {https://huggingface.co/datasets/dm4eo/ssl4eo-eu-forest},

  note = {Funded by the EvoLand project under EU Horizon Europe grant No. 101082130.}

}""",
            homepage="https://www.evo-land.eu",
            license="CC-BY-4.0",
        )

    def _split_generators(self, dl_manager):
        """Choose the data source based on environment variables.

        ``HF_DATASET_USE_INDEX=true`` streams from a prebuilt ``index.jsonl``;
        otherwise the ``images/`` and ``masks/`` directories are scanned.
        ``HF_DATASET_LOCAL=true`` uses the current working directory as the
        dataset root instead of downloading via ``dl_manager``.
        """
        use_index = os.environ.get("HF_DATASET_USE_INDEX", "false").lower() == "true"
        use_local = os.environ.get("HF_DATASET_LOCAL", "false").lower() == "true"
        root = os.path.abspath(".") if use_local else dl_manager.download_and_extract(".")

        if use_index:
            index_path = os.path.join(root, "index.jsonl")
            return [datasets.SplitGenerator(name="all", gen_kwargs={"index_path": index_path})]
        else:
            images_dir = os.path.join(root, "images")
            masks_dir = os.path.join(root, "masks")
            return [datasets.SplitGenerator(name="all", gen_kwargs={
                "images_dir": images_dir,
                "masks_dir": masks_dir
            })]

    def _generate_examples(self, index_path=None, images_dir=None, masks_dir=None):
        """Yield ``(key, sample)`` pairs from the index file or the directories.

        Exactly one of ``index_path`` or the ``images_dir``/``masks_dir``
        pair is expected to be set (see ``_split_generators``).
        """
        if index_path:
            with open(index_path, "r") as f:
                for key, line in enumerate(f):
                    entry = json.loads(line)
                    # BUGFIX: generate_index() adds a "sample_id" field to
                    # each JSONL entry, but the schema in _info() does not
                    # declare it — Arrow encoding would reject the extra
                    # key. Drop it before yielding.
                    entry.pop("sample_id", None)
                    # Timestamps are serialized as ISO-8601 strings in the
                    # index; convert back to datetime for the schema.
                    entry["start_timestamps"] = [datetime.fromisoformat(ts) for ts in entry["start_timestamps"]]
                    entry["end_timestamps"] = [datetime.fromisoformat(ts) for ts in entry["end_timestamps"]]
                    yield key, entry
        else:
            sample_ids = sorted(os.listdir(images_dir))
            key = 0
            for sample_id in sample_ids:
                sample = self._parse_sample(sample_id, images_dir, masks_dir)
                if sample:
                    yield key, sample
                    key += 1

    @staticmethod
    def _parse_sample(sample_id, images_dir, masks_dir):
        """Parse a single sample directory into a metadata dictionary.

        Expects ``images/<sample_id>/<start>_<end>_<tile>/all_bands.tif``
        timestamp subdirectories and a ``masks/<sample_id>/mask.tif`` mask.
        Timestamp folder names are ``YYYYmmddTHHMMSS`` for start and end.

        Returns ``None`` if the sample is incomplete or malformed (missing
        mask, no valid timestamp directories, unreadable rasters).
        """
        sample_path = os.path.join(images_dir, sample_id)
        if not os.path.isdir(sample_path):
            return None

        mask_path = os.path.join(masks_dir, sample_id, "mask.tif")
        if not os.path.exists(mask_path):
            return None

        image_paths, start_ts, end_ts, tile_ids = [], [], [], []
        minx_list, maxx_list, miny_list, maxy_list = [], [], [], []

        for ts in sorted(os.listdir(sample_path)):
            # Folder name format: <start>_<end>_<tile_id>; skip anything else.
            parts = ts.split("_")
            if len(parts) != 3:
                continue
            try:
                start = datetime.strptime(parts[0], "%Y%m%dT%H%M%S")
                end = datetime.strptime(parts[1], "%Y%m%dT%H%M%S")
            except ValueError:
                continue

            tile_id = parts[2]
            image_path = os.path.join(sample_path, ts, "all_bands.tif")
            if not os.path.exists(image_path):
                continue

            try:
                with rasterio.open(image_path) as src:
                    bounds = src.bounds
            except Exception:
                # Deliberately best-effort: skip rasters that rasterio
                # cannot open rather than failing the whole sample.
                continue

            image_paths.append(image_path)
            start_ts.append(start)
            end_ts.append(end)
            tile_ids.append(tile_id)
            minx_list.append(bounds.left)
            maxx_list.append(bounds.right)
            miny_list.append(bounds.bottom)
            maxy_list.append(bounds.top)

        if not image_paths:
            return None

        return {
            "image_paths": image_paths,
            "mask_path": mask_path,
            "start_timestamps": start_ts,
            "end_timestamps": end_ts,
            "sentinel_tile_ids": tile_ids,
            "bboxes": {
                "minx": minx_list,
                "maxx": maxx_list,
                "miny": miny_list,
                "maxy": maxy_list
            }
        }

    @classmethod
    def generate_index(cls, dataset_dir, output_path="index.jsonl"):
        """Scan ``dataset_dir`` and write a streaming-friendly ``index.jsonl``.

        Each line is one complete sample in JSON form, with timestamps
        serialized as ISO-8601 strings and the originating ``sample_id``
        attached for traceability (stripped again in ``_generate_examples``).
        """
        images_dir = os.path.join(dataset_dir, "images")
        masks_dir = os.path.join(dataset_dir, "masks")
        sample_ids = sorted(os.listdir(images_dir))

        with open(output_path, "w") as f:
            for sample_id in tqdm(sample_ids, desc="Generating index", unit="sample"):
                sample = cls._parse_sample(sample_id, images_dir, masks_dir)
                if sample:
                    # datetime objects are not JSON-serializable; round-trip
                    # via ISO-8601 (see datetime.fromisoformat on read).
                    sample["start_timestamps"] = [ts.isoformat() for ts in sample["start_timestamps"]]
                    sample["end_timestamps"] = [ts.isoformat() for ts in sample["end_timestamps"]]
                    sample["sample_id"] = sample_id
                    f.write(json.dumps(sample) + "\n")

        print(f"✅ Index written to {output_path}")