Datasets:

Modalities:
Geospatial
Languages:
English
DOI:
Libraries:
License:
cmalbrec committed on
Commit
b7d4966
·
verified ·
1 Parent(s): e7271b2

test new custom dataset loader for streaming

Browse files
Files changed (1) hide show
  1. dataset.py +35 -159
dataset.py CHANGED
@@ -1,176 +1,52 @@
1
 
2
  import os
3
- import json
4
  import datasets
5
- import rasterio
6
- from datetime import datetime
7
- from tqdm import tqdm
8
-
9
class SSL4EOEUForest(datasets.GeneratorBasedBuilder):
    """
    SSL4EO-EU Forest dataset loader.

    Supports both directory-based scanning and prebuilt JSONL index
    streaming, yielding one sample at a time so it works with
    Hugging Face streaming mode.

    Each sample groups every timestamp of one location:
      - image paths (one per timestamp) and a single mask path
      - start/end timestamps per image
      - Sentinel tile IDs
      - bounding boxes stored as a dict of parallel coordinate arrays:
        {"minx": [...], "maxx": [...], "miny": [...], "maxy": [...]}
        which avoids redundancy and simplifies downstream parsing.
    """

    def _info(self):
        """Declare the feature schema plus citation/homepage/license metadata."""
        bbox_schema = datasets.Features({
            axis: datasets.Sequence(datasets.Value("float32"))
            for axis in ("minx", "maxx", "miny", "maxy")
        })
        return datasets.DatasetInfo(
            description="SSL4EO-EU Forest dataset with grouped timestamps and bounding box metadata.",
            features=datasets.Features({
                "image_paths": datasets.Sequence(datasets.Value("string")),
                "mask_path": datasets.Value("string"),
                "start_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "end_timestamps": datasets.Sequence(datasets.Value("timestamp[ms]")),
                "sentinel_tile_ids": datasets.Sequence(datasets.Value("string")),
                "bboxes": bbox_schema,
            }),
            citation="""@misc{ssl4eo_eu_forest,
  author = {Nassim Ait Ali Braham and Conrad M Albrecht},
  title = {SSL4EO-EU Forest Dataset},
  year = {2025},
  howpublished = {https://huggingface.co/datasets/dm4eo/ssl4eo-eu-forest},
  note = {Funded by the EvoLand project under EU Horizon Europe grant No. 101082130.}
}""",
            homepage="https://www.evo-land.eu",
            license="CC-BY-4.0",
        )

    def _split_generators(self, dl_manager):
        """Pick index-streaming vs directory-scanning mode via env vars."""
        env = os.environ.get
        use_index = env("HF_DATASET_USE_INDEX", "false").lower() == "true"
        use_local = env("HF_DATASET_LOCAL", "false").lower() == "true"
        # Local mode reads the working directory directly; otherwise defer
        # to the download manager.
        root = os.path.abspath(".") if use_local else dl_manager.download_and_extract(".")

        if use_index:
            kwargs = {"index_path": os.path.join(root, "index.jsonl")}
        else:
            kwargs = {
                "images_dir": os.path.join(root, "images"),
                "masks_dir": os.path.join(root, "masks"),
            }
        return [datasets.SplitGenerator(name="all", gen_kwargs=kwargs)]

    def _generate_examples(self, index_path=None, images_dir=None, masks_dir=None):
        """Yield (key, sample) pairs from the JSONL index or the directory tree."""
        if index_path:
            with open(index_path, "r") as handle:
                for key, raw in enumerate(handle):
                    record = json.loads(raw)
                    # Timestamps are serialized as ISO strings in the index;
                    # restore datetime objects for the timestamp[ms] features.
                    for field in ("start_timestamps", "end_timestamps"):
                        record[field] = [datetime.fromisoformat(v) for v in record[field]]
                    yield key, record
        else:
            next_key = 0
            for sample_id in sorted(os.listdir(images_dir)):
                parsed = self._parse_sample(sample_id, images_dir, masks_dir)
                # Keys stay contiguous: only valid samples advance the counter.
                if parsed is not None:
                    yield next_key, parsed
                    next_key += 1

    @staticmethod
    def _parse_sample(sample_id, images_dir, masks_dir):
        """
        Parse one sample directory into a metadata dict.

        Returns None when the sample is incomplete or malformed (missing
        directory, missing mask, or no readable image).
        """
        sample_path = os.path.join(images_dir, sample_id)
        mask_path = os.path.join(masks_dir, sample_id, "mask.tif")
        if not os.path.isdir(sample_path) or not os.path.exists(mask_path):
            return None

        image_paths = []
        start_ts = []
        end_ts = []
        tile_ids = []
        boxes = {"minx": [], "maxx": [], "miny": [], "maxy": []}

        # Timestamp folders are named "<start>_<end>_<tileid>".
        for stamp in sorted(os.listdir(sample_path)):
            fields = stamp.split("_")
            if len(fields) != 3:
                continue
            try:
                start = datetime.strptime(fields[0], "%Y%m%dT%H%M%S")
                end = datetime.strptime(fields[1], "%Y%m%dT%H%M%S")
            except ValueError:
                continue  # folder name is not a valid timestamp pair

            image_path = os.path.join(sample_path, stamp, "all_bands.tif")
            if not os.path.exists(image_path):
                continue

            try:
                with rasterio.open(image_path) as src:
                    bounds = src.bounds
            except Exception:
                continue  # unreadable raster: skip this timestamp

            image_paths.append(image_path)
            start_ts.append(start)
            end_ts.append(end)
            tile_ids.append(fields[2])
            boxes["minx"].append(bounds.left)
            boxes["maxx"].append(bounds.right)
            boxes["miny"].append(bounds.bottom)
            boxes["maxy"].append(bounds.top)

        if not image_paths:
            return None

        return {
            "image_paths": image_paths,
            "mask_path": mask_path,
            "start_timestamps": start_ts,
            "end_timestamps": end_ts,
            "sentinel_tile_ids": tile_ids,
            "bboxes": boxes,
        }

    @classmethod
    def generate_index(cls, dataset_dir, output_path="index.jsonl"):
        """
        Scan the dataset directory and write a streaming-friendly
        index.jsonl file: one complete JSON sample per line, with
        timestamps serialized to ISO format.
        """
        images_dir = os.path.join(dataset_dir, "images")
        masks_dir = os.path.join(dataset_dir, "masks")

        with open(output_path, "w") as out:
            for sample_id in tqdm(sorted(os.listdir(images_dir)),
                                  desc="Generating index", unit="sample"):
                sample = cls._parse_sample(sample_id, images_dir, masks_dir)
                if not sample:
                    continue
                sample["start_timestamps"] = [t.isoformat() for t in sample["start_timestamps"]]
                sample["end_timestamps"] = [t.isoformat() for t in sample["end_timestamps"]]
                sample["sample_id"] = sample_id
                out.write(json.dumps(sample) + "\n")

        print(f"✅ Index written to {output_path}")
 
1
 
2
  import os
 
3
  import datasets
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
class MyDataset(datasets.GeneratorBasedBuilder):
    """
    Streaming-friendly loader that pairs per-sample images with masks.

    Images are expected under ``images/<sample_id>/.../all_bands.tif`` and
    masks under ``masks/<sample_id>/...`` — assumed from the path parsing
    below; TODO confirm against the repository layout.
    """

    def _info(self):
        return datasets.DatasetInfo(
            features=datasets.Features({
                "sample_id": datasets.Value("string"),
                "image": datasets.Image(),  # or datasets.Value("string") if you want paths
                "mask": datasets.Image(),
            }),
        )

    def _split_generators(self, dl_manager):
        # Streaming mode: use iter_files to avoid downloading everything
        image_paths = dl_manager.iter_files("images")
        mask_paths = dl_manager.iter_files("masks")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "image_paths": image_paths,
                    "mask_paths": mask_paths,
                },
            )
        ]

    def _generate_examples(self, image_paths, mask_paths):
        """Yield (key, example) pairs by joining images to masks on sample_id."""
        # Build a lookup: sample_id -> mask path. The sample id is taken
        # from the mask's parent directory name.
        mask_lookup = {}
        for mask_path in mask_paths:
            sample_id = os.path.basename(os.path.dirname(mask_path))
            mask_lookup[sample_id] = mask_path

        # Iterate over image files, keeping only the band stacks.
        key = 0
        for image_path in image_paths:
            if not image_path.endswith("all_bands.tif"):
                continue

            # NOTE(review): assumes "/"-separated paths from iter_files and
            # layout images/<sample_id>/<timestamp>/all_bands.tif — confirm.
            parts = image_path.split("/")
            sample_id = parts[-3]  # e.g., "0000005"

            if sample_id not in mask_lookup:
                continue

            # BUG FIX: the original yielded `self.generate_index()` as the
            # example key, but no such method exists on this class (it was
            # removed in this commit), so generation crashed with an
            # AttributeError. Yield a deterministic integer key instead.
            yield key, {
                "sample_id": sample_id,
                "image": image_path,
                "mask": mask_lookup[sample_id],
            }
            key += 1