| """ |
| download_swim.py |
| Streams and downloads the full paired dataset (images + label txt files) from a Hugging Face Hub repository. |
| It recursively processes all available chunk subfolders (e.g., '000', '001', ...) under given parent paths. |
| Features: |
| - Recursively discovers subdirs (chunks) using HfFileSystem |
| - Optionally flattens the directory structure by removing the deepest chunk level |
| - Saves each .png image with its corresponding .txt label |
| Use this script if you want to download the complete dataset for model training or offline access. |
| Usage: |
| # Download all chunks (flattened) |
| python utils/download_swim.py --output-dir ./SWiM --flatten |
| # Download specific chunks |
python utils/download_swim.py --chunks 000 001 002
| Arguments: |
| --repo-id Hugging Face dataset repository ID |
| --images-parent Parent directory for image chunks (e.g., Baseline/images/train) |
| --labels-parent Parent directory for label chunks (e.g., Baseline/labels/train) |
| --output-dir Where to save the downloaded dataset |
--flatten Flatten output paths by removing the final 'chunk' subdir
| --chunks Specific chunk names (e.g., 000 001); omit to download all |
| """ |
|
|
|
|
|
|
| import argparse |
| from io import BytesIO |
| from pathlib import Path |
| from huggingface_hub import list_repo_tree, hf_hub_url, HfFileSystem |
| from huggingface_hub.hf_api import RepoFile |
| import fsspec |
| from PIL import Image |
| from tqdm import tqdm |
|
|
def enumerate_chunks(repo_id, images_parent):
    """
    Discover the immediate chunk subdirectories beneath the images parent.

    Uses HfFileSystem to list `datasets/{repo_id}/{images_parent}` and keeps
    only directory entries.

    Returns:
        Sorted list of subdirectory basenames (e.g. ['000', '001', ...]).
    """
    fs = HfFileSystem()
    base_path = f"datasets/{repo_id}/{images_parent}"
    return sorted(
        entry['name'].rsplit('/', 1)[-1]
        for entry in fs.ls(base_path, detail=True)
        if entry['type'] == 'directory'
    )
|
|
def sample_dataset(
    repo_id: str,
    images_parent: str,
    labels_parent: str,
    output_dir: str,
    flatten: bool,
    chunks: list = None,
):
    """
    Stream every paired .png image and .txt label from the selected chunks.

    Args:
        repo_id: Hugging Face dataset repository ID.
        images_parent: Repo path containing image chunk subdirs
            (e.g. "Baseline/images/train").
        labels_parent: Repo path containing the mirrored label chunk subdirs.
        output_dir: Local directory that receives the downloaded files.
        flatten: If True, drop the chunk-level subdirectory from output paths.
        chunks: Specific chunk names (e.g. ["000", "001"]); None means
            discover and download all chunks under `images_parent`.
    """
    out_root = Path(output_dir)
    total_downloaded = 0

    all_chunks = chunks
    if all_chunks is None:
        all_chunks = enumerate_chunks(repo_id, images_parent)
    print(f"Found chunks: {all_chunks}")

    for chunk in all_chunks:
        image_subdir = f"{images_parent}/{chunk}"
        label_subdir = f"{labels_parent}/{chunk}"

        image_files = list_repo_tree(
            repo_id=repo_id,
            path_in_repo=image_subdir,
            repo_type="dataset",
            recursive=True,
        )

        for img_file in tqdm(image_files, desc=f"Downloading {chunk}", leave=False):
            # Skip directory entries and anything that isn't a PNG
            # (filter is case-insensitive, so .PNG files are included).
            if not isinstance(img_file, RepoFile) or not img_file.path.lower().endswith(".png"):
                continue

            rel_path = Path(img_file.path).relative_to(image_subdir)
            label_path = f"{label_subdir}/{rel_path.with_suffix('.txt')}"

            if flatten:
                # Drop the chunk directory (the second-to-last path component).
                parts = img_file.path.split('/')
                flat_dir = '/'.join(parts[:-2])
                file_name = parts[-1]
                local_image_path = out_root / flat_dir / file_name
                # BUGFIX: derive the label path with with_suffix on the
                # filename instead of str.replace('.png', '.txt') on the whole
                # path — str.replace hit the first occurrence anywhere in the
                # path and missed upper-case '.PNG' files that passed the
                # filter above. The images->labels swap is likewise restricted
                # to the directory portion so a filename containing "images"
                # is never corrupted.
                local_label_path = (
                    out_root
                    / flat_dir.replace('images', 'labels')
                    / Path(file_name).with_suffix('.txt')
                )
            else:
                local_image_path = out_root / img_file.path
                local_label_path = out_root / label_path

            local_image_path.parent.mkdir(parents=True, exist_ok=True)
            local_label_path.parent.mkdir(parents=True, exist_ok=True)

            image_url = hf_hub_url(repo_id=repo_id, filename=img_file.path, repo_type="dataset")
            label_url = hf_hub_url(repo_id=repo_id, filename=label_path, repo_type="dataset")
            try:
                with fsspec.open(image_url) as f:
                    # Fully buffer the bytes so PIL never lazily reads a
                    # closed remote handle.
                    image = Image.open(BytesIO(f.read()))
                    image.save(local_image_path)
                with fsspec.open(label_url) as f:
                    txt_content = f.read()
                local_label_path.write_bytes(txt_content)
                total_downloaded += 1
            except Exception as e:
                # Best-effort per-file download: report and keep going.
                print(f"Failed {rel_path}: {e}")

    print(f"Downloaded {total_downloaded} image/txt pairs.")
    print(f"Saved under: {out_root.resolve()}")
|
|
def parse_args():
    """
    Parse command-line arguments.

    BUGFIX: `--flatten` previously used action='store_true', which (a) made
    its effective default False while the module docs state the default is
    True, and (b) made the documented `--flatten False` form an argparse
    error. BooleanOptionalAction keeps bare `--flatten` working, defaults to
    True, and adds `--no-flatten` to disable flattening.

    Returns:
        argparse.Namespace with repo_id, images_parent, labels_parent,
        output_dir, flatten, and chunks attributes.
    """
    parser = argparse.ArgumentParser(
        description=(
            "Stream and sample paired images + txt labels from a Hugging Face "
            "folder-structured dataset, optionally across multiple chunks."
        )
    )
    parser.add_argument("--repo-id", default="RiceD2KLab/SWiM-SpacecraftWithMasks", help="Hugging Face dataset repo ID.")
    parser.add_argument("--images-parent", default="Baseline/images", help="Parent directory for image chunks.")
    parser.add_argument("--labels-parent", default="Baseline/labels", help="Parent directory for label chunks.")
    parser.add_argument("--output-dir", default="./SWiM", help="Where to save sampled data.")
    parser.add_argument(
        "--flatten",
        action=argparse.BooleanOptionalAction,
        default=True,
        help="Remove the final chunk subdir from output paths (use --no-flatten to keep it).",
    )
    parser.add_argument("--chunks", nargs="*", default=None, help="Specific chunk names to sample (e.g. 000 001). Leave empty to process all.")
    return parser.parse_args()
|
|
if __name__ == "__main__":
    # CLI entry point: parse arguments and run the full download.
    cli_args = parse_args()
    sample_dataset(
        repo_id=cli_args.repo_id,
        images_parent=cli_args.images_parent,
        labels_parent=cli_args.labels_parent,
        output_dir=cli_args.output_dir,
        flatten=cli_args.flatten,
        chunks=cli_args.chunks,
    )
|
|