# GroMo25 / download_script_for_GroMo25_dataset.py
# MrigLabIITRopar's picture
# Revert last upload
# d541e6d
# To cite the dataset, use the citation below
'''
@inproceedings{10.1145/3746027.3762097,
author = {Bansal, Shreya and Bhatt, Ruchi and Chander, Amanpreet and Kaur, Rupinder and Singh, Malya and Kankanhalli, Mohan and El Saddik, Abdulmotaleb and Saini, Mukesh},
title = {GroMo25: ACM Multimedia 2025 Grand Challenge for Plant Growth Modeling with Multiview Images},
year = {2025},
isbn = {9798400720352},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
url = {https://doi.org/10.1145/3746027.3762097},
doi = {10.1145/3746027.3762097},
abstract = {Understanding plant growth dynamics is a critical component of modern agricultural research, with applications in yield prediction, phenotyping, and sustainable crop management. Despite recent advances in computer vision and deep learning, progress in plant growth modeling has been constrained by the lack of publicly available, high-resolution, multiview, and temporally rich datasets. To address this gap, we introduce Growth Modelling GroMo25, the first international challenge on plant growth modeling using multiview imagery. In this challenge, we propose a dataset that comprises high-resolution images of four crops: wheat, mustard, radish, and okra, captured at consistent time intervals from multiple camera viewpoints under controlled environmental conditions. The challenge focuses on two key tasks: (1) plant age prediction and (2) leaf count estimation, both requiring models to use spatial and temporal plant features. GroMo25 attracted participation from multiple teams worldwide, encouraging benchmarking and innovation in vision-based plant phenotyping. The GitHub repository is publicly available at https://github.com/mriglab/GroMo-Plant-Growth-Modeling-with-Multiview-Images.},
booktitle = {Proceedings of the 33rd ACM International Conference on Multimedia},
pages = {14204–14209},
numpages = {6},
keywords = {growth age prediction, leaf count estimation, multiview},
location = {Dublin, Ireland},
series = {MM '25}
'''
# change the TOKEN variable to your HuggingFace access token to download this dataset
# this dataset is over 360 GB in size.
# mostly images
# This code will allow you to download all the images
# in batches which does not trigger the rate limit
# put by hugging face
# this download script:
# 1. Resumes automatically if your system stops - already downloaded files are skipped on re-run
# 2. 75 files per batch are downloaded at a time to stay under the 1000 requests/5min limit
# 3. it includes a 36 second wait time between batches to stay under rate limits
# 4. Auto-retry on 429 or any other error without crashing
# check this youtube video to know how to get your own access token : https://youtu.be/uBSbgQ1qPHI?si=M1q9HP03DrcxO1KT
import time
from pathlib import Path
from huggingface_hub import HfApi, hf_hub_download
# --- configuration ------------------------------------------------------
TOKEN = "paste huggingface token here"  # personal HuggingFace access token
REPO_ID = "MrigLabIITRopar/GroMo25"     # dataset repository on the Hub
LOCAL_DIR = Path("./GroMo25_dataset")   # destination directory for all files
FILES_PER_BATCH = 75                    # files per batch, to stay under rate limits
WAIT_BETWEEN_BATCHES = 3600 / 100       # 36-second pause after every batch
# Client bound to the token; ensure the download directory exists up front.
api = HfApi(token=TOKEN)
LOCAL_DIR.mkdir(parents=True, exist_ok=True)
# --- build the work list ------------------------------------------------
# Ask the Hub for every file path contained in the dataset repository.
print("Fetching file names as list from HuggingFace")
all_files = list(api.list_repo_files(repo_id=REPO_ID, repo_type="dataset", token=TOKEN))
print(f"Total files in repository: {len(all_files)}")

# Record the relative POSIX paths of files already on disk, so that a
# re-run after an interruption resumes instead of re-downloading.
already_downloaded = {
    entry.relative_to(LOCAL_DIR).as_posix()
    for entry in LOCAL_DIR.rglob("*")
    if entry.is_file()
}
print(f"Already downloaded: {len(already_downloaded)} files")

# Whatever is not on disk yet still needs to be fetched.
remaining = [path for path in all_files if path not in already_downloaded]
print(f"Remaining to download: {len(remaining)} files")
# --- download in rate-limited batches -----------------------------------
# Split the remaining files into fixed-size batches so each batch stays
# under HuggingFace's ~1000 requests / 5 min rate limit (see header notes).
batches = [remaining[i:i+FILES_PER_BATCH] for i in range(0, len(remaining), FILES_PER_BATCH)]
print(f"Total batches: {len(batches)}")
for batch_idx, batch in enumerate(batches):
    print(f"\nBatch {batch_idx + 1}/{len(batches)} — downloading {len(batch)} files...")
    for file_path in batch:
        local_path = LOCAL_DIR / file_path
        # Skip files that appeared on disk after the work list was built.
        if local_path.exists():
            continue
        # Create parent directories so the download has somewhere to land.
        local_path.parent.mkdir(parents=True, exist_ok=True)
        # Retry forever: the script is deliberately crash-proof against rate
        # limits and transient network errors (see header notes).
        while True:
            try:
                hf_hub_download(
                    repo_id=REPO_ID,
                    repo_type="dataset",
                    filename=file_path,
                    local_dir=str(LOCAL_DIR),
                    token=TOKEN,
                )
                break
            except Exception as e:
                if "429" in str(e):
                    # HTTP 429 Too Many Requests: back off longer.
                    # (bugfix: was an f-string with no placeholders)
                    print("Rate limited. Waiting 60 seconds before retrying...")
                    time.sleep(60)
                else:
                    print(f"Error downloading {file_path}: {e}. Retrying in 30 seconds...")
                    time.sleep(30)
    # Pause between batches to stay under the rate limit. Bugfix: the
    # original also slept once more after the FINAL batch, stalling the
    # run for 36 s before reporting completion — skip that last sleep.
    if batch_idx + 1 < len(batches):
        print(f"Batch {batch_idx + 1} done. Waiting {WAIT_BETWEEN_BATCHES:.0f}s...")
        time.sleep(WAIT_BETWEEN_BATCHES)
    else:
        print(f"Batch {batch_idx + 1} done.")
print("\nAll files downloaded successfully!")