# DISBench / download_images.py
# (Hugging Face file-page header: uploaded by MengjieDeng via huggingface_hub, rev 971265c verified)
"""
fetch photos of selected users from yfcc.
arguments:
--photo-ids-path: path to the directory of per-user photo id files
--images-path: path to the output images directory
--max-workers: maximum number of worker threads for downloading
--clear: if set, the downloaded status will be cleared and you can start downloading from scratch.
input:
- DISBench/photo_ids/<uid>.txt
- each line: {photo_id}\t{hash} or {photo_id}\t{hash}\t{status}
output:
- DISBench/images/<uid>/{photo_id}.jpg
- DISBench/photo_ids/<uid>.txt
- each line: {photo_id}\t{hash}\t{status}
description:
- read the photo ids file, and get the photo ids, hashes, and statuses.
- if clear is set, clear the status, save the file.
- for each photo, if the status is:
- "valid": skip this.
- "success": try to load this file, if the file is valid, set the status to "valid". else, set the status to "error".
- "error" or None: try to download this photo again.
- download photos from s3.
- url: https://multimedia-commons.s3-us-west-2.amazonaws.com/data/images/<first 3 chars in hash>/<next 3 chars in hash>/<hash>.jpg. for example, if the hash is 00024a73d1a4c32fb29732d56a2, the url is https://multimedia-commons.s3-us-west-2.amazonaws.com/data/images/000/24a/00024a73d1a4c32fb29732d56a2.jpg.
- save the photo to the photos folder.
- download with multithreading, workers set to 16 in main().
- if download is successful, set the status to "success". else, set the status to "error".
- save the status to the photo ids file for each user.
"""
import argparse
import os
import time
import requests
from pathlib import Path
from tqdm import tqdm
from concurrent.futures import ThreadPoolExecutor, as_completed
from PIL import Image
def construct_s3_url(hash_value):
    """Build the multimedia-commons S3 URL for a YFCC photo hash.

    The bucket shards objects by the first six characters of the hash:
    data/images/<hash[0:3]>/<hash[3:6]>/<hash>.jpg
    """
    prefix, shard = hash_value[:3], hash_value[3:6]
    return (
        "https://multimedia-commons.s3-us-west-2.amazonaws.com"
        f"/data/images/{prefix}/{shard}/{hash_value}.jpg"
    )
def validate_image_file(image_path):
    """Return True iff *image_path* exists and PIL can fully decode it."""
    if not image_path.exists():
        return False
    try:
        with Image.open(image_path) as img:
            img.load()  # force a full decode; Image.open alone is lazy
    except Exception:
        return False
    return True
def download_photo(photo_id, hash_value, output_path, uid):
    """Download a single photo from S3 with retry logic.

    Args:
        photo_id: YFCC photo id, echoed back so the caller can match results.
        hash_value: photo hash used to build the S3 key.
        output_path: pathlib.Path where the JPEG is written.
        uid: owning user id (kept for interface compatibility; unused here).

    Returns:
        (photo_id, "success", None) on success, or
        (photo_id, "error", <error message>) after all retries fail.
    """
    max_retries = 4
    url = construct_s3_url(hash_value)  # invariant across attempts; build once
    last_error = None
    for attempt in range(max_retries):
        try:
            # stream=True + context manager: write in chunks and guarantee the
            # connection is released even when an error occurs mid-body
            # (the original never closed the streamed response).
            with requests.get(url, timeout=30, stream=True) as response:
                response.raise_for_status()
                output_path.parent.mkdir(parents=True, exist_ok=True)
                with open(output_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        f.write(chunk)
            return photo_id, "success", None
        except Exception as e:
            last_error = e
            if attempt < max_retries - 1:
                time.sleep(1)  # brief delay before retry
    # All retries exhausted (the original's post-loop return was unreachable).
    return photo_id, "error", str(last_error)
def process_photo_ids_file(photo_ids_file, photos_base_dir, uid):
    """Parse one user's photo_ids file and decide what still needs downloading.

    Returns (tasks, photo_records, has_updates):
      - tasks: list of (photo_id, hash, output_path) tuples to download
        (status "error", no status, or a "success" file that failed validation);
      - photo_records: every parsed line as a dict with keys
        'photo_id', 'hash', 'status', 'output_path';
      - has_updates: True when any "success" status was rewritten in memory
        (promoted to "valid" or demoted to "error").
    """
    user_dir = photos_base_dir / uid
    user_dir.mkdir(parents=True, exist_ok=True)

    records = []   # every valid line, in file order
    pending = []   # photos that still need a download
    dirty = False  # any in-memory status change

    with open(photo_ids_file, 'r', encoding='utf-8') as fh:
        for raw in fh:
            raw = raw.strip()
            if not raw:
                continue
            fields = raw.split('\t')
            if len(fields) < 2:
                continue
            pid = fields[0].strip()
            digest = fields[1].strip()
            state = fields[2].strip() if len(fields) >= 3 else None
            if not pid or not digest:
                continue

            target = user_dir / f"{pid}.jpg"
            record = {
                'photo_id': pid,
                'hash': digest,
                'status': state,
                'output_path': target,
            }
            records.append(record)

            if state == "valid":
                # Already confirmed on disk; nothing to do.
                continue
            if state == "success":
                # Re-check the downloaded file: promote to "valid" or demote
                # to "error" and queue a re-download.
                if validate_image_file(target):
                    record['status'] = "valid"
                else:
                    record['status'] = "error"
                    pending.append((pid, digest, target))
                dirty = True
            elif state == "error" or state is None:
                pending.append((pid, digest, target))

    return pending, records, dirty
def save_photo_ids_file(photo_ids_file, photo_records):
    """Rewrite *photo_ids_file* from *photo_records*.

    Each line is photo_id<TAB>hash, with a third tab-separated status column
    only when the record carries a truthy status.
    """
    lines = []
    for rec in photo_records:
        columns = [rec['photo_id'], rec['hash']]
        if rec['status']:
            columns.append(rec['status'])
        lines.append('\t'.join(columns) + '\n')
    with open(photo_ids_file, 'w', encoding='utf-8') as out:
        out.writelines(lines)
def download_photos_for_uid(uid, photo_ids_file, photos_base_dir, max_workers):
    """Download all photos for a single user ID.

    Parses the user's photo_ids file, downloads every pending photo with a
    thread pool, records per-photo "success"/"error" statuses in memory, and
    writes the updated statuses back to the file.
    """
    tasks, photo_records, has_updates = process_photo_ids_file(photo_ids_file, photos_base_dir, uid)
    # Create a mapping from photo_id to record for easy updates
    photo_id_to_record = {record['photo_id']: record for record in photo_records}
    # Count initial statistics
    valid_count = sum(1 for r in photo_records if r['status'] == "valid")
    # "skipped" = any non-empty status other than "error" (i.e. "valid" or "success")
    skipped_count = sum(1 for r in photo_records if r['status'] and r['status'] != "error")
    if not tasks:
        print(f"{uid}: No photos to download ({skipped_count} already processed, {valid_count} valid)")
        # Save the file if statuses were updated (e.g. "success" promoted to "valid")
        if has_updates:
            save_photo_ids_file(photo_ids_file, photo_records)
        return
    print(f"Processing {uid}: {len(tasks)} photos to download ({skipped_count} skipped, {valid_count} valid)")
    success_count = 0
    error_count = 0
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        # Map each future back to its (photo_id, hash) so failures can be attributed.
        futures = {
            executor.submit(download_photo, photo_id, hash_value, output_path, uid): (photo_id, hash_value)
            for photo_id, hash_value, output_path in tasks
        }
        for future in tqdm(as_completed(futures), total=len(futures), desc=f"Downloading {uid}"):
            photo_id, hash_value = futures[future]
            try:
                result_photo_id, status, error = future.result()
                # Update the record with the download status
                if result_photo_id in photo_id_to_record:
                    photo_id_to_record[result_photo_id]['status'] = status
                    has_updates = True
                if status == "success":
                    success_count += 1
                elif status == "error":
                    error_count += 1
                    if error:
                        print(f"\nError downloading {result_photo_id}: {error}")
                # Sleep for 2 seconds every 50 requests (crude S3 rate limiting)
                if (success_count + error_count) % 50 == 0:
                    time.sleep(2)
            except Exception as e:
                # future.result() re-raises anything download_photo let escape.
                error_count += 1
                print(f"\nException for {photo_id}: {e}")
    # Save updated photo_ids file with download statuses only if there were updates
    if has_updates:
        save_photo_ids_file(photo_ids_file, photo_records)
    print(f"Completed {uid}: {success_count} downloaded, {error_count} errors, {skipped_count} skipped, {valid_count} valid")
def clear_statuses(photo_ids_file):
    """Clear all statuses from *photo_ids_file* and save it back.

    Every kept line is rewritten as photo_id<TAB>hash with no status column,
    so all photos look not-yet-downloaded on the next run.
    """
    photo_records = []
    with open(photo_ids_file, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                continue  # blank line: original raised IndexError on parts[1]
            parts = line.split('\t')
            if len(parts) < 2:
                continue  # malformed line: skip, matching process_photo_ids_file
            photo_records.append({
                'photo_id': parts[0].strip(),
                'hash': parts[1].strip(),
                'status': None,  # cleared
            })
    # Save with cleared statuses
    save_photo_ids_file(photo_ids_file, photo_records)
def main():
    """Parse CLI arguments and process every user's photo_ids file.

    Walks every *.txt file under --photo-ids-path (one per user; the stem is
    the uid) and either clears all download statuses (--clear) or downloads
    the missing photos into --images-path/<uid>/.
    """
    parser = argparse.ArgumentParser(description="Fetch photos of selected users from YFCC")
    parser.add_argument(
        "--photo-ids-path",
        type=str,
        default="photo_ids",
        # Help text fixed: it previously said "Path to the DISBench dataset",
        # which describes a --dataset-path argument that does not exist.
        help="Path to the directory of per-user photo id files"
    )
    parser.add_argument(
        "--images-path",
        type=str,
        default="images",
        help="Path to the images directory"
    )
    parser.add_argument(
        "--max-workers",
        type=int,
        default=16,
        help="Maximum number of worker threads for downloading"
    )
    parser.add_argument(
        "--clear",
        action="store_true",
        help="Clear the downloaded status and exit"
    )
    args = parser.parse_args()

    photo_ids_path = Path(args.photo_ids_path)
    images_path = Path(args.images_path)
    max_workers = args.max_workers

    # One .txt file per user; sort for deterministic processing order.
    photo_ids_files = sorted(photo_ids_path.glob("*.txt"))
    if not photo_ids_files:
        print(f"No photo_ids files found in {photo_ids_path}")
        return

    # If clear is set, reset every status and exit.
    if args.clear:
        print(f"Clearing statuses for {len(photo_ids_files)} photo_ids files...")
        for photo_ids_file in photo_ids_files:
            uid = photo_ids_file.stem
            print(f"Clearing statuses for {uid}...")
            clear_statuses(photo_ids_file)
        print("All statuses cleared!")
        return

    print(f"Found {len(photo_ids_files)} photo_ids files to process")
    # Process each user's photo_ids file.
    for photo_ids_file in photo_ids_files:
        uid = photo_ids_file.stem  # filename without extension
        download_photos_for_uid(uid, photo_ids_file, images_path, max_workers)
    print("All downloads completed!")
# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()