"""
Upload VR Scene Evaluation Dataset to Hugging Face Hub

This script uploads a YOLO format dataset to Hugging Face Hub as a dataset repository.
For very large datasets, you can also use the command line approach:

1. Install huggingface-hub: pip install huggingface-hub
2. Login: huggingface-cli login
3. Upload: hf upload-large-folder <username>/<repo-name> /path/to/dataset --repo-type=dataset

This Python script provides more control and better error handling.
"""
|
|
import os
import shutil
import tempfile
from pathlib import Path

import yaml
from huggingface_hub import HfApi, create_repo, login
|
|
| |
# Name of the dataset repository to create on the Hugging Face Hub.
DATASET_NAME = "DISCOVR"
# Hub account name; None until main() resolves it from the logged-in user.
HF_USERNAME = None
# Local filesystem path to the YOLO-format dataset that will be uploaded.
DATASET_PATH = "/home/daniel/_datasets/post-2/aggregate"
# Hub repository type ("dataset", as opposed to "model" or "space").
REPO_TYPE = "dataset"
|
|
def load_dataset_config():
    """Read the dataset's data.yaml and return its parsed contents as a dict."""
    config_path = Path(DATASET_PATH) / "data.yaml"
    with open(config_path, 'r') as fh:
        return yaml.safe_load(fh)
|
|
def create_dataset_card(config):
    """Build the README.md (dataset card) text for the Hub repository.

    Args:
        config: Parsed data.yaml mapping. Must contain 'nc' (number of
            classes) and 'names' — either a list of class names or an
            {id: name} mapping, both of which YOLO data.yaml permits.

    Returns:
        str: Complete README.md content, including the YAML front matter
        that Hugging Face uses for dataset metadata.
    """

    class_names = config['names']
    num_classes = config['nc']

    # data.yaml stores 'names' either as a list (index == class id) or as an
    # {id: name} mapping. enumerate() over a dict would iterate keys only,
    # so normalize both forms to (id, name) pairs in class-id order.
    if isinstance(class_names, dict):
        class_pairs = sorted(class_names.items())
    else:
        class_pairs = enumerate(class_names)
    class_listing = "\n".join(f"- {i}: {name}" for i, name in class_pairs)

    readme_content = f"""---
license: cc-by-4.0
task_categories:
- object-detection
language:
- en
tags:
- computer-vision
- object-detection
- yolo
- virtual-reality
- vr
- scene-evaluation
size_categories:
- 1K<n<10K
---

# VR Scene Evaluation Dataset

## Dataset Description

This dataset contains {num_classes} object classes for VR scene evaluation, formatted for YOLO object detection models.

### Classes ({num_classes} total):
{class_listing}

## Dataset Structure

```
├── train/
│   ├── images/
│   └── labels/
├── valid/
│   ├── images/
│   └── labels/
├── test/
│   ├── images/
│   └── labels/
└── data.yaml
```

## Usage

### With YOLOv8

```python
from ultralytics import YOLO

# Load a model
model = YOLO('yolov8n.pt')

# Train the model
results = model.train(data='path/to/data.yaml', epochs=100, imgsz=640)
```

### With Hugging Face Datasets

```python
from datasets import load_dataset

dataset = load_dataset("{HF_USERNAME}/{DATASET_NAME}")
```

## License

This dataset is licensed under CC BY 4.0.

## Citation

```
@dataset{{vr_scene_evaluation,
  title={{VR Scene Evaluation Dataset}},
  year={{2025}},
  publisher={{Hugging Face}},
  version={{1.0}},
}}
```

## Original Source

This dataset was originally sourced from Roboflow:
- Workspace: my-workspace-zhz1m
- Project: vr-scene-evaluation-o1hbg
- Version: 6
- URL: https://universe.roboflow.com/my-workspace-zhz1m/vr-scene-evaluation-o1hbg/dataset/6
"""
    return readme_content
|
|
def prepare_upload_directory():
    """Stage the dataset's files in a fresh temp directory and return its path."""
    staging_dir = tempfile.mkdtemp()

    # Top-level metadata files — each copied only when it actually exists.
    for filename in ("data.yaml", "README.dataset.txt", "README.roboflow.txt"):
        candidate = os.path.join(DATASET_PATH, filename)
        if os.path.exists(candidate):
            shutil.copy2(candidate, staging_dir)

    # Image/label split directories, copied recursively when present.
    for split in ("train", "valid", "test"):
        split_src = os.path.join(DATASET_PATH, split)
        if os.path.exists(split_src):
            shutil.copytree(split_src, os.path.join(staging_dir, split))

    return staging_dir
|
|
def main():
    """Run the end-to-end upload: validate config, login, create repo, upload.

    Each stage prints progress and returns early on failure so no partial
    network work happens after a local error. Sets the module-level
    HF_USERNAME from the authenticated account so create_dataset_card()
    can embed it in the README.
    """
    global HF_USERNAME

    print("=== Hugging Face Dataset Upload Script ===")
    print(f"Dataset path: {DATASET_PATH}")
    print(f"Dataset name: {DATASET_NAME}")

    # Validate the local dataset before doing any network work.
    try:
        config = load_dataset_config()
        print(f"β Loaded dataset config: {config['nc']} classes")
    except Exception as e:
        print(f"β Error loading dataset config: {e}")
        return

    print("\n1. Logging into Hugging Face...")
    print("You need a Hugging Face account and access token.")
    print("Get your token from: https://huggingface.co/settings/tokens")

    try:
        login()
        api = HfApi()
        # whoami() both verifies the token and yields the account name.
        user_info = api.whoami()
        HF_USERNAME = user_info['name']
        print(f"β Logged in as: {HF_USERNAME}")
    except Exception as e:
        print(f"β Login failed: {e}")
        print("Make sure you have a valid token and internet connection.")
        return

    repo_id = f"{HF_USERNAME}/{DATASET_NAME}"
    print(f"\n2. Creating repository: {repo_id}")

    try:
        # exist_ok=True makes reruns idempotent if the repo already exists.
        create_repo(
            repo_id=repo_id,
            repo_type=REPO_TYPE,
            private=False,
            exist_ok=True
        )
        print("β Repository created/verified")
    except Exception as e:
        print(f"β Error creating repository: {e}")
        return

    print("\n3. Preparing files for upload...")
    try:
        upload_dir = prepare_upload_directory()
        print(f"β Files prepared in: {upload_dir}")

        # Write the dataset card into the staging dir so it uploads with
        # everything else.
        readme_content = create_dataset_card(config)
        with open(os.path.join(upload_dir, "README.md"), 'w') as f:
            f.write(readme_content)
        print("β Dataset card created")

    except Exception as e:
        print(f"β Error preparing files: {e}")
        return

    print(f"\n4. Uploading to {repo_id}...")
    print("This may take a while depending on dataset size...")
    print("Using upload_large_folder for better handling of large datasets...")

    try:
        # BUGFIX: upload_large_folder() does not accept a create_pr argument
        # (that parameter belongs to upload_folder()); passing it raised
        # TypeError and the upload never started.
        api.upload_large_folder(
            folder_path=upload_dir,
            repo_id=repo_id,
            repo_type=REPO_TYPE,
            num_workers=4,
            allow_patterns=["**/*"],
            ignore_patterns=[".git/**", "**/.DS_Store", "**/__pycache__/**"]
        )
        print("β Upload completed successfully!")
        print(f"\nπ Your dataset is now available at:")
        print(f"https://huggingface.co/datasets/{repo_id}")

    except Exception as e:
        print(f"β Upload failed: {e}")
        print("If the upload failed due to size, you can try:")
        print("1. Reducing the number of workers (num_workers parameter)")
        print("2. Using the command line: hf upload-large-folder")
        print("3. Splitting the dataset into smaller chunks")
        return
    finally:
        # Remove the staging directory whether or not the upload succeeded.
        shutil.rmtree(upload_dir)
        print("β Temporary files cleaned up")
|
|
# Run the upload workflow only when executed as a script, not on import.
if __name__ == "__main__":
    main()