{ "cells": [ { "cell_type": "code", "execution_count": null, "id": "55b82167", "metadata": { "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19", "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5", "id": "yhVNR6GETKyA", "papermill": { "duration": 0.004512, "end_time": "2026-01-25T04:40:26.589927", "exception": false, "start_time": "2026-01-25T04:40:26.585415", "status": "completed" }, "tags": [] }, "outputs": [], "source": [] }, { "cell_type": "code", "execution_count": 1, "id": "1c32a17b", "metadata": { "_kg_hide-output": true, "execution": { "iopub.execute_input": "2026-01-25T04:40:26.598614Z", "iopub.status.busy": "2026-01-25T04:40:26.598122Z", "iopub.status.idle": "2026-01-25T04:40:55.152139Z", "shell.execute_reply": "2026-01-25T04:40:55.151461Z" }, "id": "6C3QGJD8TKyC", "outputId": "b362f97d-fbc1-474f-f2cb-b84b565acdb9", "papermill": { "duration": 28.560329, "end_time": "2026-01-25T04:40:55.154003", "exception": false, "start_time": "2026-01-25T04:40:26.593674", "status": "completed" }, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Collecting roma\r\n", " Downloading roma-1.5.4-py3-none-any.whl.metadata (5.5 kB)\r\n", "Requirement already satisfied: einops in /usr/local/lib/python3.12/dist-packages (0.8.1)\r\n", "Requirement already satisfied: timm in /usr/local/lib/python3.12/dist-packages (1.0.20)\r\n", "Requirement already satisfied: huggingface_hub in /usr/local/lib/python3.12/dist-packages (0.36.0)\r\n", "Requirement already satisfied: torch in /usr/local/lib/python3.12/dist-packages (from timm) (2.8.0+cu126)\r\n", "Requirement already satisfied: torchvision in /usr/local/lib/python3.12/dist-packages (from timm) (0.23.0+cu126)\r\n", "Requirement already satisfied: pyyaml in /usr/local/lib/python3.12/dist-packages (from timm) (6.0.3)\r\n", "Requirement already satisfied: safetensors in /usr/local/lib/python3.12/dist-packages (from timm) (0.6.2)\r\n", "Requirement already satisfied: filelock in 
/usr/local/lib/python3.12/dist-packages (from huggingface_hub) (3.20.3)\r\n", "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2025.10.0)\r\n", "Requirement already satisfied: packaging>=20.9 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (26.0rc2)\r\n", "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (2.32.5)\r\n", "Requirement already satisfied: tqdm>=4.42.1 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.67.1)\r\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (4.15.0)\r\n", "Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface_hub) (1.2.1rc0)\r\n", "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.4.4)\r\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (3.11)\r\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2.6.3)\r\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->huggingface_hub) (2026.1.4)\r\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.12/dist-packages (from torch->timm) (75.2.0)\r\n", "Requirement already satisfied: sympy>=1.13.3 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (1.13.3)\r\n", "Requirement already satisfied: networkx in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.5)\r\n", "Requirement already satisfied: jinja2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (3.1.6)\r\n", "Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.6.77 in 
/usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\r\n", "Requirement already satisfied: nvidia-cuda-runtime-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\r\n", "Requirement already satisfied: nvidia-cuda-cupti-cu12==12.6.80 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.80)\r\n", "Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (9.10.2.21)\r\n", "Requirement already satisfied: nvidia-cublas-cu12==12.6.4.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.4.1)\r\n", "Requirement already satisfied: nvidia-cufft-cu12==11.3.0.4 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (11.3.0.4)\r\n", "Requirement already satisfied: nvidia-curand-cu12==10.3.7.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (10.3.7.77)\r\n", "Requirement already satisfied: nvidia-cusolver-cu12==11.7.1.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (11.7.1.2)\r\n", "Requirement already satisfied: nvidia-cusparse-cu12==12.5.4.2 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.5.4.2)\r\n", "Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (0.7.1)\r\n", "Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (2.27.3)\r\n", "Requirement already satisfied: nvidia-nvtx-cu12==12.6.77 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.77)\r\n", "Requirement already satisfied: nvidia-nvjitlink-cu12==12.6.85 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (12.6.85)\r\n", "Requirement already satisfied: nvidia-cufile-cu12==1.11.1.6 in /usr/local/lib/python3.12/dist-packages (from torch->timm) (1.11.1.6)\r\n", "Requirement already satisfied: triton==3.4.0 in /usr/local/lib/python3.12/dist-packages (from 
torch->timm) (3.4.0)\r\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from torchvision->timm) (2.0.2)\r\n", "Requirement already satisfied: pillow!=8.3.*,>=5.3.0 in /usr/local/lib/python3.12/dist-packages (from torchvision->timm) (11.3.0)\r\n", "Requirement already satisfied: mpmath<1.4,>=1.1.0 in /usr/local/lib/python3.12/dist-packages (from sympy>=1.13.3->torch->timm) (1.3.0)\r\n", "Requirement already satisfied: MarkupSafe>=2.0 in /usr/local/lib/python3.12/dist-packages (from jinja2->torch->timm) (3.0.3)\r\n", "Downloading roma-1.5.4-py3-none-any.whl (25 kB)\r\n", "Installing collected packages: roma\r\n", "Successfully installed roma-1.5.4\r\n", "Requirement already satisfied: opencv-python in /usr/local/lib/python3.12/dist-packages (4.12.0.88)\r\n", "Requirement already satisfied: pillow in /usr/local/lib/python3.12/dist-packages (11.3.0)\r\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.12/dist-packages (4.67.1)\r\n", "Requirement already satisfied: pyaml in /usr/local/lib/python3.12/dist-packages (25.7.0)\r\n", "Requirement already satisfied: cython in /usr/local/lib/python3.12/dist-packages (3.0.12)\r\n", "Collecting plyfile\r\n", " Downloading plyfile-1.1.3-py3-none-any.whl.metadata (43 kB)\r\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m43.3/43.3 kB\u001b[0m \u001b[31m1.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hRequirement already satisfied: numpy<2.3.0,>=2 in /usr/local/lib/python3.12/dist-packages (from opencv-python) (2.0.2)\r\n", "Requirement already satisfied: PyYAML in /usr/local/lib/python3.12/dist-packages (from pyaml) (6.0.3)\r\n", "Downloading plyfile-1.1.3-py3-none-any.whl (36 kB)\r\n", "Installing collected packages: plyfile\r\n", "Successfully installed plyfile-1.1.3\r\n", "Collecting pycolmap\r\n", " Downloading pycolmap-3.13.0-cp312-cp312-manylinux_2_28_x86_64.whl.metadata (10 kB)\r\n", "Collecting trimesh\r\n", " 
Downloading trimesh-4.11.1-py3-none-any.whl.metadata (13 kB)\r\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.12/dist-packages (from pycolmap) (2.0.2)\r\n", "Downloading pycolmap-3.13.0-cp312-cp312-manylinux_2_28_x86_64.whl (20.3 MB)\r\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m20.3/20.3 MB\u001b[0m \u001b[31m55.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hDownloading trimesh-4.11.1-py3-none-any.whl (740 kB)\r\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m740.4/740.4 kB\u001b[0m \u001b[31m43.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hInstalling collected packages: trimesh, pycolmap\r\n", "Successfully installed pycolmap-3.13.0 trimesh-4.11.1\r\n", "Collecting transformers==4.40.0\r\n", " Downloading transformers-4.40.0-py3-none-any.whl.metadata (137 kB)\r\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m137.6/137.6 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hRequirement already satisfied: filelock in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (3.20.3)\r\n", "Requirement already satisfied: huggingface-hub<1.0,>=0.19.3 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (0.36.0)\r\n", "Requirement already satisfied: numpy>=1.17 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (2.0.2)\r\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (26.0rc2)\r\n", "Requirement already satisfied: pyyaml>=5.1 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (6.0.3)\r\n", "Requirement already satisfied: regex!=2019.12.17 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (2025.11.3)\r\n", "Requirement already satisfied: requests in /usr/local/lib/python3.12/dist-packages (from 
transformers==4.40.0) (2.32.5)\r\n", "Collecting tokenizers<0.20,>=0.19 (from transformers==4.40.0)\r\n", " Downloading tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (6.7 kB)\r\n", "Requirement already satisfied: safetensors>=0.4.1 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (0.6.2)\r\n", "Requirement already satisfied: tqdm>=4.27 in /usr/local/lib/python3.12/dist-packages (from transformers==4.40.0) (4.67.1)\r\n", "Requirement already satisfied: fsspec>=2023.5.0 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<1.0,>=0.19.3->transformers==4.40.0) (2025.10.0)\r\n", "Requirement already satisfied: typing-extensions>=3.7.4.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<1.0,>=0.19.3->transformers==4.40.0) (4.15.0)\r\n", "Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in /usr/local/lib/python3.12/dist-packages (from huggingface-hub<1.0,>=0.19.3->transformers==4.40.0) (1.2.1rc0)\r\n", "Requirement already satisfied: charset_normalizer<4,>=2 in /usr/local/lib/python3.12/dist-packages (from requests->transformers==4.40.0) (3.4.4)\r\n", "Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.12/dist-packages (from requests->transformers==4.40.0) (3.11)\r\n", "Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.12/dist-packages (from requests->transformers==4.40.0) (2.6.3)\r\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.12/dist-packages (from requests->transformers==4.40.0) (2026.1.4)\r\n", "Downloading transformers-4.40.0-py3-none-any.whl (9.0 MB)\r\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m9.0/9.0 MB\u001b[0m \u001b[31m67.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hDownloading tokenizers-0.19.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (3.6 MB)\r\n", "\u001b[2K 
\u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.6/3.6 MB\u001b[0m \u001b[31m99.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\r\n", "\u001b[?25hInstalling collected packages: tokenizers, transformers\r\n", " Attempting uninstall: tokenizers\r\n", " Found existing installation: tokenizers 0.22.1\r\n", " Uninstalling tokenizers-0.22.1:\r\n", " Successfully uninstalled tokenizers-0.22.1\r\n", " Attempting uninstall: transformers\r\n", " Found existing installation: transformers 4.57.1\r\n", " Uninstalling transformers-4.57.1:\r\n", " Successfully uninstalled transformers-4.57.1\r\n", "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\r\n", "sentence-transformers 5.1.1 requires transformers<5.0.0,>=4.41.0, but you have transformers 4.40.0 which is incompatible.\u001b[0m\u001b[31m\r\n", "\u001b[0mSuccessfully installed tokenizers-0.19.1 transformers-4.40.0\r\n", "Requirement already satisfied: scipy in /usr/local/lib/python3.12/dist-packages (1.15.3)\r\n", "Requirement already satisfied: numpy<2.5,>=1.23.5 in /usr/local/lib/python3.12/dist-packages (from scipy) (2.0.2)\r\n" ] } ], "source": [ "\n", "# =====================================================================\n", "# CELL 1: Install Dependencies\n", "# =====================================================================\n", "!pip install roma einops timm huggingface_hub\n", "!pip install opencv-python pillow tqdm pyaml cython plyfile\n", "!pip install pycolmap trimesh\n", "!pip install transformers==4.40.0\n", "!pip install scipy\n", "\n", "#!pip uninstall -y numpy scipy\n", "#!pip install numpy==1.26.4 scipy==1.11.4\n", "\n", "#print()\n", "#print(\"Restart, Run After\")\n", "#break\n" ] }, { "cell_type": "code", "execution_count": 2, "id": "4f1ebf2e", "metadata": { "execution": { "iopub.execute_input": "2026-01-25T04:40:55.166226Z", 
"iopub.status.busy": "2026-01-25T04:40:55.165647Z", "iopub.status.idle": "2026-01-25T04:41:01.826654Z", "shell.execute_reply": "2026-01-25T04:41:01.825659Z" }, "id": "OWJEB1oQTKyD", "outputId": "fa123527-2b15-4fa5-8d3c-c830ccc43365", "papermill": { "duration": 6.668774, "end_time": "2026-01-25T04:41:01.828379", "exception": false, "start_time": "2026-01-25T04:40:55.159605", "status": "completed" }, "tags": [] }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Cloning MASt3R repository...\n", "Cloning into '/kaggle/working/mast3r'...\r\n", "remote: Enumerating objects: 269, done.\u001b[K\r\n", "remote: Counting objects: 100% (170/170), done.\u001b[K\r\n", "remote: Compressing objects: 100% (61/61), done.\u001b[K\r\n", "remote: Total 269 (delta 115), reused 109 (delta 109), pack-reused 99 (from 1)\u001b[K\r\n", "Receiving objects: 100% (269/269), 3.59 MiB | 20.32 MiB/s, done.\r\n", "Resolving deltas: 100% (151/151), done.\r\n", "Submodule 'dust3r' (https://github.com/naver/dust3r) registered for path 'dust3r'\r\n", "Cloning into '/kaggle/working/mast3r/dust3r'...\r\n", "remote: Enumerating objects: 611, done. \r\n", "remote: Total 611 (delta 0), reused 0 (delta 0), pack-reused 611 (from 1) \r\n", "Receiving objects: 100% (611/611), 756.60 KiB | 7.27 MiB/s, done.\r\n", "Resolving deltas: 100% (355/355), done.\r\n", "Submodule path 'dust3r': checked out '3cc8c88c413bb9e34c41db0e0eef99c2ee010b12'\r\n", "Submodule 'croco' (https://github.com/naver/croco) registered for path 'dust3r/croco'\r\n", "Cloning into '/kaggle/working/mast3r/dust3r/croco'...\r\n", "remote: Enumerating objects: 198, done. \r\n", "remote: Counting objects: 100% (87/87), done. \r\n", "remote: Compressing objects: 100% (54/54), done. 
\r\n", "remote: Total 198 (delta 54), reused 33 (delta 33), pack-reused 111 (from 1) \r\n", "Receiving objects: 100% (198/198), 403.93 KiB | 4.87 MiB/s, done.\r\n", "Resolving deltas: 100% (94/94), done.\r\n", "Submodule path 'dust3r/croco': checked out 'd7de0705845239092414480bd829228723bf20de'\r\n", "✓ MASt3R cloned\n", "✓ DUSt3R already exists\n", "Warning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead\n", "✓ dust3r.model imported successfully\n", "Cloning CroCo repository...\n", "Cloning into '/kaggle/working/mast3r/croco'...\r\n", "remote: Enumerating objects: 198, done.\u001b[K\r\n", "remote: Counting objects: 100% (87/87), done.\u001b[K\r\n", "remote: Compressing objects: 100% (54/54), done.\u001b[K\r\n", "remote: Total 198 (delta 54), reused 33 (delta 33), pack-reused 111 (from 1)\u001b[K\r\n", "Receiving objects: 100% (198/198), 403.93 KiB | 5.11 MiB/s, done.\r\n", "Resolving deltas: 100% (94/94), done.\r\n", "✓ CroCo cloned\n" ] } ], "source": [ "# =====================================================================\n", "# CELL 3: Clone Repositories\n", "# =====================================================================\n", "import os\n", "import sys\n", "\n", "# Clone MASt3R\n", "if not os.path.exists('/kaggle/working/mast3r'):\n", " print(\"Cloning MASt3R repository...\")\n", " !git clone --recursive https://github.com/naver/mast3r.git /kaggle/working/mast3r\n", " print(\"✓ MASt3R cloned\")\n", "else:\n", " print(\"✓ MASt3R already exists\")\n", "\n", "# Clone DUSt3R (Required inside MASt3R)\n", "if not os.path.exists('/kaggle/working/mast3r/dust3r'):\n", " print(\"Cloning DUSt3R repository...\")\n", " !git clone --recursive https://github.com/naver/dust3r.git /kaggle/working/mast3r/dust3r\n", " print(\"✓ DUSt3R cloned\")\n", "else:\n", " print(\"✓ DUSt3R already exists\")\n", "\n", "# Add directories to system path\n", "sys.path.insert(0, '/kaggle/working/mast3r')\n", "sys.path.insert(0, 
'/kaggle/working/mast3r/dust3r')\n", "\n", "# Verify imports\n", "try:\n", " from dust3r.model import AsymmetricCroCo3DStereo\n", " print(\"✓ dust3r.model imported successfully\")\n", "except ImportError as e:\n", " print(f\"✗ Import error: {e}\")\n", "\n", "# Clone CroCo (Dependency for MASt3R)\n", "if not os.path.exists('/kaggle/working/mast3r/croco'):\n", " print(\"Cloning CroCo repository...\")\n", " !git clone --recursive https://github.com/naver/croco.git /kaggle/working/mast3r/croco\n", " print(\"✓ CroCo cloned\")" ] }, { "cell_type": "code", "execution_count": 3, "id": "d9c6184e", "metadata": { "execution": { "iopub.execute_input": "2026-01-25T04:41:01.844809Z", "iopub.status.busy": "2026-01-25T04:41:01.843843Z", "iopub.status.idle": "2026-01-25T04:41:19.355800Z", "shell.execute_reply": "2026-01-25T04:41:19.355094Z" }, "id": "OWJEB1oQTKyD", "outputId": "fa123527-2b15-4fa5-8d3c-c830ccc43365", "papermill": { "duration": 17.521588, "end_time": "2026-01-25T04:41:19.357479", "exception": false, "start_time": "2026-01-25T04:41:01.835891", "status": "completed" }, "tags": [] }, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2026-01-25 04:41:06.481629: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", "E0000 00:00:1769316066.702759 23 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "E0000 00:00:1769316066.768493 23 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "W0000 00:00:1769316067.325904 23 computation_placer.cc:177] computation placer already registered. 
# =====================================================================
# CELL 5: Import Core Libraries and Configure Memory
# =====================================================================
import os
import sys
import gc

# MEMORY MANAGEMENT
# Set before `import torch` so the CUDA caching allocator is guaranteed
# to pick the option up when it initializes.
os.environ['PYTORCH_CUDA_ALLOC_CONF'] = 'expandable_segments:True'

import torch
import numpy as np
from pathlib import Path
from tqdm import tqdm
import torch.nn.functional as F
import shutil
from PIL import Image
from transformers import AutoImageProcessor, AutoModel


def clear_memory():
    """Run the garbage collector and release cached GPU memory (no-op on CPU)."""
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.synchronize()


def get_memory_info():
    """Print current GPU (allocated/reserved) and CPU memory usage."""
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        reserved = torch.cuda.memory_reserved() / 1024**3
        print(f"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB")

    import psutil  # imported lazily — only needed for this diagnostic
    cpu_mem = psutil.virtual_memory().percent
    print(f"CPU Memory Usage: {cpu_mem:.1f}%")


# CONFIGURATION
class Config:
    DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Model Weights
    MAST3R_WEIGHTS = "naver/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric"
    DUSt3R_WEIGHTS = "naver/DUSt3R_ViTLarge_BaseDecoder_512_dpt"

    # DINO Settings
    DINO_MODEL = "facebook/dinov2-base"
    GLOBAL_TOPK = 20   # Top K pairs to match for each image
    IMAGE_SIZE = 224   # side length used when loading images for MASt3R
# =====================================================================
# CELL 6: Image Preprocessing Functions (Biplet)
# =====================================================================
def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):
    """
    Generates two square crops (Left & Right or Top & Bottom)
    from each image in a directory.

    Args:
        input_dir: directory holding the source images.
        output_dir: destination for the crops (defaults to `<input_dir>_biplet`).
        size: side length, in pixels, of each square output crop.

    Returns:
        The output directory path.
    """
    if output_dir is None:
        output_dir = input_dir + "_biplet"

    os.makedirs(output_dir, exist_ok=True)

    print(f"\n=== Generating Biplet Crops ({size}x{size}) ===")

    converted_count = 0
    size_stats = {}

    for img_file in tqdm(sorted(os.listdir(input_dir)), desc="Creating biplets"):
        if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):
            continue

        input_path = os.path.join(input_dir, img_file)

        try:
            # Context manager closes the underlying file handle promptly —
            # the original left every opened image's handle dangling, which
            # can exhaust file descriptors on large directories.
            with Image.open(input_path) as img:
                original_size = img.size

                size_key = f"{original_size[0]}x{original_size[1]}"
                size_stats[size_key] = size_stats.get(size_key, 0) + 1

                # Generate 2 crops (crops are new Image objects, safe to
                # use after the source file is closed)
                crops = generate_two_crops(img, size)

            base_name, ext = os.path.splitext(img_file)
            for mode, cropped_img in crops.items():
                output_path = os.path.join(output_dir, f"{base_name}_{mode}{ext}")
                # `quality` applies to JPEG output; PIL ignores it for PNG
                cropped_img.save(output_path, quality=95)

            converted_count += 1

        except Exception as e:
            print(f" ✗ Error processing {img_file}: {e}")

    print(f"\n✓ Biplet generation complete:")
    print(f" Source images: {converted_count}")
    print(f" Biplet crops generated: {converted_count * 2}")
    print(f" Original size distribution: {size_stats}")

    return output_dir


def generate_two_crops(img, size):
    """
    Crops the image into a square and returns 2 variations.

    Landscape inputs yield 'left'/'right' crops; portrait or square
    inputs yield 'top'/'bottom'. Each crop is resized to (size, size)
    using LANCZOS resampling.
    """
    width, height = img.size
    crop_size = min(width, height)
    crops = {}

    if width > height:
        # Landscape → Left & Right
        positions = {
            'left': 0,
            'right': width - crop_size
        }
        for mode, x_offset in positions.items():
            box = (x_offset, 0, x_offset + crop_size, crop_size)
            crops[mode] = img.crop(box).resize(
                (size, size),
                Image.Resampling.LANCZOS
            )
    else:
        # Portrait or Square → Top & Bottom
        positions = {
            'top': 0,
            'bottom': height - crop_size
        }
        for mode, y_offset in positions.items():
            box = (0, y_offset, crop_size, y_offset + crop_size)
            crops[mode] = img.crop(box).resize(
                (size, size),
                Image.Resampling.LANCZOS
            )

    return crops

# =====================================================================
# CELL 7: Image Loading Function
# =====================================================================
def load_images_from_directory(image_dir, max_images=200):
    """Collect up to `max_images` image paths (sorted) from a directory.

    Matches .jpg/.jpeg/.png/.bmp in lower or upper case; the set()
    dedups double-matches on case-insensitive filesystems.
    """
    print(f"\nLoading images from: {image_dir}")

    valid_extensions = {'.jpg', '.jpeg', '.png', '.bmp'}
    image_paths = []

    for ext in valid_extensions:
        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext}')))
        image_paths.extend(sorted(Path(image_dir).glob(f'*{ext.upper()}')))

    image_paths = sorted(set(str(p) for p in image_paths))

    if len(image_paths) > max_images:
        print(f"⚠️ Limiting from {len(image_paths)} to {max_images} images")
        image_paths = image_paths[:max_images]

    print(f"✓ Found {len(image_paths)} images")
    return image_paths

# =====================================================================
# CELL 8: MASt3R Model Loading
# =====================================================================
def load_mast3r_model(device):
    """Load the MASt3R checkpoint, falling back to DUSt3R on failure.

    Args:
        device: torch device to place the model on.

    Returns:
        The loaded model in eval mode.

    Raises:
        RuntimeError: if neither MASt3R nor DUSt3R can be loaded.
    """
    print("\n=== Loading MASt3R Model ===")
    if '/kaggle/working/mast3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r')
    if '/kaggle/working/mast3r/dust3r' not in sys.path:
        sys.path.insert(0, '/kaggle/working/mast3r/dust3r')

    try:
        print(f"Attempting to load: {Config.MAST3R_WEIGHTS}")
        # Use the correct MASt3R model class
        from mast3r.model import AsymmetricMASt3R
        model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_WEIGHTS).to(device)
        print("✓ Loaded MASt3R model")
    except Exception as e:
        print(f"⚠️ Failed to load MASt3R: {e}")
        print(f"Trying DUSt3R instead: {Config.DUSt3R_WEIGHTS}")
        try:
            # Use DUSt3R model class for fallback
            from dust3r.model import AsymmetricCroCo3DStereo
            model = AsymmetricCroCo3DStereo.from_pretrained(Config.DUSt3R_WEIGHTS).to(device)
            print("✓ Loaded DUSt3R model as fallback")
        except Exception as e2:
            print(f"⚠️ Failed to load DUSt3R: {e2}")
            raise RuntimeError("Could not load either MASt3R or DUSt3R models") from e2

    model.eval()  # inference only — freeze dropout/batchnorm behaviour
    print(f"✓ Model loaded on {device}")
    return model
# =====================================================================
# CELL 9: DINO Pair Selection (REPLACES ASMK)
# =====================================================================
def load_torch_image(fname, device):
    """Load image as a (1, C, H, W) float tensor in [0, 1] on `device`."""
    import torchvision.transforms as T

    img = Image.open(fname).convert('RGB')
    transform = T.Compose([
        T.ToTensor(),
    ])
    return transform(img).unsqueeze(0).to(device)

def extract_dino_global(image_paths, model_path, device):
    """Extract one L2-normalised DINO global descriptor per image.

    Images are processed in small batches; patch tokens (CLS excluded)
    are max-pooled over the sequence dimension, then L2-normalised.

    Returns:
        (N, D) float tensor on CPU.
    """
    print("\n=== Extracting DINO Global Features ===")
    print("Initial memory state:")
    get_memory_info()

    processor = AutoImageProcessor.from_pretrained(model_path)
    model = AutoModel.from_pretrained(model_path).eval().to(device)

    global_descs = []
    batch_size = 4  # Small batch to save memory

    for i in tqdm(range(0, len(image_paths), batch_size), desc="DINO extraction"):
        batch_paths = image_paths[i:i+batch_size]
        batch_imgs = [load_torch_image(p, device) for p in batch_paths]
        batch_tensor = torch.cat(batch_imgs, dim=0)

        with torch.no_grad():
            inputs = processor(images=batch_tensor, return_tensors="pt", do_rescale=False).to(device)
            outputs = model(**inputs)
            # drop CLS token ([:, 1:]), max-pool patch tokens, L2-normalise
            desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)
            global_descs.append(desc.cpu())

        # Clear batch memory
        del batch_tensor, inputs, outputs, desc
        clear_memory()

    global_descs = torch.cat(global_descs, dim=0)

    del model, processor
    clear_memory()

    print("After DINO extraction:")
    get_memory_info()

    return global_descs

def build_topk_pairs(global_feats, k, device):
    """Build unique image pairs from each image's top-k most similar peers.

    Fix vs. original: the original kept a neighbour (i, j) only when
    i < j, silently dropping any top-k relation whose neighbour had a
    smaller index unless the relation happened to be mutual. Pairs are
    now canonicalised to (min, max) so every relation is preserved, and
    the result is sorted for deterministic downstream processing.

    Returns:
        Sorted list of (i, j) tuples with i < j.
    """
    g = global_feats.to(device)
    sim = g @ g.T
    sim.fill_diagonal_(-1)  # an image must never pair with itself

    N = sim.size(0)
    k = min(k, N - 1)

    topk_indices = torch.topk(sim, k, dim=1).indices.cpu()

    pairs = set()
    for i in range(N):
        for j in topk_indices[i].tolist():
            if i != j:  # guard against degenerate ties with the -1 diagonal
                pairs.add((min(i, j), max(i, j)))

    return sorted(pairs)

def select_diverse_pairs(pairs, max_pairs, num_images):
    """
    Select diverse pairs to ensure good image coverage.

    Phase 1 greedily keeps pairs that introduce not-yet-covered images,
    preferring pairs whose images occur in few candidates; phase 2 fills
    any remaining budget with a seeded random sample of the leftovers.
    """
    import random
    random.seed(42)

    if len(pairs) <= max_pairs:
        return pairs

    print(f"Selecting {max_pairs} diverse pairs from {len(pairs)} candidates...")

    # Count how many times each image appears in pairs
    image_counts = {i: 0 for i in range(num_images)}
    for i, j in pairs:
        image_counts[i] += 1
        image_counts[j] += 1

    # Sort pairs by: prefer pairs with less-connected images
    def pair_score(pair):
        i, j = pair
        return image_counts[i] + image_counts[j]

    pairs_scored = [(pair, pair_score(pair)) for pair in pairs]
    pairs_scored.sort(key=lambda x: x[1])

    # Select pairs greedily to maximize coverage
    selected = []
    selected_images = set()

    # Phase 1: Select pairs that add new images
    for pair, score in pairs_scored:
        if len(selected) >= max_pairs:
            break
        i, j = pair
        if i not in selected_images or j not in selected_images:
            selected.append(pair)
            selected_images.add(i)
            selected_images.add(j)

    # Phase 2: Fill remaining slots. Membership via a set — the
    # original's `p not in selected` list scan was accidentally O(n^2).
    if len(selected) < max_pairs:
        already_chosen = set(selected)
        remaining = [p for p, s in pairs_scored if p not in already_chosen]
        random.shuffle(remaining)
        selected.extend(remaining[:max_pairs - len(selected)])

    print(f"Selected pairs cover {len(selected_images)} / {num_images} images ({100*len(selected_images)/num_images:.1f}%)")

    return selected

def get_image_pairs_dino(image_paths, max_pairs=None):
    """DINO-based pair selection: global descriptors → top-k candidate
    pairs → optional diversity-aware down-selection to `max_pairs`."""
    device = Config.DEVICE

    # DINO global features
    global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)
    pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)

    print(f"Initial pairs from DINO: {len(pairs)}")

    # Apply intelligent pair selection if limit specified
    if max_pairs and len(pairs) > max_pairs:
        pairs = select_diverse_pairs(pairs, max_pairs, len(image_paths))

    return pairs
# =====================================================================
# CELL 10: MASt3R Reconstruction
# =====================================================================
def run_mast3r_pairs(model, image_paths, pairs, device, batch_size=1, max_pairs=None):
    """Run MASt3R on selected pairs with memory management.

    Args:
        model: loaded MASt3R model.
        image_paths: list of image file paths (indexed by the pair tuples).
        pairs: list of (idx1, idx2) image-index tuples.
        device: torch device for inference and alignment.
        batch_size: inference batch size.
        max_pairs: optional cap; pairs are strided-subsampled to this count.

    Returns:
        (scene, images): the globally-aligned scene object and the loaded images.
    """
    print("\n=== Running MASt3R Reconstruction ===")
    print("Initial memory state:")
    get_memory_info()

    from dust3r.inference import inference
    from dust3r.cloud_opt import global_aligner, GlobalAlignerMode
    from dust3r.utils.image import load_images

    # Limit number of pairs if specified.
    # Strided subsampling keeps pairs spread across the whole list rather
    # than truncating to the first max_pairs.
    if max_pairs and len(pairs) > max_pairs:
        print(f"Limiting pairs from {len(pairs)} to {max_pairs}")
        step = max(1, len(pairs) // max_pairs)
        pairs = pairs[::step][:max_pairs]

    print(f"Processing {len(pairs)} pairs...")

    # Load images in smaller size
    print(f"Loading {len(image_paths)} images at {Config.IMAGE_SIZE}x{Config.IMAGE_SIZE}...")
    images = load_images(image_paths, size=Config.IMAGE_SIZE)

    print(f"Loaded {len(images)} images")
    print("After loading images:")
    get_memory_info()

    # Create all image pairs
    print(f"Creating {len(pairs)} image pairs...")
    mast3r_pairs = []
    for idx1, idx2 in tqdm(pairs, desc="Preparing pairs"):
        mast3r_pairs.append((images[idx1], images[idx2]))

    print(f"Running MASt3R inference on {len(mast3r_pairs)} pairs...")

    # Run inference
    output = inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)

    # Free the pair list before the (memory-heavy) alignment step.
    del mast3r_pairs
    clear_memory()

    print("✓ MASt3R inference complete")
    print("After inference:")
    get_memory_info()

    # Global alignment
    print("Running global alignment...")
    scene = global_aligner(
        output,
        device=device,
        mode=GlobalAlignerMode.PointCloudOptimizer
    )

    del output
    clear_memory()

    print("Computing global alignment...")
    loss = scene.compute_global_alignment(
        init="mst",
        niter=50,  # Reduced iterations
        schedule='cosine',
        lr=0.01
    )

    print(f"✓ Global alignment complete (final loss: {loss:.6f})")
    print("Final memory state:")
    get_memory_info()

    return scene, images

# =====================================================================
# CELL 11: Camera Parameter Extraction (Revised)
# =====================================================================
def extract_camera_params_process2(scene, image_paths, conf_threshold=1.5):
    """
    Extracts camera parameters and 3D points from the scene (with scaling correction).

    Intrinsics returned by MASt3R are expressed at its internal processing
    resolution (224); they are rescaled here to each image's original width.

    Args:
        scene: MASt3R/dust3r scene object (poses/focals/pps probed via getattr).
        image_paths: list of image file paths.
        conf_threshold: minimum confidence for a 3D point to be kept.

    Returns:
        (cameras_dict, pts3d, confidence):
        cameras_dict maps basename -> dict with 'focal', 'pp', 'pose' (w2c),
        'rotation', 'translation', 'width', 'height'; pts3d is (M, 3);
        confidence is (M,), both already filtered by conf_threshold.
    """
    print("\n=== Extracting Camera Parameters ===")

    cameras_dict = {}
    all_pts3d = []
    all_confidence = []

    try:
        # Attempt to get camera poses (API differs across scene versions)
        if hasattr(scene, 'get_im_poses'):
            poses = scene.get_im_poses()
        elif hasattr(scene, 'im_poses'):
            poses = scene.im_poses
        else:
            poses = None

        # Attempt to get focal lengths
        if hasattr(scene, 'get_focals'):
            focals = scene.get_focals()
        elif hasattr(scene, 'im_focals'):
            focals = scene.im_focals
        else:
            focals = None

        # Attempt to get principal points
        if hasattr(scene, 'get_principal_points'):
            pps = scene.get_principal_points()
        elif hasattr(scene, 'im_pp'):
            pps = scene.im_pp
        else:
            pps = None
    except Exception as e:
        print(f"⚠️ Error getting camera parameters: {e}")
        poses = None
        focals = None
        pps = None

    # [Important] MASt3R internal processing size
    mast3r_size = 224.0

    n_images = min(len(poses) if poses is not None else len(image_paths), len(image_paths))

    for idx in range(n_images):
        img_name = os.path.basename(image_paths[idx])

        try:
            # Get original image dimensions
            img = Image.open(image_paths[idx])
            W, H = img.size
            img.close()

            # Calculate scaling ratio
            # NOTE(review): derived from width only — assumes MASt3R rescaled
            # by width; confirm behavior for non-square/portrait inputs.
            scale = W / mast3r_size

            # Get Pose (Convert camera-to-world to world-to-camera)
            if poses is not None and idx < len(poses):
                pose_c2w = poses[idx]
                if isinstance(pose_c2w, torch.Tensor):
                    pose_c2w = pose_c2w.detach().cpu().numpy()
                if not isinstance(pose_c2w, np.ndarray) or pose_c2w.shape != (4, 4):
                    pose_c2w = np.eye(4)

                # Invert to get world-to-camera pose
                pose = np.linalg.inv(pose_c2w)
            else:
                pose = np.eye(4)

            # Get and scale focal length
            if focals is not None and idx < len(focals):
                focal_mast3r = focals[idx]
                if isinstance(focal_mast3r, torch.Tensor):
                    focal_mast3r = focal_mast3r.detach().cpu().item()
                else:
                    focal_mast3r = float(focal_mast3r)

                # 🔧 Apply scaling
                # NOTE(review): assumes focals is 2-D; a 1-D focals array
                # raises IndexError here, which is caught below and this
                # camera silently falls back to defaults.
                if focals.shape[1] == 1:
                    # Isotropic camera (fx = fy)
                    focal = focal_mast3r * scale
                else:
                    # Anisotropic camera
                    focal = float(focals[idx, 0]) * scale
            else:
                focal = 1000.0

            # Get and scale principal point
            if pps is not None and idx < len(pps):
                pp_mast3r = pps[idx]
                if isinstance(pp_mast3r, torch.Tensor):
                    pp_mast3r = pp_mast3r.detach().cpu().numpy()

                # 🔧 Apply scaling
                pp = pp_mast3r * scale
            else:
                pp = np.array([W / 2.0, H / 2.0])

            # Store camera parameters
            cameras_dict[img_name] = {
                'focal': focal,
                'pp': pp,
                'pose': pose,
                'rotation': pose[:3, :3],
                'translation': pose[:3, 3],
                'width': W,
                'height': H
            }

            # Debugging info (First image only)
            # NOTE(review): focal_mast3r / pp_mast3r are unbound when
            # focals/pps is None; the resulting NameError is caught by the
            # except below and camera 0 silently gets fallback defaults.
            if idx == 0:
                print(f"\nExample camera 0:")
                print(f"  Original size: {W}x{H}")
                print(f"  MASt3R size: {mast3r_size}")
                print(f"  Scale factor: {scale:.3f}")
                print(f"  MASt3R focal: {focal_mast3r:.2f}")
                print(f"  Scaled focal: {focal:.2f}")
                print(f"  MASt3R pp: [{pp_mast3r[0]:.2f}, {pp_mast3r[1]:.2f}]")
                print(f"  Scaled pp: [{pp[0]:.2f}, {pp[1]:.2f}]")

            # Extract 3D points
            if hasattr(scene, 'im_pts3d') and idx < len(scene.im_pts3d):
                pts3d_img = scene.im_pts3d[idx]
            elif hasattr(scene, 'get_pts3d'):
                pts3d_all = scene.get_pts3d()
                pts3d_img = pts3d_all[idx] if idx < len(pts3d_all) else None
            else:
                pts3d_img = None

            # Extract confidence scores
            if hasattr(scene, 'im_conf') and idx < len(scene.im_conf):
                conf_img = scene.im_conf[idx]
            elif hasattr(scene, 'get_conf'):
                conf_all = scene.get_conf()
                conf_img = conf_all[idx] if idx < len(conf_all) else None
            else:
                conf_img = None

            # Process 3D points and confidence
            if pts3d_img is not None:
                if isinstance(pts3d_img, torch.Tensor):
                    pts3d_img = pts3d_img.detach().cpu().numpy()

                # Flatten (H, W, 3) maps to (H*W, 3)
                pts3d_flat = pts3d_img.reshape(-1, 3) if pts3d_img.ndim == 3 else pts3d_img
                all_pts3d.append(pts3d_flat)

                if conf_img is not None:
                    if isinstance(conf_img, (list, torch.Tensor)):
                        conf_img = np.array(conf_img) if isinstance(conf_img, list) else conf_img.detach().cpu().numpy()

                    conf_flat = conf_img.reshape(-1) if conf_img.ndim > 1 else conf_img

                    # Length mismatch -> fall back to all-ones confidence
                    if len(conf_flat) != len(pts3d_flat):
                        conf_flat = np.ones(len(pts3d_flat))

                    all_confidence.append(conf_flat)
                else:
                    all_confidence.append(np.ones(len(pts3d_flat)))

        except Exception as e:
            print(f"⚠️ Error processing image {idx} ({img_name}): {e}")
            # Fallback to default values with scaling applied
            img = Image.open(image_paths[idx])
            W, H = img.size
            img.close()

            cameras_dict[img_name] = {
                'focal': 1000.0 * (W / mast3r_size),
                'pp': np.array([W / 2.0, H / 2.0]),
                'pose': np.eye(4),
                'rotation': np.eye(3),
                'translation': np.zeros(3),
                'width': W,
                'height': H
            }
            continue

    # Consolidate all 3D points
    if all_pts3d:
        pts3d = np.vstack(all_pts3d)
        confidence = np.concatenate(all_confidence)
    else:
        pts3d = np.zeros((0, 3))
        confidence = np.zeros(0)

    print(f"✓ Extracted parameters for {len(cameras_dict)} cameras")
    print(f"✓ Total 3D points: {len(pts3d)}")

    # Filter points by confidence
    if len(confidence) > 0:
        valid_mask = confidence > conf_threshold
        pts3d = pts3d[valid_mask]
        confidence = confidence[valid_mask]
        print(f"✓ Points after confidence filtering (>{conf_threshold}): {len(pts3d)}")

    return cameras_dict, pts3d, confidence
# =====================================================================
# CELL 12: COLMAP Export Functions (PINHOLE Model)
# =====================================================================

import struct
import numpy as np
from pathlib import Path

def rotmat_to_qvec(R):
    """Convert a 3x3 rotation matrix to a unit quaternion (w, x, y, z).

    Uses the numerically stable branch selection on the largest diagonal
    element (Shepperd's method).
    """
    R = np.asarray(R, dtype=np.float64)
    trace = np.trace(R)

    if trace > 0:
        s = 0.5 / np.sqrt(trace + 1.0)
        w = 0.25 / s
        x = (R[2, 1] - R[1, 2]) * s
        y = (R[0, 2] - R[2, 0]) * s
        z = (R[1, 0] - R[0, 1]) * s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
        w = (R[2, 1] - R[1, 2]) / s
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
    elif R[1, 1] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
        w = (R[0, 2] - R[2, 0]) / s
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
    else:
        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
        w = (R[1, 0] - R[0, 1]) / s
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s

    qvec = np.array([w, x, y, z], dtype=np.float64)
    qvec = qvec / np.linalg.norm(qvec)

    return qvec


def write_cameras_binary(cameras_dict, image_size, output_file):
    """
    Export cameras.bin using the PINHOLE model.

    Args:
        cameras_dict: mapping image name -> params dict ('focal', 'pp',
            and optionally per-camera 'width'/'height').
        image_size: (width, height) fallback used when a camera entry
            does not carry its own dimensions.
        output_file: destination path for cameras.bin.

    Fix vs. previous version: the binary used to write one global
    (width, height) for every camera even though focal/pp were scaled
    per-image; per-camera 'width'/'height' are now used when present,
    keeping intrinsics and dimensions consistent.
    """
    width, height = image_size
    num_cameras = len(cameras_dict)

    # COLMAP camera models
    PINHOLE = 1  # 🔧 Changed from SIMPLE_PINHOLE (0) to PINHOLE (1)

    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', num_cameras))

        for camera_id, (img_id, cam_params) in enumerate(cameras_dict.items(), start=1):
            focal = cam_params['focal']

            # For PINHOLE: fx, fy, cx, cy
            fx = fy = focal  # Assuming isotropic camera

            # Per-camera dimensions, falling back to the global image_size.
            cam_width = int(cam_params.get('width', width))
            cam_height = int(cam_params.get('height', height))

            # Get principal point (default to center if not present)
            if 'pp' in cam_params:
                pp = cam_params['pp']
                cx = float(pp[0])
                cy = float(pp[1])
            else:
                cx = cam_width / 2.0
                cy = cam_height / 2.0

            # camera_id
            f.write(struct.pack('I', camera_id))
            # model_id (PINHOLE = 1)
            f.write(struct.pack('i', PINHOLE))
            # width
            f.write(struct.pack('Q', cam_width))
            # height
            f.write(struct.pack('Q', cam_height))
            # params: fx, fy, cx, cy (4 parameters)
            f.write(struct.pack('d', fx))
            f.write(struct.pack('d', fy))
            f.write(struct.pack('d', cx))
            f.write(struct.pack('d', cy))

    print(f"COLMAP cameras.bin saved to {output_file}")


def write_images_binary(cameras_dict, output_file):
    """Export images.bin (world-to-camera pose per image; no 2D points).

    camera_id mirrors image_id — one camera per image.
    """
    num_images = len(cameras_dict)

    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', num_images))

        for image_id, (img_id, cam_params) in enumerate(cameras_dict.items(), start=1):
            R = cam_params['rotation']
            quat = rotmat_to_qvec(R)
            t = cam_params['translation']
            camera_id = image_id

            f.write(struct.pack('I', image_id))
            for q in quat:
                f.write(struct.pack('d', q))
            for ti in t:
                f.write(struct.pack('d', ti))
            f.write(struct.pack('I', camera_id))

            # Image name (null-terminated)
            name_bytes = img_id.encode('utf-8') + b'\x00'
            f.write(name_bytes)
            # x, y coordinates and point3D_id (Set to 0/empty for this export)
            f.write(struct.pack('Q', 0))

    print(f"COLMAP images.bin saved to {output_file}")


def write_points3D_binary(pts3d, confidence, output_file):
    """Export points3D.bin.

    Colors default to gray; reprojection error is approximated as
    1/confidence (clamped); tracks are left empty.
    """
    num_points = len(pts3d)

    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', num_points))

        for point_id, pt in enumerate(pts3d, start=1):
            x, y, z = pt

            f.write(struct.pack('Q', point_id))
            f.write(struct.pack('d', x))
            f.write(struct.pack('d', y))
            f.write(struct.pack('d', z))

            # RGB Color (Default: Gray)
            f.write(struct.pack('B', 128))
            f.write(struct.pack('B', 128))
            f.write(struct.pack('B', 128))

            # Error estimation (inverse confidence, clamped to avoid div-by-zero)
            if confidence is not None and point_id <= len(confidence):
                error = 1.0 / max(confidence[point_id-1], 0.001)
            else:
                error = 1.0
            f.write(struct.pack('d', error))

            # track_length (Set to 0)
            f.write(struct.pack('Q', 0))

    print(f"COLMAP points3D.bin saved to {output_file}")


def export_colmap_binary(cameras_dict, pts3d, confidence, image_size, output_dir):
    """Main function to export all COLMAP binary files.

    Writes cameras.bin, images.bin and points3D.bin into output_dir,
    creating the directory if needed.
    """
    output_path = Path(output_dir)
    output_path.mkdir(parents=True, exist_ok=True)

    write_cameras_binary(
        cameras_dict,
        image_size,
        output_path / 'cameras.bin'
    )

    write_images_binary(
        cameras_dict,
        output_path / 'images.bin'
    )

    write_points3D_binary(
        pts3d,
        confidence,
        output_path / 'points3D.bin'
    )

    print(f"\nCOLMAP binary files exported to {output_dir}/")
    print(f"  - cameras.bin: {len(cameras_dict)} cameras (PINHOLE model)")
    print(f"  - images.bin: {len(cameras_dict)} images")
    print(f"  - points3D.bin: {len(pts3d)} points")
# =====================================================================
# CELL 20: Traditional Method Functions (for comparison)
# =====================================================================
import struct
import os
import numpy as np
import pandas as pd
from pathlib import Path

# ===== Traditional Method: extract_colmap_data =====
def extract_colmap_data_traditional(scene, image_paths, max_points=1000000):
    """
    Traditional Method: Extract COLMAP-compatible data from a MASt3R scene.
    (Derived from dino-mast3r-gs-kg-34oo.ipynb)

    Args:
        scene: MASt3R scene exposing get_pts3d/get_im_poses/get_focals/
            get_principal_points.
        image_paths: list of image file paths.
        max_points: cap on extracted 3D points (random downsample above it).

    Returns:
        (pts3d, colors, cameras, poses) — Nx3 points, Nx3 colors in [0,1],
        per-image camera dicts, and world-to-camera 4x4 poses.

    Fixes vs. previous version: focal_mast3r was unbound in the
    anisotropic-focal branch but used by the i == 0 debug print
    (NameError); Image file handles were never closed; point
    downsampling used the unseeded global numpy RNG (nondeterministic);
    1-D focals arrays crashed on focals.shape[1].
    """
    print("\n=== [TRADITIONAL] Extracting COLMAP-compatible data ===")

    # Extract point cloud
    pts_all = scene.get_pts3d()
    print(f"pts_all type: {type(pts_all)}")

    if isinstance(pts_all, list):
        print(f"pts_all is a list with {len(pts_all)} elements")
        if len(pts_all) > 0:
            print(f"First element type: {type(pts_all[0])}")
            if hasattr(pts_all[0], 'shape'):
                print(f"First element shape: {pts_all[0].shape}")

        pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)
                               for p in pts_all])
        print(f"pts_all shape after conversion: {pts_all.shape}")

    if len(pts_all.shape) == 4:
        print(f"Found batched point cloud: {pts_all.shape}")
        B, H, W, _ = pts_all.shape
        pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()

        # Extract colors (context manager closes each file handle)
        colors = []
        for img_path in image_paths:
            with Image.open(img_path) as img:
                colors.append(np.array(img.resize((W, H))))
        colors = np.stack(colors).reshape(-1, 3) / 255.0
    else:
        pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all
        colors = np.ones((len(pts3d), 3)) * 0.5

    print(f"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images")

    # Downsample points (deterministic: private seeded generator)
    if len(pts3d) > max_points:
        print(f"\n⚠ Downsampling from {len(pts3d)} to {max_points} points...")
        valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))
        pts3d_valid = pts3d[valid_mask]
        colors_valid = colors[valid_mask]
        rng = np.random.default_rng(42)
        indices = rng.choice(len(pts3d_valid), size=max_points, replace=False)
        pts3d = pts3d_valid[indices]
        colors = colors_valid[indices]
        print(f"✓ Downsampled to {len(pts3d)} points")

    # Extract camera parameters
    print("Extracting camera parameters...")

    # [Important] Convert camera-to-world (C2W) to world-to-camera (W2C)
    poses_c2w = scene.get_im_poses().detach().cpu().numpy()
    print(f"Retrieved camera-to-world poses: shape {poses_c2w.shape}")

    poses = []
    for i, pose_c2w in enumerate(poses_c2w):
        pose_w2c = np.linalg.inv(pose_c2w)
        poses.append(pose_w2c)
    poses = np.array(poses)
    print("Converted to world-to-camera poses for COLMAP")

    focals = scene.get_focals().detach().cpu().numpy()
    pp = scene.get_principal_points().detach().cpu().numpy()
    print(f"Focals shape: {focals.shape}")
    print(f"Principal points shape: {pp.shape}")

    mast3r_size = 224.0

    cameras = []
    for i, img_path in enumerate(image_paths):
        with Image.open(img_path) as img:
            W, H = img.size
        # NOTE(review): scale derived from width only — assumes MASt3R
        # rescaled by width; confirm for non-square inputs.
        scale = W / mast3r_size

        if focals.ndim == 1 or focals.shape[1] == 1:
            # Isotropic camera (fx = fy); tolerate 1-D focal arrays
            focal_mast3r = float(focals[i]) if focals.ndim == 1 else float(focals[i, 0])
            fx = fy = focal_mast3r * scale
        else:
            # Anisotropic camera; bind focal_mast3r here too so the
            # debug print below never hits an unbound name.
            focal_mast3r = float(focals[i, 0])
            fx = focal_mast3r * scale
            fy = float(focals[i, 1]) * scale

        cx = float(pp[i, 0]) * scale
        cy = float(pp[i, 1]) * scale

        camera = {
            'camera_id': i + 1,
            'model': 'PINHOLE',
            'width': W,
            'height': H,
            'params': [fx, fy, cx, cy]
        }
        cameras.append(camera)

        if i == 0:
            print(f"\nExample camera 0:")
            print(f"  Image size: {W}x{H}")
            print(f"  MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})")
            print(f"  Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}")
            print(f"  Pose (first row): {poses[i][0]}")

    print(f"\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses")

    return pts3d, colors, cameras, poses


# ===== Traditional Method: rotmat2qvec =====
def rotmat2qvec_traditional(R):
    """Traditional Method: Convert rotation matrix to quaternion (w, x, y, z)."""
    R = np.asarray(R, dtype=np.float64)
    trace = np.trace(R)

    if trace > 0:
        s = 0.5 / np.sqrt(trace + 1.0)
        w = 0.25 / s
        x = (R[2, 1] - R[1, 2]) * s
        y = (R[0, 2] - R[2, 0]) * s
        z = (R[1, 0] - R[0, 1]) * s
    elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])
        w = (R[2, 1] - R[1, 2]) / s
        x = 0.25 * s
        y = (R[0, 1] + R[1, 0]) / s
        z = (R[0, 2] + R[2, 0]) / s
    elif R[1, 1] > R[2, 2]:
        s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])
        w = (R[0, 2] - R[2, 0]) / s
        x = (R[0, 1] + R[1, 0]) / s
        y = 0.25 * s
        z = (R[1, 2] + R[2, 1]) / s
    else:
        s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])
        w = (R[1, 0] - R[0, 1]) / s
        x = (R[0, 2] + R[2, 0]) / s
        y = (R[1, 2] + R[2, 1]) / s
        z = 0.25 * s

    qvec = np.array([w, x, y, z], dtype=np.float64)
    qvec = qvec / np.linalg.norm(qvec)

    return qvec


# ===== Traditional Method: Save Functions =====
def write_cameras_binary_traditional(cameras, output_file):
    """Traditional Method: Write cameras.bin (PINHOLE, 4 params)."""
    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', len(cameras)))

        for i, cam in enumerate(cameras):
            camera_id = cam.get('camera_id', i + 1)
            model_id = 1  # PINHOLE
            width = cam['width']
            height = cam['height']
            params = cam['params']

            f.write(struct.pack('i', camera_id))
            f.write(struct.pack('i', model_id))
            f.write(struct.pack('Q', width))
            f.write(struct.pack('Q', height))

            for param in params[:4]:
                f.write(struct.pack('d', param))


def write_images_binary_traditional(image_paths, cameras, poses, output_file):
    """Traditional Method: Write images.bin (pose + name, empty 2D points)."""
    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', len(image_paths)))

        for i, (img_path, pose) in enumerate(zip(image_paths, poses)):
            image_id = i + 1
            camera_id = cameras[i].get('camera_id', i + 1)
            image_name = os.path.basename(img_path)

            R = pose[:3, :3]
            t = pose[:3, 3]
            qvec = rotmat2qvec_traditional(R)
            tvec = t

            f.write(struct.pack('i', image_id))
            for q in qvec:
                f.write(struct.pack('d', float(q)))
            for tv in tvec:
                f.write(struct.pack('d', float(tv)))
            f.write(struct.pack('i', camera_id))
            f.write(image_name.encode('utf-8') + b'\x00')
            f.write(struct.pack('Q', 0))


def write_points3d_binary_traditional(pts3d, colors, output_file):
    """Traditional Method: Write points3D.bin, skipping NaN/Inf points.

    Returns the number of valid points written.
    """
    valid_indices = []
    for i, pt in enumerate(pts3d):
        if not (np.isnan(pt).any() or np.isinf(pt).any()):
            valid_indices.append(i)

    with open(output_file, 'wb') as f:
        f.write(struct.pack('Q', len(valid_indices)))

        for idx, point_id in enumerate(valid_indices):
            pt = pts3d[point_id]
            color = colors[point_id]

            f.write(struct.pack('Q', point_id))
            for coord in pt:
                f.write(struct.pack('d', float(coord)))

            col_int = (color * 255).astype(np.uint8)
            for c in col_int:
                f.write(struct.pack('B', int(c)))

            f.write(struct.pack('d', 0.0))
            f.write(struct.pack('Q', 0))

    return len(valid_indices)


def save_colmap_reconstruction_traditional(pts3d, colors, cameras, poses, image_paths, output_dir):
    """Traditional Method: Save COLMAP reconstruction.

    Writes cameras/images/points3D binaries under
    <output_dir>/sparse_traditional/0 and returns that directory.
    """
    print("\n=== [TRADITIONAL] Saving COLMAP reconstruction ===")

    sparse_dir = Path(output_dir) / 'sparse_traditional' / '0'
    sparse_dir.mkdir(parents=True, exist_ok=True)

    write_cameras_binary_traditional(cameras, sparse_dir / 'cameras.bin')
    print(f"  ✓ Wrote {len(cameras)} cameras")

    write_images_binary_traditional(image_paths, cameras, poses, sparse_dir / 'images.bin')
    print(f"  ✓ Wrote {len(image_paths)} images")

    num_points = write_points3d_binary_traditional(pts3d, colors, sparse_dir / 'points3D.bin')
    print(f"  ✓ Wrote {num_points} 3D points")

    print(f"\n✓ Traditional COLMAP reconstruction saved to {sparse_dir}")

    return sparse_dir


# =====================================================================
# CELL 21: Convert BIN to CSV for Easy Comparison
# =====================================================================

def bin_to_csv_cameras(bin_file, csv_file):
    """cameras.bin → CSV.

    Fix vs. previous version: for SIMPLE_PINHOLE (3 params: f, cx, cy)
    the columns were shifted — fy got cx, cx got cy, cy got None. Params
    are now mapped per model: PINHOLE -> (fx, fy, cx, cy),
    SIMPLE_PINHOLE -> fx = fy = f, then cx, cy.
    """
    data = []
    with open(bin_file, 'rb') as f:
        num_cameras = struct.unpack('Q', f.read(8))[0]
        for _ in range(num_cameras):
            camera_id = struct.unpack('i', f.read(4))[0]
            model_id = struct.unpack('i', f.read(4))[0]
            width = struct.unpack('Q', f.read(8))[0]
            height = struct.unpack('Q', f.read(8))[0]

            # PINHOLE: 4 params (fx, fy, cx, cy)
            if model_id == 1:
                params = struct.unpack('dddd', f.read(32))
                fx, fy, cx, cy = params
            # SIMPLE_PINHOLE: 3 params (f, cx, cy) with fx == fy == f
            else:
                params = struct.unpack('ddd', f.read(24))
                fx = fy = params[0]
                cx, cy = params[1], params[2]

            data.append({
                'camera_id': camera_id,
                'model_id': model_id,
                'width': width,
                'height': height,
                'fx': fx,
                'fy': fy,
                'cx': cx,
                'cy': cy
            })

    df = pd.DataFrame(data)
    df.to_csv(csv_file, index=False)
    print(f"✓ Cameras CSV saved: {csv_file}")
    return df


def bin_to_csv_images(bin_file, csv_file):
    """images.bin → CSV (pose quaternion/translation and name per image)."""
    data = []
    with open(bin_file, 'rb') as f:
        num_images = struct.unpack('Q', f.read(8))[0]
        for _ in range(num_images):
            image_id = struct.unpack('i', f.read(4))[0]
            qvec = struct.unpack('dddd', f.read(32))
            tvec = struct.unpack('ddd', f.read(24))
            camera_id = struct.unpack('i', f.read(4))[0]

            # Read null-terminated image name
            name = b''
            while True:
                char = f.read(1)
                if char == b'\x00':
                    break
                name += char
            name = name.decode('utf-8')

            # Skip 2D points: each is x (double) + y (double) + point3D_id (int64) = 24 bytes
            num_points2D = struct.unpack('Q', f.read(8))[0]
            f.read(num_points2D * 24)

            data.append({
                'image_id': image_id,
                'qw': qvec[0],
                'qx': qvec[1],
                'qy': qvec[2],
                'qz': qvec[3],
                'tx': tvec[0],
                'ty': tvec[1],
                'tz': tvec[2],
                'camera_id': camera_id,
                'name': name
            })

    df = pd.DataFrame(data)
    df.to_csv(csv_file, index=False)
    print(f"✓ Images CSV saved: {csv_file}")
    return df


def bin_to_csv_points3d(bin_file, csv_file, max_rows=10000):
    """Convert points3D.bin to CSV with downsampling (every `step`-th point)."""
    data = []
    with open(bin_file, 'rb') as f:
        num_points = struct.unpack('Q', f.read(8))[0]

        # Calculate sampling interval (step size)
        step = max(1, num_points // max_rows)

        for i in range(num_points):
            point_id = struct.unpack('Q', f.read(8))[0]
            xyz = struct.unpack('ddd', f.read(24))
            rgb = struct.unpack('BBB', f.read(3))
            error = struct.unpack('d', f.read(8))[0]
            track_length = struct.unpack('Q', f.read(8))[0]
            # Skip track data (image_id int32 + point2D_idx int32 = 8 bytes each)
            f.read(track_length * 8)

            # Downsampling logic
            if i % step == 0:
                data.append({
                    'point_id': point_id,
                    'x': xyz[0],
                    'y': xyz[1],
                    'z': xyz[2],
                    'r': rgb[0],
                    'g': rgb[1],
                    'b': rgb[2],
                    'error': error
                })

    df = pd.DataFrame(data)
    df.to_csv(csv_file, index=False)
    print(f"✓ Points3D CSV saved: {csv_file} (sampled {len(df)} / {num_points} points)")
    return df


def convert_colmap_bins_to_csv(sparse_dir, output_prefix):
    """Convert all COLMAP binary files in a directory to CSV format.

    Returns (cameras_df, images_df, points_df).
    """
    print(f"\n=== Converting {sparse_dir} to CSV ===")

    cameras_df = bin_to_csv_cameras(
        os.path.join(sparse_dir, 'cameras.bin'),
        f"{output_prefix}_cameras.csv"
    )

    images_df = bin_to_csv_images(
        os.path.join(sparse_dir, 'images.bin'),
        f"{output_prefix}_images.csv"
    )

    points_df = bin_to_csv_points3d(
        os.path.join(sparse_dir, 'points3D.bin'),
        f"{output_prefix}_points3d.csv",
        max_rows=10000
    )

    return cameras_df, images_df, points_df
"iopub.status.busy": "2026-01-25T04:41:19.591646Z", "iopub.status.idle": "2026-01-25T04:41:19.605923Z", "shell.execute_reply": "2026-01-25T04:41:19.605341Z" }, "id": "SN1a_CbWEkIg", "papermill": { "duration": 0.023058, "end_time": "2026-01-25T04:41:19.607253", "exception": false, "start_time": "2026-01-25T04:41:19.584195", "status": "completed" }, "tags": [] }, "outputs": [], "source": [ "# =====================================================================\n", "# CELL 22: Comparison Function\n", "# =====================================================================\n", "def compare_extraction_methods(scene, image_paths, output_dir, conf_threshold=0.5, max_points=100000):\n", " \"\"\"\n", " Exports COLMAP format data using both the new and traditional methods for comparison.\n", "\n", " Args:\n", " scene: MASt3R scene object.\n", " image_paths: List of paths to the input images.\n", " output_dir: Directory where the output will be saved.\n", " conf_threshold: Confidence threshold for point filtering (New method).\n", " max_points: Maximum number of points to extract (Traditional method).\n", "\n", " Returns:\n", " dict: A dictionary containing the comparison results.\n", " \"\"\"\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"COMPARISON: New vs Traditional Extraction Methods\")\n", " print(\"=\"*70)\n", "\n", " # ===== METHOD 1: New Implementation (extract_camera_params_process2) =====\n", " print(\"\\n--- METHOD 1: New Implementation (extract_camera_params_process2) ---\")\n", "\n", " cameras_dict_new, pts3d_new, confidence_new = extract_camera_params_process2(\n", " scene=scene,\n", " image_paths=image_paths,\n", " conf_threshold=conf_threshold\n", " )\n", "\n", " # Get original image dimensions\n", " first_img = Image.open(image_paths[0])\n", " image_size = (first_img.width, first_img.height)\n", " first_img.close()\n", "\n", " # Save binary files for the New Method\n", " sparse_dir_new = os.path.join(output_dir, \"sparse_new/0\")\n", " 
os.makedirs(sparse_dir_new, exist_ok=True)\n", "\n", " export_colmap_binary(\n", " cameras_dict=cameras_dict_new,\n", " pts3d=pts3d_new,\n", " confidence=confidence_new,\n", " image_size=image_size,\n", " output_dir=sparse_dir_new\n", " )\n", "\n", " # ===== METHOD 2: Traditional Method (extract_colmap_data_traditional) =====\n", " print(\"\\n--- METHOD 2: Traditional Implementation (extract_colmap_data) ---\")\n", "\n", " pts3d_trad, colors_trad, cameras_trad, poses_trad = extract_colmap_data_traditional(\n", " scene=scene,\n", " image_paths=image_paths,\n", " max_points=max_points\n", " )\n", "\n", " # Save binary files for the Traditional Method\n", " sparse_dir_trad = save_colmap_reconstruction_traditional(\n", " pts3d=pts3d_trad,\n", " colors=colors_trad,\n", " cameras=cameras_trad,\n", " poses=poses_trad,\n", " image_paths=image_paths,\n", " output_dir=output_dir\n", " )\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"Converting to CSV for comparison\")\n", " print(\"=\"*70)\n", "\n", " csv_prefix_new = os.path.join(output_dir, \"comparison_new\")\n", " csv_prefix_trad = os.path.join(output_dir, \"comparison_traditional\")\n", "\n", " cam_new, img_new, pts_new = convert_colmap_bins_to_csv(\n", " sparse_dir_new,\n", " csv_prefix_new\n", " )\n", "\n", " cam_trad, img_trad, pts_trad = convert_colmap_bins_to_csv(\n", " str(sparse_dir_trad),\n", " csv_prefix_trad\n", " )\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"COMPARISON SUMMARY\")\n", " print(\"=\"*70)\n", "\n", " comparison_results = {\n", " 'cameras': {\n", " 'new_count': len(cam_new),\n", " 'trad_count': len(cam_trad),\n", " 'new_focal': float(cam_new.iloc[0]['fx']) if len(cam_new) > 0 else None,\n", " 'trad_focal': float(cam_trad.iloc[0]['fx']) if len(cam_trad) > 0 else None,\n", " },\n", " 'images': {\n", " 'new_count': len(img_new),\n", " 'trad_count': len(img_trad),\n", " 'new_tvec': [float(img_new.iloc[0]['tx']), float(img_new.iloc[0]['ty']), float(img_new.iloc[0]['tz'])] if len(img_new) > 
0 else None,\n", " 'trad_tvec': [float(img_trad.iloc[0]['tx']), float(img_trad.iloc[0]['ty']), float(img_trad.iloc[0]['tz'])] if len(img_trad) > 0 else None,\n", " },\n", " 'points': {\n", " 'new_count': len(pts_new),\n", " 'trad_count': len(pts_trad),\n", " 'new_center': [float(pts_new['x'].mean()), float(pts_new['y'].mean()), float(pts_new['z'].mean())] if len(pts_new) > 0 else None,\n", " 'trad_center': [float(pts_trad['x'].mean()), float(pts_trad['y'].mean()), float(pts_trad['z'].mean())] if len(pts_trad) > 0 else None,\n", " }\n", " }\n", "\n", " print(\"\\nCAMERAS:\")\n", " print(f\" New method: {comparison_results['cameras']['new_count']} cameras\")\n", " print(f\" Traditional method: {comparison_results['cameras']['trad_count']} cameras\")\n", " if comparison_results['cameras']['new_focal'] and comparison_results['cameras']['trad_focal']:\n", " print(f\"\\n Sample focal lengths:\")\n", " print(f\" New: fx={comparison_results['cameras']['new_focal']:.2f}\")\n", " print(f\" Traditional: fx={comparison_results['cameras']['trad_focal']:.2f}\")\n", " focal_diff = abs(comparison_results['cameras']['new_focal'] - comparison_results['cameras']['trad_focal'])\n", " print(f\" Difference: {focal_diff:.2f}\")\n", "\n", " print(\"\\nIMAGES:\")\n", " print(f\" New method: {comparison_results['images']['new_count']} images\")\n", " print(f\" Traditional method: {comparison_results['images']['trad_count']} images\")\n", " if comparison_results['images']['new_tvec'] and comparison_results['images']['trad_tvec']:\n", " print(f\"\\n Sample translation (first image):\")\n", " print(f\" New: {comparison_results['images']['new_tvec']}\")\n", " print(f\" Traditional: {comparison_results['images']['trad_tvec']}\")\n", " tvec_diff = np.linalg.norm(\n", " np.array(comparison_results['images']['new_tvec']) -\n", " np.array(comparison_results['images']['trad_tvec'])\n", " )\n", " print(f\" Distance: {tvec_diff:.3f}\")\n", "\n", " print(\"\\nPOINTS3D:\")\n", " print(f\" New method: 
{comparison_results['points']['new_count']} points (sampled)\")\n", " print(f\" Traditional method: {comparison_results['points']['trad_count']} points (sampled)\")\n", " if comparison_results['points']['new_center'] and comparison_results['points']['trad_center']:\n", " print(f\"\\n Center of points:\")\n", " print(f\" New: {comparison_results['points']['new_center']}\")\n", " print(f\" Traditional: {comparison_results['points']['trad_center']}\")\n", " center_diff = np.linalg.norm(\n", " np.array(comparison_results['points']['new_center']) -\n", " np.array(comparison_results['points']['trad_center'])\n", " )\n", " print(f\" Distance: {center_diff:.3f}\")\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"CSV FILES SAVED:\")\n", " print(\"=\"*70)\n", " print(f\" New method:\")\n", " print(f\" - {csv_prefix_new}_cameras.csv\")\n", " print(f\" - {csv_prefix_new}_images.csv\")\n", " print(f\" - {csv_prefix_new}_points3d.csv\")\n", " print(f\" Traditional method:\")\n", " print(f\" - {csv_prefix_trad}_cameras.csv\")\n", " print(f\" - {csv_prefix_trad}_images.csv\")\n", " print(f\" - {csv_prefix_trad}_points3d.csv\")\n", "\n", " print(\"\\n✓ Comparison complete! 
Review CSV files for detailed analysis.\")\n", "\n", " return comparison_results" ] }, { "cell_type": "code", "execution_count": 12, "id": "0d089d7a", "metadata": { "execution": { "iopub.execute_input": "2026-01-25T04:41:19.621777Z", "iopub.status.busy": "2026-01-25T04:41:19.621357Z", "iopub.status.idle": "2026-01-25T04:41:19.633041Z", "shell.execute_reply": "2026-01-25T04:41:19.632438Z" }, "id": "U7Lk41hLTKyF", "papermill": { "duration": 0.020603, "end_time": "2026-01-25T04:41:19.634481", "exception": false, "start_time": "2026-01-25T04:41:19.613878", "status": "completed" }, "tags": [] }, "outputs": [], "source": [
"# =====================================================================\n",
"# CELL 14: Main Pipeline\n",
"# =====================================================================\n",
"\n",
"def main_pipeline(image_dir, output_dir, square_size=1024, iterations=30000,\n",
"                  max_images=200, max_pairs=100, max_points=500000,\n",
"                  conf_threshold=1.001, preprocess_mode='none'):\n",
"    \"\"\"End-to-end MASt3R -> COLMAP reconstruction pipeline.\n",
"\n",
"    Stages: (0) optional biplet preprocessing, (1) image loading,\n",
"    (2) DINO pair selection, (3) MASt3R inference, (4) COLMAP export\n",
"    plus a comparison of the two extraction implementations.\n",
"\n",
"    Args:\n",
"        image_dir: Directory containing the input images.\n",
"        output_dir: Root directory for all pipeline outputs.\n",
"        square_size: Side length (px) of biplet crops when preprocess_mode='biplet'.\n",
"        iterations: Reserved for downstream optimization; not used in this function.\n",
"        max_images: Maximum number of images to load.\n",
"        max_pairs: Requested number of DINO pairs (clamped internally to 50).\n",
"        max_points: Point cap forwarded to the traditional extraction comparison.\n",
"        conf_threshold: Confidence threshold for camera/point extraction.\n",
"        preprocess_mode: 'biplet' to generate square crops, anything else copies images.\n",
"\n",
"    Returns:\n",
"        dict: Comparison summary from compare_extraction_methods().\n",
"    \"\"\"\n",
"    # STEP 0: Image Preprocessing\n",
"    if preprocess_mode == 'biplet':\n",
"        print(\"=\"*70)\n",
"        print(\"STEP 0: Image Preprocessing (Biplet Crops)\")\n",
"        print(\"=\"*70)\n",
"\n",
"        temp_biplet_dir = os.path.join(output_dir, \"temp_biplet\")\n",
"        normalize_image_sizes_biplet(image_dir, temp_biplet_dir, size=square_size)\n",
"\n",
"        images_dir = os.path.join(output_dir, \"images\")\n",
"        os.makedirs(images_dir, exist_ok=True)\n",
"\n",
"        # Keep only the crop outputs (suffixes added by the preprocessor).\n",
"        biplet_suffixes = ['_left', '_right', '_top', '_bottom']\n",
"        copied_count = 0\n",
"        for img_file in os.listdir(temp_biplet_dir):\n",
"            if any(suffix in img_file for suffix in biplet_suffixes):\n",
"                src = os.path.join(temp_biplet_dir, img_file)\n",
"                dst = os.path.join(images_dir, img_file)\n",
"                shutil.copy2(src, dst)\n",
"                copied_count += 1\n",
"\n",
"        print(f\"✓ Copied {copied_count} biplet images to {images_dir}\")\n",
"\n",
"        # Preserve the untouched originals alongside the crops.\n",
"        original_images_dir = os.path.join(output_dir, \"original_images\")\n",
"        os.makedirs(original_images_dir, exist_ok=True)\n",
"\n",
"        original_count = 0\n",
"        valid_extensions = ('.jpg', '.jpeg', '.png', '.bmp')\n",
"        for img_file in os.listdir(image_dir):\n",
"            if img_file.lower().endswith(valid_extensions):\n",
"                src = os.path.join(image_dir, img_file)\n",
"                dst = os.path.join(original_images_dir, img_file)\n",
"                shutil.copy2(src, dst)\n",
"                original_count += 1\n",
"\n",
"        print(f\"✓ Saved {original_count} original images to {original_images_dir}\")\n",
"        shutil.rmtree(temp_biplet_dir)\n",
"        image_dir = images_dir\n",
"        clear_memory()\n",
"    else:\n",
"        images_dir = os.path.join(output_dir, \"images\")\n",
"        if not os.path.exists(images_dir):\n",
"            print(\"=\"*70)\n",
"            print(\"STEP 0: Copying images to output directory\")\n",
"            print(\"=\"*70)\n",
"            shutil.copytree(image_dir, images_dir)\n",
"            print(f\"✓ Copied images to {images_dir}\")\n",
"        image_dir = images_dir\n",
"\n",
"    # STEP 1: Loading Images\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"STEP 1: Loading and Preparing Images\")\n",
"    print(\"=\"*70)\n",
"\n",
"    image_paths = load_images_from_directory(image_dir, max_images=max_images)\n",
"    print(f\"Loaded {len(image_paths)} images\")\n",
"    clear_memory()\n",
"\n",
"    # STEP 2: Image Pair Selection (DINO)\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"STEP 2: Image Pair Selection (DINO)\")\n",
"    print(\"=\"*70)\n",
"\n",
"    # Hard memory guard: never run more than 50 pairs, whatever the caller asked for.\n",
"    max_pairs = min(max_pairs, 50)\n",
"    pairs = get_image_pairs_dino(image_paths, max_pairs=max_pairs)\n",
"    print(f\"Selected {len(pairs)} image pairs\")\n",
"    clear_memory()\n",
"\n",
"    # STEP 3: MASt3R 3D Reconstruction\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"STEP 3: MASt3R 3D Reconstruction\")\n",
"    print(\"=\"*70)\n",
"\n",
"    device = Config.DEVICE\n",
"    model = load_mast3r_model(device)\n",
"    scene, mast3r_images = run_mast3r_pairs(model, image_paths, pairs, device)\n",
"\n",
"    del model  # free GPU memory before the export stage\n",
"    clear_memory()\n",
"\n",
"    # STEP 4: Converting to COLMAP\n",
"    print(\"\\n\" + \"=\"*70)\n",
"    print(\"STEP 4: Converting to COLMAP (PINHOLE)\")\n",
"    print(\"=\"*70)\n",
"\n",
"    cameras_dict, pts3d, confidence = extract_camera_params_process2(\n",
"        scene=scene,\n",
"        image_paths=image_paths,\n",
"        conf_threshold=conf_threshold\n",
"    )\n",
"\n",
"    print(f\"Extracted {len(cameras_dict)} cameras with confidence >= {conf_threshold}\")\n",
"\n",
"    # Image dimensions taken from the first image (assumes all inputs share one size).\n",
"    from PIL import Image\n",
"    first_img = Image.open(image_paths[0])\n",
"    image_size = (first_img.width, first_img.height)\n",
"    first_img.close()\n",
"\n",
"    colmap_dir = os.path.join(output_dir, \"sparse/0\")\n",
"    os.makedirs(colmap_dir, exist_ok=True)\n",
"\n",
"    export_colmap_binary(\n",
"        cameras_dict=cameras_dict,\n",
"        pts3d=pts3d,\n",
"        confidence=confidence,\n",
"        image_size=image_size,\n",
"        output_dir=colmap_dir\n",
"    )\n",
"\n",
"    # Compare extraction methods (New vs Traditional).\n",
"    # Fix: forward the pipeline parameters instead of hardcoded values\n",
"    # (previously output_dir, conf_threshold and max_points were ignored here).\n",
"    comparison_results = compare_extraction_methods(\n",
"        scene=scene,\n",
"        image_paths=image_paths,\n",
"        output_dir=output_dir,\n",
"        conf_threshold=conf_threshold,\n",
"        max_points=max_points\n",
"    )\n",
"    return comparison_results"
] }, { "cell_type": "code", "execution_count": 13, "id": "b8999565", "metadata": { "execution": { "iopub.execute_input": "2026-01-25T04:41:19.648592Z", "iopub.status.busy": "2026-01-25T04:41:19.648389Z", "iopub.status.idle": "2026-01-25T04:43:06.834894Z", "shell.execute_reply": "2026-01-25T04:43:06.833947Z" }, "id": "_-8kDLieTKyG", "outputId": "beafd1de-a25c-4273-dfcb-10ca5301abb7", "papermill": { "duration": 107.195584, "end_time": "2026-01-25T04:43:06.836645", "exception": false, "start_time": "2026-01-25T04:41:19.641061", "status": "completed" }, "tags": [] },
"outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "======================================================================\n", "STEP 0: Image Preprocessing (Biplet Crops)\n", "======================================================================\n", "\n", "=== Generating Biplet Crops (1024x1024) ===\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Creating biplets: 100%|██████████| 40/40 [00:05<00:00, 7.52it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "✓ Biplet generation complete:\n", " Source images: 40\n", " Biplet crops generated: 80\n", " Original size distribution: {'1440x1920': 40}\n", "✓ Copied 80 biplet images to /kaggle/working/output/images\n", "✓ Saved 40 original images to /kaggle/working/output/original_images\n", "\n", "======================================================================\n", "STEP 1: Loading and Preparing Images\n", "======================================================================\n", "\n", "Loading images from: /kaggle/working/output/images\n", "⚠️ Limiting from 80 to 40 images\n", "✓ Found 40 images\n", "Loaded 40 images\n", "\n", "======================================================================\n", "STEP 2: Image Pair Selection (DINO)\n", "======================================================================\n", "\n", "=== Extracting DINO Global Features ===\n", "Initial memory state:\n", "GPU Memory - Allocated: 0.00GB, Reserved: 0.00GB\n", "CPU Memory Usage: 5.8%\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.12/dist-packages/huggingface_hub/file_download.py:942: FutureWarning: `resume_download` is deprecated and will be removed in version 1.0.0. Downloads always resume when possible. 
If you want to force a new download, use `force_download=True`.\n", " warnings.warn(\n" ] }, { "data": { "application/vnd.jupyter.widget-view+json": { "model_id": "65f45ad754db43b491f1d1f4e9dfa9da", "version_major": 2, "version_minor": 0 }, "text/plain": [ "preprocessor_config.json: 0%| | 0.00/436 [00:00> Loading a list of 40 images\n", " - adding /kaggle/working/output/images/image_001_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_001_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_002_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_002_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_003_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_003_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_004_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_004_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_005_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_005_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_006_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_006_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_007_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_007_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_008_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_008_top.jpeg with resolution 1024x1024 --> 224x224\n", " - 
adding /kaggle/working/output/images/image_009_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_009_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_010_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_010_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_011_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_011_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_012_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_012_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_013_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_013_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_014_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_014_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_015_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_015_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_016_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_016_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_017_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_017_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_018_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding 
/kaggle/working/output/images/image_018_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_019_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_019_top.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_020_bottom.jpeg with resolution 1024x1024 --> 224x224\n", " - adding /kaggle/working/output/images/image_020_top.jpeg with resolution 1024x1024 --> 224x224\n", " (Found 40 images)\n", "Loaded 40 images\n", "After loading images:\n", "GPU Memory - Allocated: 2.57GB, Reserved: 2.59GB\n", "CPU Memory Usage: 15.0%\n", "Creating 50 image pairs...\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "Preparing pairs: 100%|██████████| 50/50 [00:00<00:00, 372495.91it/s]\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Running MASt3R inference on 50 pairs...\n", ">> Inference with model on 50 image pairs\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ " 0%| | 0/50 [00:000.5): 2007040\n", "Extracted 40 cameras with confidence >= 0.5\n", "COLMAP cameras.bin saved to /kaggle/working/output/sparse/0/cameras.bin\n", "COLMAP images.bin saved to /kaggle/working/output/sparse/0/images.bin\n", "COLMAP points3D.bin saved to /kaggle/working/output/sparse/0/points3D.bin\n", "\n", "COLMAP binary files exported to /kaggle/working/output/sparse/0/\n", " - cameras.bin: 40 cameras (PINHOLE model)\n", " - images.bin: 40 images\n", " - points3D.bin: 2007040 points\n", "\n", "======================================================================\n", "COMPARISON: New vs Traditional Extraction Methods\n", "======================================================================\n", "\n", "--- METHOD 1: New Implementation (extract_camera_params_process2) ---\n", "\n", "=== Extracting Camera Parameters ===\n", "\n", "Example camera 0:\n", " Original size: 1024x1024\n", " MASt3R size: 224.0\n", " Scale factor: 4.571\n", " 
MASt3R focal: 290.26\n", " Scaled focal: 1326.93\n", " MASt3R pp: [112.00, 112.00]\n", " Scaled pp: [512.00, 512.00]\n", "✓ Extracted parameters for 40 cameras\n", "✓ Total 3D points: 2007040\n", "✓ Points after confidence filtering (>0.5): 2007040\n", "COLMAP cameras.bin saved to /kaggle/working/output/sparse_new/0/cameras.bin\n", "COLMAP images.bin saved to /kaggle/working/output/sparse_new/0/images.bin\n", "COLMAP points3D.bin saved to /kaggle/working/output/sparse_new/0/points3D.bin\n", "\n", "COLMAP binary files exported to /kaggle/working/output/sparse_new/0/\n", " - cameras.bin: 40 cameras (PINHOLE model)\n", " - images.bin: 40 images\n", " - points3D.bin: 2007040 points\n", "\n", "--- METHOD 2: Traditional Implementation (extract_colmap_data) ---\n", "\n", "=== [TRADITIONAL] Extracting COLMAP-compatible data ===\n", "pts_all type: \n", "pts_all is a list with 40 elements\n", "First element type: \n", "First element shape: torch.Size([224, 224, 3])\n", "pts_all shape after conversion: torch.Size([40, 224, 224, 3])\n", "Found batched point cloud: torch.Size([40, 224, 224, 3])\n", "✓ Extracted 2007040 3D points from 40 images\n", "\n", "⚠ Downsampling from 2007040 to 2000000 points...\n", "✓ Downsampled to 2000000 points\n", "Extracting camera parameters...\n", "Retrieved camera-to-world poses: shape (40, 4, 4)\n", "Converted to world-to-camera poses for COLMAP\n", "Focals shape: (40, 1)\n", "Principal points shape: (40, 2)\n", "\n", "Example camera 0:\n", " Image size: 1024x1024\n", " MASt3R focal: 290.26, pp: (112.00, 112.00)\n", " Scaled fx=1326.93, fy=1326.93, cx=512.00, cy=512.00\n", " Pose (first row): [ 0.5880269 -0.39172736 0.70765394 -0.42789218]\n", "\n", "✓ Extracted 40 cameras and 40 poses\n", "\n", "=== [TRADITIONAL] Saving COLMAP reconstruction ===\n", " ✓ Wrote 40 cameras\n", " ✓ Wrote 40 images\n", " ✓ Wrote 2000000 3D points\n", "\n", "✓ Traditional COLMAP reconstruction saved to /kaggle/working/output/sparse_traditional/0\n", "\n", 
"======================================================================\n", "Converting to CSV for comparison\n", "======================================================================\n", "\n", "=== Converting /kaggle/working/output/sparse_new/0 to CSV ===\n", "✓ Cameras CSV saved: /kaggle/working/output/comparison_new_cameras.csv\n", "✓ Images CSV saved: /kaggle/working/output/comparison_new_images.csv\n", "✓ Points3D CSV saved: /kaggle/working/output/comparison_new_points3d.csv (sampled 10036 / 2007040 points)\n", "\n", "=== Converting /kaggle/working/output/sparse_traditional/0 to CSV ===\n", "✓ Cameras CSV saved: /kaggle/working/output/comparison_traditional_cameras.csv\n", "✓ Images CSV saved: /kaggle/working/output/comparison_traditional_images.csv\n", "✓ Points3D CSV saved: /kaggle/working/output/comparison_traditional_points3d.csv (sampled 10000 / 2000000 points)\n", "\n", "======================================================================\n", "COMPARISON SUMMARY\n", "======================================================================\n", "\n", "CAMERAS:\n", " New method: 40 cameras\n", " Traditional method: 40 cameras\n", "\n", " Sample focal lengths:\n", " New: fx=1326.93\n", " Traditional: fx=1326.93\n", " Difference: 0.00\n", "\n", "IMAGES:\n", " New method: 40 images\n", " Traditional method: 40 images\n", "\n", " Sample translation (first image):\n", " New: [-0.42789217829704285, -0.25418615341186523, 0.06525076180696487]\n", " Traditional: [-0.42789217829704285, -0.25418615341186523, 0.06525076180696487]\n", " Distance: 0.000\n", "\n", "POINTS3D:\n", " New method: 10036 points (sampled)\n", " Traditional method: 10000 points (sampled)\n", "\n", " Center of points:\n", " New: [-0.14262852822224073, 0.0549652147331208, 1.0986959506175156]\n", " Traditional: [-0.1285176262434019, 0.05012467725782335, 1.1123415442317723]\n", " Distance: 0.020\n", "\n", "======================================================================\n", "CSV FILES 
SAVED:\n", "======================================================================\n", " New method:\n", " - /kaggle/working/output/comparison_new_cameras.csv\n", " - /kaggle/working/output/comparison_new_images.csv\n", " - /kaggle/working/output/comparison_new_points3d.csv\n", " Traditional method:\n", " - /kaggle/working/output/comparison_traditional_cameras.csv\n", " - /kaggle/working/output/comparison_traditional_images.csv\n", " - /kaggle/working/output/comparison_traditional_points3d.csv\n", "\n", "✓ Comparison complete! Review CSV files for detailed analysis.\n" ] } ], "source": [
"# =====================================================================\n",
"# CELL 15: Run Pipeline\n",
"# =====================================================================\n",
"if __name__ == \"__main__\":\n",
"    # All tunables for this run, gathered in one place.\n",
"    pipeline_args = dict(\n",
"        image_dir=\"/kaggle/input/two-dogs/fountain40\",\n",
"        output_dir=\"/kaggle/working/output\",\n",
"        square_size=1024,\n",
"        iterations=1000,\n",
"        max_images=40,\n",
"        max_pairs=1000,\n",
"        max_points=2000000,\n",
"        conf_threshold=0.5,\n",
"        preprocess_mode='biplet',  # or 'none'\n",
"    )\n",
"\n",
"    main_pipeline(**pipeline_args)\n"
] } ], "metadata": { "accelerator": "GPU", "colab": { "gpuType": "T4", "provenance": [] }, "kaggle": { "accelerator": "nvidiaTeslaT4", "dataSources": [ { "datasetId": 1429416, "sourceId": 14602001, "sourceType": "datasetVersion" } ], "dockerImageVersionId": 31260, "isGpuEnabled": true, "isInternetEnabled": true, "language": "python", "sourceType": "notebook" }, "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.12.12" }, "papermill": { "default_parameters": {}, "duration": 166.258596, "end_time": "2026-01-25T04:43:10.195792",
"environment_variables": {}, "exception": null, "input_path": "__notebook__.ipynb", "output_path": "__notebook__.ipynb", "parameters": {}, "start_time": "2026-01-25T04:40:23.937196", "version": "2.6.0" }, "widgets": { "application/vnd.jupyter.widget-state+json": { "state": { "0858b8ac343649d4a7e668a4e066ecca": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_2c065f340e6744a6a96ec4e11173ecd0", "placeholder": "​", "style": "IPY_MODEL_419dd223b68c482b8d50c7743747ee7e", "tabbable": null, "tooltip": null, "value": "config.json: 100%" } }, "085f7075649742f080d523a716a56ab5": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_85d6a034f54143c987e0948c3a62eb5c", "max": 436.0, "min": 0.0, "orientation": "horizontal", "style": "IPY_MODEL_3d15375cb2ef46bea2e29b146c93ee2f", "tabbable": null, "tooltip": null, "value": 436.0 } }, "116235e3a3454adca4e455f1e9b9f095": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": 
"@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_be6b76c0c79a4d0c9004335c9b770587", "IPY_MODEL_80f4e2999fa14965af13151f4f9bfc05", "IPY_MODEL_f9f7b39a25014d41a21e3ba1e8be4ec4" ], "layout": "IPY_MODEL_c3240efc9e05407ab9c4b30d8013be17", "tabbable": null, "tooltip": null } }, "1407af3d568147c8a3ce95822b5456ab": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_f2aaa1cc89ff45c18e662bea033994e3", "placeholder": "​", "style": "IPY_MODEL_64a71a696fbf4505b031e6ec12f068fb", "tabbable": null, "tooltip": null, "value": " 436/436 [00:00<00:00, 48.0kB/s]" } }, "1789f405c04d42a5b006ad41686f320d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "196e15cfd86f4c1aa62ea168132aca34": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, 
"border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "1a16710739c543f68df27818a9bd8ed8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "2c065f340e6744a6a96ec4e11173ecd0": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, 
"height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "2d7f1d2946954cb9abd4c691c01f1eae": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_914a61a3f6894a2a8a04cc0054e41bec", "max": 346345912.0, "min": 0.0, "orientation": "horizontal", "style": "IPY_MODEL_e082a73eb57a49ac938a2d43dfe8f2db", "tabbable": null, "tooltip": null, "value": 346345912.0 } }, "32ea5042f36f4ba0a4ae45695f43e8bd": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_eea29a17497c470a9cda1bec80f966b7", "placeholder": "​", "style": "IPY_MODEL_ab0e47ea27144d6a819ab2ed30855e8e", "tabbable": null, "tooltip": null, "value": "config.json: 100%" } }, "3c384b6b222b43e5b578991092efe193": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", 
"_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "3d15375cb2ef46bea2e29b146c93ee2f": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "416bf7487366461c8502fd5f9b71d6c0": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_4f28088b56f24f77a353062996de01ba", "max": 546.0, "min": 0.0, "orientation": "horizontal", "style": 
"IPY_MODEL_f8a6be9c362447c99fddd7959d23af71", "tabbable": null, "tooltip": null, "value": 546.0 } }, "419dd223b68c482b8d50c7743747ee7e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "48994ec661954d29807ea4723b03a540": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "491233c7b3394a24a7143167a944e110": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": 
"2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_8893de4170314bd2abf682e80205cf6e", "placeholder": "​", "style": "IPY_MODEL_e3c160b81c424ffea8069d1e402d4b86", "tabbable": null, "tooltip": null, "value": "preprocessor_config.json: 100%" } }, "4f28088b56f24f77a353062996de01ba": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "57c64a30e8a7449a9749b73c174398b9": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", 
"background": null, "description_width": "", "font_size": null, "text_color": null } }, "5975a462316b49c5be60bbf9f570620c": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "60db5d83ed8b4c8b988108f5cdd12664": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_f14c1463b5af463e84f5ed590b8641a1", "placeholder": "​", "style": "IPY_MODEL_1789f405c04d42a5b006ad41686f320d", "tabbable": null, "tooltip": null, "value": " 346M/346M [00:03<00:00, 113MB/s]" } }, "64a71a696fbf4505b031e6ec12f068fb": { "model_module": 
"@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "65405f0aab6a4f03b87290bd7f90e4b8": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "65f45ad754db43b491f1d1f4e9dfa9da": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_491233c7b3394a24a7143167a944e110", "IPY_MODEL_085f7075649742f080d523a716a56ab5", "IPY_MODEL_1407af3d568147c8a3ce95822b5456ab" ], "layout": "IPY_MODEL_3c384b6b222b43e5b578991092efe193", "tabbable": null, "tooltip": null } }, "6cbbc59bd779433e8597ea137170da35": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HBoxView", 
"box_style": "", "children": [ "IPY_MODEL_7e3c51236f4240f099567d0e5668719e", "IPY_MODEL_2d7f1d2946954cb9abd4c691c01f1eae", "IPY_MODEL_60db5d83ed8b4c8b988108f5cdd12664" ], "layout": "IPY_MODEL_5975a462316b49c5be60bbf9f570620c", "tabbable": null, "tooltip": null } }, "733d3ef92e634aa3985a450d6e8c1a79": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_bc36cef5023643329519f108a689a56f", "placeholder": "​", "style": "IPY_MODEL_1a16710739c543f68df27818a9bd8ed8", "tabbable": null, "tooltip": null, "value": " 548/548 [00:00<00:00, 66.5kB/s]" } }, "7456269e92f64e0aadc2fa163dc98414": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "7e3c51236f4240f099567d0e5668719e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_f44301cbc6b04658a7b20394fe7cb6ab", "placeholder": "​", "style": 
"IPY_MODEL_8f5203127ee14dae992b060de2093a5a", "tabbable": null, "tooltip": null, "value": "model.safetensors: 100%" } }, "80f4e2999fa14965af13151f4f9bfc05": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_48994ec661954d29807ea4723b03a540", "max": 2754661648.0, "min": 0.0, "orientation": "horizontal", "style": "IPY_MODEL_aee7ff577ba345b591791a3648688316", "tabbable": null, "tooltip": null, "value": 2754661648.0 } }, "85d6a034f54143c987e0948c3a62eb5c": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, 
"visibility": null, "width": null } }, "8893de4170314bd2abf682e80205cf6e": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "8f5203127ee14dae992b060de2093a5a": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "914a61a3f6894a2a8a04cc0054e41bec": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": 
"@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "9b442ac6848b4b81826a48af746acfa3": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": 
null, "visibility": null, "width": null } }, "a403fdb141c948d6a9e7210f035c63e1": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "ab0e47ea27144d6a819ab2ed30855e8e": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "aee7ff577ba345b591791a3648688316": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, 
"_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "bc36cef5023643329519f108a689a56f": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "be6b76c0c79a4d0c9004335c9b770587": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_f7911c68855f465b924c88e20b5539ed", "placeholder": "​", "style": "IPY_MODEL_7456269e92f64e0aadc2fa163dc98414", "tabbable": null, "tooltip": null, "value": "model.safetensors: 100%" } }, 
"c0495880686b4779bcd667ba65082882": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_32ea5042f36f4ba0a4ae45695f43e8bd", "IPY_MODEL_416bf7487366461c8502fd5f9b71d6c0", "IPY_MODEL_f9b2560865af4d3fa68726fd06c3749d" ], "layout": "IPY_MODEL_9b442ac6848b4b81826a48af746acfa3", "tabbable": null, "tooltip": null } }, "c3240efc9e05407ab9c4b30d8013be17": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "c3b6de88e8f24acc9350603c8ebbdbf1": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "ProgressStyleModel", 
"state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "d20adc0e44584c579ccaab2ae3e88057": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "FloatProgressModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "FloatProgressModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "ProgressView", "bar_style": "success", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_e584117c559e43169e63ef635a5fe0aa", "max": 548.0, "min": 0.0, "orientation": "horizontal", "style": "IPY_MODEL_c3b6de88e8f24acc9350603c8ebbdbf1", "tabbable": null, "tooltip": null, "value": 548.0 } }, "d8d183b40c034e1aa9e8c8f59eb66311": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HBoxModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HBoxModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HBoxView", "box_style": "", "children": [ "IPY_MODEL_0858b8ac343649d4a7e668a4e066ecca", "IPY_MODEL_d20adc0e44584c579ccaab2ae3e88057", "IPY_MODEL_733d3ef92e634aa3985a450d6e8c1a79" ], "layout": "IPY_MODEL_196e15cfd86f4c1aa62ea168132aca34", "tabbable": null, "tooltip": null } }, "e082a73eb57a49ac938a2d43dfe8f2db": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, 
"_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "e3c160b81c424ffea8069d1e402d4b86": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "background": null, "description_width": "", "font_size": null, "text_color": null } }, "e584117c559e43169e63ef635a5fe0aa": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "eb47735d06074fa18f309e7d770bb4ac": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", 
"_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "eea29a17497c470a9cda1bec80f966b7": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": 
null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f14c1463b5af463e84f5ed590b8641a1": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f2aaa1cc89ff45c18e662bea033994e3": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, 
"grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f44301cbc6b04658a7b20394fe7cb6ab": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f7911c68855f465b924c88e20b5539ed": { "model_module": "@jupyter-widgets/base", "model_module_version": "2.0.0", "model_name": "LayoutModel", "state": { "_model_module": "@jupyter-widgets/base", "_model_module_version": "2.0.0", "_model_name": "LayoutModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", 
"_view_module_version": "2.0.0", "_view_name": "LayoutView", "align_content": null, "align_items": null, "align_self": null, "border_bottom": null, "border_left": null, "border_right": null, "border_top": null, "bottom": null, "display": null, "flex": null, "flex_flow": null, "grid_area": null, "grid_auto_columns": null, "grid_auto_flow": null, "grid_auto_rows": null, "grid_column": null, "grid_gap": null, "grid_row": null, "grid_template_areas": null, "grid_template_columns": null, "grid_template_rows": null, "height": null, "justify_content": null, "justify_items": null, "left": null, "margin": null, "max_height": null, "max_width": null, "min_height": null, "min_width": null, "object_fit": null, "object_position": null, "order": null, "overflow": null, "padding": null, "right": null, "top": null, "visibility": null, "width": null } }, "f8a6be9c362447c99fddd7959d23af71": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "ProgressStyleModel", "state": { "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "ProgressStyleModel", "_view_count": null, "_view_module": "@jupyter-widgets/base", "_view_module_version": "2.0.0", "_view_name": "StyleView", "bar_color": null, "description_width": "" } }, "f9b2560865af4d3fa68726fd06c3749d": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_eb47735d06074fa18f309e7d770bb4ac", "placeholder": "​", "style": "IPY_MODEL_65405f0aab6a4f03b87290bd7f90e4b8", "tabbable": null, "tooltip": null, "value": " 546/546 [00:00<00:00, 68.3kB/s]" } }, 
"f9f7b39a25014d41a21e3ba1e8be4ec4": { "model_module": "@jupyter-widgets/controls", "model_module_version": "2.0.0", "model_name": "HTMLModel", "state": { "_dom_classes": [], "_model_module": "@jupyter-widgets/controls", "_model_module_version": "2.0.0", "_model_name": "HTMLModel", "_view_count": null, "_view_module": "@jupyter-widgets/controls", "_view_module_version": "2.0.0", "_view_name": "HTMLView", "description": "", "description_allow_html": false, "layout": "IPY_MODEL_a403fdb141c948d6a9e7210f035c63e1", "placeholder": "​", "style": "IPY_MODEL_57c64a30e8a7449a9749b73c174398b9", "tabbable": null, "tooltip": null, "value": " 2.75G/2.75G [00:07<00:00, 553MB/s]" } } }, "version_major": 2, "version_minor": 0 } } }, "nbformat": 4, "nbformat_minor": 5 }