{ "metadata": { "kernelspec": { "name": "python3", "display_name": "Python 3" }, "language_info": { "name": "python" }, "colab": { "provenance": [], "gpuType": "T4", "machine_shape": "hm" }, "accelerator": "GPU" }, "nbformat_minor": 0, "nbformat": 4, "cells": [ { "cell_type": "markdown", "source": [ "# **dino-lightglue-mast3r-gs-colab**\n", "2026/01/15-" ], "metadata": { "id": "qDQLX3PArmh8" } }, { "cell_type": "markdown", "source": [ "# **setup**" ], "metadata": { "id": "vXt8y7QyyRn9" } }, { "cell_type": "code", "source": [], "metadata": { "id": "wsKE_91KY70P" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# 1. NumPyを下げつつ、それと互換性のある ml_dtypes をセットで入れる\n", "!pip install numpy==1.26.4 ml_dtypes==0.5.4\n" ], "metadata": { "id": "zzIlYMf5ozkH", "colab": { "base_uri": "https://localhost:8080/", "height": 544 }, "outputId": "1b6b080d-a90c-4612-fe9b-7c285e4d6f28" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Collecting numpy==1.26.4\n", " Downloading numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (61 kB)\n", "\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/61.0 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m61.0/61.0 kB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hRequirement already satisfied: ml_dtypes==0.5.4 in /usr/local/lib/python3.12/dist-packages (0.5.4)\n", "Downloading numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (18.0 MB)\n", "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m18.0/18.0 MB\u001b[0m \u001b[31m121.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n", "\u001b[?25hInstalling collected packages: numpy\n", " Attempting uninstall: numpy\n", " Found existing installation: numpy 2.0.2\n", " Uninstalling numpy-2.0.2:\n", " Successfully uninstalled numpy-2.0.2\n", "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n", "opencv-contrib-python 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n", "pytensor 2.36.3 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n", "opencv-python-headless 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n", "shap 0.50.0 requires numpy>=2, but you have numpy 1.26.4 which is incompatible.\n", "tobler 0.13.0 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n", "opencv-python 4.12.0.88 requires numpy<2.3.0,>=2; python_version >= \"3.9\", but you have numpy 1.26.4 which is incompatible.\n", "rasterio 1.5.0 requires numpy>=2, but you have numpy 1.26.4 which is incompatible.\n", "jax 0.7.2 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\n", "jaxlib 0.7.2 requires numpy>=2.0, but you have numpy 1.26.4 which is incompatible.\u001b[0m\u001b[31m\n", "\u001b[0mSuccessfully installed numpy-1.26.4\n" ] }, { "output_type": "display_data", "data": { "application/vnd.colab-display-data+json": { "pip_warning": { "packages": [ "numpy" ] }, "id": "4e83aa346ea2418c9bf28e3be84e980b" } }, "metadata": {} } ] }, { "cell_type": "code", "source": [ "break" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/", "height": 110 }, "id": "Dsic4JslI1l4", "outputId": "25ee6ab0-42b1-42aa-a626-ed9331dfccbd" }, "execution_count": null, "outputs": [ { "output_type": "error", "ename": "SyntaxError", "evalue": "'break' outside loop (ipython-input-668683560.py, line 1)", "traceback": [ "\u001b[0;36m File \u001b[0;32m\"/tmp/ipython-input-668683560.py\"\u001b[0;36m, line \u001b[0;32m1\u001b[0m\n\u001b[0;31m break\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m 'break' outside loop\n" ] } ] }, { "cell_type": "markdown", "source": [ "#**セッションを再起動する、下のセルを実行する**" ], "metadata": { "id": "n3HClCivHr9W" } }, { "cell_type": "code", "source": [ "from google.colab import drive\n", "drive.mount('/content/drive')" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "37MIfauyj__U", "outputId": "b4b5b403-95ad-4027-8815-ae3956cf03cc" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Mounted at /content/drive\n" ] } ] }, { "cell_type": "code", "source": [ "import numpy as np\n", "print(f\"NumPy version: {np.__version__}\")\n", "\n", "import ml_dtypes\n", "print(f\"ML_Dtypes version: {ml_dtypes.__version__}\")\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "inVfYAjA2nQ9", "outputId": "4c59bb48-324e-4d17-dc69-9d53d55db806" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "NumPy version: 1.26.4\n", "ML_Dtypes version: 0.5.4\n" ] } ] }, { "cell_type": "code", "source": [ "from transformers import AutoImageProcessor, AutoModel" ], "metadata": { "id": "jTO3dSS5HXrC" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# manual run this cell\n", "\n", "import os\n", "import sys\n", "import subprocess\n", "import shutil\n", "import glob\n", "\n", "\n", "\n", "def setup_environment():\n", " \"\"\"\n", " Nuclear option: Physically delete ALL numpy installations,\n", " then install clean versions\n", " \"\"\"\n", "\n", " print(\"🚀 Force Delete NumPy 2.0.2 - Nuclear Option\")\n", "\n", " WORK_DIR = \"/content/gaussian-splatting\"\n", "\n", " # =====================================================================\n", " # STEP 1: System packages\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 1: System packages\")\n", " print(\"=\"*70)\n", "\n", " subprocess.run([\"apt-get\", \"update\", \"-qq\"], capture_output=True)\n", " subprocess.run([\n", " \"apt-get\", \"install\", \"-y\", \"-qq\",\n", " \"colmap\", \"build-essential\", \"cmake\", \"git\",\n", " \"libopenblas-dev\", \"xvfb\"\n", " ], capture_output=True)\n", "\n", " os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n", " os.environ[\"DISPLAY\"] = \":99\"\n", " subprocess.Popen(\n", " [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n", " stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL\n", " )\n", " print(\"✓ System packages installed\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 2: Clone Gaussian Splatting\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 2: Clone Gaussian Splatting\")\n", " print(\"=\"*70)\n", "\n", " if not os.path.exists(WORK_DIR):\n", " subprocess.run([\n", " \"git\", \"clone\", \"--recursive\",\n", " \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n", " WORK_DIR\n", " ], capture_output=True)\n", " print(\"✓ Cloned\")\n", " else:\n", " print(\"✓ Already exists\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 3: NUCLEAR - Physically delete ALL numpy\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 3: NUCLEAR - Force delete ALL NumPy installations\")\n", " print(\"=\"*70)\n", "\n", " # Uninstall via pip\n", " print(\"Uninstalling numpy and scipy via pip...\")\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\", \"scipy\"],\n", " capture_output=True\n", " )\n", "\n", " # Find ALL site-packages locations\n", " import site\n", " site_packages = site.getsitepackages() + [site.getusersitepackages()]\n", "\n", " print(f\"Searching {len(site_packages)} site-packages directories...\")\n", "\n", " deleted_count = 0\n", " for sp in site_packages:\n", " if not os.path.exists(sp):\n", " continue\n", "\n", " # Delete numpy directories\n", " numpy_dirs = glob.glob(os.path.join(sp, \"numpy*\"))\n", " for d in numpy_dirs:\n", " try:\n", " if os.path.isdir(d):\n", " shutil.rmtree(d)\n", " else:\n", " os.remove(d)\n", " print(f\" Deleted: {d}\")\n", " deleted_count += 1\n", " except Exception as e:\n", " print(f\" Warning: Could not delete {d}: {e}\")\n", "\n", " # Delete scipy directories\n", " scipy_dirs = glob.glob(os.path.join(sp, \"scipy*\"))\n", " for d in scipy_dirs:\n", " try:\n", " if os.path.isdir(d):\n", " shutil.rmtree(d)\n", " else:\n", " os.remove(d)\n", " print(f\" Deleted: {d}\")\n", " deleted_count += 1\n", " except Exception as e:\n", " print(f\" Warning: Could not delete {d}: {e}\")\n", "\n", " print(f\"✓ Deleted {deleted_count} numpy/scipy installations\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 4: Clean install - SciPy first\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 4: Clean install - SciPy first strategy\")\n", " print(\"=\"*70)\n", "\n", " # Install scipy (will install numpy 2.x)\n", " print(\"Installing scipy (with numpy 2.x)...\")\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\", \"scipy\"],\n", " capture_output=True,\n", " check=True\n", " )\n", "\n", " # Physically delete numpy 2.x that just got installed\n", " print(\"Deleting numpy 2.x that scipy installed...\")\n", " for sp in site_packages:\n", " if not os.path.exists(sp):\n", " continue\n", " numpy_dirs = glob.glob(os.path.join(sp, \"numpy*\"))\n", " for d in numpy_dirs:\n", " try:\n", " if os.path.isdir(d):\n", " shutil.rmtree(d)\n", " else:\n", " os.remove(d)\n", " print(f\" Deleted: {d}\")\n", " except:\n", " pass\n", "\n", " # Install numpy 1.26.4 cleanly\n", " print(\"Installing numpy 1.26.4...\")\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"],\n", " capture_output=True,\n", " check=True\n", " )\n", "\n", " print(\"✓ Clean numpy 1.26.4 + scipy installed\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 5: Install other packages\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 5: Install other packages\")\n", " print(\"=\"*70)\n", "\n", " packages = [\n", " \"torch torchvision torchaudio\",\n", " \"opencv-python pillow imageio imageio-ffmpeg plyfile tqdm tensorboard psutil\",\n", " \"transformers==4.40.0\",\n", " \"kornia h5py matplotlib\",\n", " \"git+https://github.com/cvg/LightGlue.git\",\n", " \"pycolmap\"\n", " ]\n", "\n", " for pkg in packages:\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\"] + pkg.split(),\n", " capture_output=True\n", " )\n", "\n", " print(\"✓ All packages installed\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 6: Build GS submodules\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 6: Build Gaussian Splatting submodules\")\n", " print(\"=\"*70)\n", "\n", " for name, repo in {\n", " \"diff-gaussian-rasterization\":\n", " \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n", " \"simple-knn\":\n", " \"https://github.com/camenduru/simple-knn.git\"\n", " }.items():\n", " path = os.path.join(WORK_DIR, \"submodules\", name)\n", " if not os.path.exists(path):\n", " subprocess.run([\"git\", \"clone\", repo, path], capture_output=True)\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\", path],\n", " capture_output=True\n", " )\n", "\n", " print(\"✓ Submodules built\")\n", "\n", "\n", " # =====================================================================\n", " # STEP 7: Final nuclear strike on numpy 2.x\n", " # =====================================================================\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"STEP 7: Final check and nuclear strike if needed\")\n", " print(\"=\"*70)\n", "\n", " # Check if numpy 2.x snuck back in\n", " for sp in site_packages:\n", " if not os.path.exists(sp):\n", " continue\n", "\n", " # Look for numpy-2.* directories\n", " numpy2_dirs = glob.glob(os.path.join(sp, \"numpy-2.*\"))\n", " if numpy2_dirs:\n", " print(f\"⚠️ Found numpy 2.x installations: {len(numpy2_dirs)}\")\n", " for d in numpy2_dirs:\n", " try:\n", " shutil.rmtree(d)\n", " print(f\" Nuked: {d}\")\n", " except:\n", " pass\n", "\n", " # Also check for numpy/__init__.py with version 2.x\n", " numpy_init = os.path.join(sp, \"numpy\", \"__init__.py\")\n", " if os.path.exists(numpy_init):\n", " try:\n", " with open(numpy_init, 'r') as f:\n", " content = f.read()\n", " if '__version__ = \"2.' in content or \"__version__ = '2.\" in content:\n", " print(f\"⚠️ Found numpy 2.x at {sp}/numpy\")\n", " shutil.rmtree(os.path.join(sp, \"numpy\"))\n", " print(f\" Nuked: {os.path.join(sp, 'numpy')}\")\n", " except:\n", " pass\n", "\n", " # Reinstall numpy 1.26.4 to be absolutely sure\n", " subprocess.run(\n", " [sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\", \"--force-reinstall\"],\n", " capture_output=True\n", " )\n", "\n", " print(\"✓ Final numpy cleanup complete\")\n", "\n", " return WORK_DIR\n", "\n", "if __name__ == \"__main__\":\n", " setup_environment()" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "cN8OS0ZvX018", "outputId": "914736f4-5c3e-47db-80b2-89d98b6ca961" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "🚀 Force Delete NumPy 2.0.2 - Nuclear Option\n", "\n", "======================================================================\n", "STEP 1: System packages\n", "======================================================================\n", "✓ System packages installed\n", "\n", "======================================================================\n", "STEP 2: Clone Gaussian Splatting\n", "======================================================================\n", "✓ Cloned\n", "\n", "======================================================================\n", "STEP 3: NUCLEAR - Force delete ALL NumPy installations\n", "======================================================================\n", "Uninstalling numpy and scipy via pip...\n", "Searching 4 site-packages directories...\n", "✓ Deleted 0 numpy/scipy installations\n", "\n", "======================================================================\n", "STEP 4: Clean install - SciPy first strategy\n", "======================================================================\n", "Installing scipy (with numpy 2.x)...\n", "Deleting numpy 2.x that scipy installed...\n", " Deleted: /usr/local/lib/python3.12/dist-packages/numpy-2.4.1.dist-info\n", " Deleted: /usr/local/lib/python3.12/dist-packages/numpy.libs\n", " Deleted: /usr/local/lib/python3.12/dist-packages/numpy\n", "Installing numpy 1.26.4...\n", "✓ Clean numpy 1.26.4 + scipy installed\n", "\n", "======================================================================\n", "STEP 5: Install other packages\n", "======================================================================\n", "✓ All packages installed\n", "\n", "======================================================================\n", "STEP 6: Build Gaussian Splatting submodules\n", "======================================================================\n", "✓ Submodules built\n", "\n", "======================================================================\n", "STEP 7: Final check and nuclear strike if needed\n", "======================================================================\n", "⚠️ Found numpy 2.x installations: 1\n", " Nuked: /usr/local/lib/python3.12/dist-packages/numpy-2.2.6.dist-info\n", "✓ Final numpy cleanup complete\n" ] } ] }, { "cell_type": "code", "source": [ "import numpy as np\n", "print(f\"✓ NumPy: {np.__version__}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "e2e1402f-e3f2-4260-aa05-b572c9ac2c74", "id": "nzzRu5emNQAj" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "✓ NumPy: 1.26.4\n" ] } ] }, { "cell_type": "code", "source": [ "import torch\n", "import PIL\n", "\n", "from transformers import AutoConfig\n", "from transformers import AutoImageProcessor" ], "metadata": { "id": "Ib-xRVVIy2PC" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import os\n", "import sys\n", "\n", "%cd /content/gaussian-splatting\n", "\n", "files = ['database.py', 'h5_to_db.py', 'metric.py']\n", "base_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n", "\n", "for file in files:\n", " if not os.path.exists(file):\n", " !wget -q {base_url + file}\n", " print(f\"✓ {file} download complete\")\n", " else:\n", " print(f\"✓ {file} already exists\")\n" ], "metadata": { "id": "eJrkKiCLzt1G", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "11f9b0b8-c08b-44f0-c50a-f480047b0363" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "/content/gaussian-splatting\n", "✓ database.py download complete\n", "✓ h5_to_db.py download complete\n", "✓ metric.py download complete\n" ] } ] }, { "cell_type": "code", "source": [ "from database import COLMAPDatabase, image_ids_to_pair_id\n", "from h5_to_db import add_keypoints, add_matches\n", "from metric import *" ], "metadata": { "id": "bmM_IBUtrEMd" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "#success\n", "\n", "def setup_mast3r():\n", " \"\"\"Install and setup MASt3R\"\"\"\n", " print(\"\\n=== Setting up MASt3R ===\")\n", "\n", " os.chdir('/content')\n", "\n", " # Remove existing installation\n", " if os.path.exists('mast3r'):\n", " print(\"Removing existing MASt3R installation...\")\n", " os.system('rm -rf mast3r')\n", "\n", " # Clone repository\n", " print(\"Cloning MASt3R repository...\")\n", " os.system('git clone --recursive https://github.com/naver/mast3r')\n", " os.chdir('/content/mast3r')\n", "\n", " # Check dust3r directory\n", " print(\"Checking dust3r structure...\")\n", " os.system('ls -la dust3r/')\n", "\n", " # Install dust3r\n", " print(\"Installing dust3r...\")\n", " os.system('cd dust3r && python -m pip install -e .')\n", "\n", " # Install croco\n", " print(\"Installing croco...\")\n", " os.system('cd dust3r/croco && python -m pip install -e .')\n", "\n", " # Install requirements\n", " print(\"Installing MASt3R requirements...\")\n", " os.system('pip install -r requirements.txt')\n", "\n", " # Download model weights\n", " print(\"Downloading model weights...\")\n", " os.system('mkdir -p checkpoints')\n", " os.system('wget -P checkpoints/ https://download.europe.naverlabs.com/ComputerVision/MASt3R/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth')\n", "\n", " # Install additional dependencies\n", " print(\"Installing additional dependencies...\")\n", " os.system('pip install trimesh matplotlib roma')\n", "\n", " # Add to path\n", " sys.path.insert(0, '/content/mast3r')\n", " sys.path.insert(0, '/content/mast3r/dust3r')\n", "\n", " # Verification\n", " print(\"\\n🔍 Verifying MASt3R installation...\")\n", " try:\n", " from mast3r.model import AsymmetricMASt3R\n", " print(\" ✓ MASt3R import: OK\")\n", " except Exception as e:\n", " print(f\" ❌ MASt3R import failed: {e}\")\n", " raise\n", "\n", " print(\"✓ MASt3R setup complete!\")\n", "\n", "if __name__ == \"__main__\":\n", " setup_mast3r()" ], "metadata": { "id": "3-CN6HJvZ6u2", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "04501f64-ad96-448e-aecb-8caceda0b04e" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "\n", "=== Setting up MASt3R ===\n", "Cloning MASt3R repository...\n", "Checking dust3r structure...\n", "Installing dust3r...\n", "Installing croco...\n", "Installing MASt3R requirements...\n", "Downloading model weights...\n", "Installing additional dependencies...\n", "\n", "🔍 Verifying MASt3R installation...\n", "Warning, cannot find cuda-compiled version of RoPE2D, using a slow pytorch version instead\n", " ✓ MASt3R import: OK\n", "✓ MASt3R setup complete!\n" ] } ] }, { "cell_type": "code", "source": [ "import torch\n", "import numpy as np\n", "import sys\n", "\n", "# listify関数も必要なので定義\n", "def listify(x):\n", " return list(x) if isinstance(x, (list, tuple)) else [x]" ], "metadata": { "id": "kTPzKB2vYn6b" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# /content/mast3r/dust3r/dust3r/utils/device.py の該当関数全体を置き換え\n", "\n", "def collate_with_cat(whatever, lists=False):\n", " if isinstance(whatever, (list, tuple)):\n", " if not whatever:\n", " return whatever\n", " elem = whatever[0]\n", "\n", " T = type(elem)\n", " if T is torch.Tensor or (T is torch.nn.parameter.Parameter):\n", " return listify(whatever) if lists else torch.cat(whatever)\n", "\n", " # numpyの型を確実に処理\n", " elem_type_name = type(elem).__name__\n", " elem_module = type(elem).__module__\n", "\n", " if elem_type_name == 'ndarray' or (elem_module == 'numpy' and elem_type_name == 'ndarray'):\n", " tensors = []\n", " for x in whatever:\n", " # 確実にnumpy配列として扱う\n", " if hasattr(x, '__array__'):\n", " arr = np.asarray(x)\n", " else:\n", " arr = np.array(x)\n", " tensors.append(torch.from_numpy(arr))\n", " return listify(tensors) if lists else torch.cat(tensors)" ], "metadata": { "id": "SpqfW2PDXp-H" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# MASt3Rのモジュールをインポート(まだインポートされていない場合)\n", "if 'dust3r.utils.device' not in sys.modules:\n", " from dust3r.utils import device as device_module\n", "else:\n", " device_module = sys.modules['dust3r.utils.device']\n", "\n", "# 関数を置き換え\n", "device_module.collate_with_cat = collate_with_cat\n", "print(\"✓ collate_with_cat関数を置き換えました\")" ], "metadata": { "id": "AUg6a4lEXeS-", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "5b5873d6-e7ca-40ea-b023-6a52d646f437" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "✓ collate_with_cat関数を置き換えました\n" ] } ] }, { "cell_type": "code", "source": [], "metadata": { "id": "oVGIvNWSpzlZ" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import os\n", "import sys\n", "import gc\n", "import h5py\n", "import numpy as np\n", "import torch\n", "import torch.nn.functional as F\n", "from tqdm import tqdm\n", "from pathlib import Path\n", "import subprocess\n", "\n", "\n", "# LightGlue\n", "from lightglue import ALIKED, LightGlue\n", "from lightglue.utils import load_image\n", "\n" ], "metadata": { "id": "GvtwW6aXpzeG" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "\"\"\"\n", "Gaussian Splatting Pipeline\n", "Simple and robust pipeline: LightGlue → COLMAP → Gaussian Splatting\n", "\"\"\"\n", "\"\"\"\n", "Gaussian Splatting Pipeline\n", "Simple and robust pipeline: LightGlue → MASt3R → Gaussian Splatting\n", "\"\"\"\n", "\n", "# ============================================================================\n", "# Configuration\n", "# ============================================================================\n", "class Config:\n", " # Feature extraction\n", " N_KEYPOINTS = 8192\n", " IMAGE_SIZE = 1024\n", "\n", " # Pair selection\n", " GLOBAL_TOPK = 200\n", " MIN_MATCHES = 10\n", " RATIO_THR = 1.2\n", "\n", " # Paths\n", " DINO_MODEL = \"facebook/dinov2-base\"\n", "\n", " # MASt3R settings (重要: これらが欠けていました!)\n", " MAST3R_MODEL = \"/content/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\"\n", " MAST3R_IMAGE_SIZE = 224 # メモリを節約するため小さめ(224 or 512)\n", "\n", " # Device\n", " DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')" ], "metadata": { "id": "7NfrJdMvrPZn" }, "outputs": [], "execution_count": null }, { "cell_type": "code", "source": [], "metadata": { "id": "eFExgZs-k0l9" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 0: images_square\n", "# ============================================================================\n", "\n", "def preprocess_images_square(input_dir, output_dir, size=1024, background='black'):\n", " \"\"\"\n", " Standardize all images to a square format (maintaining aspect ratio with padding).\n", "\n", " Args:\n", " input_dir (str): Directory containing input images.\n", " output_dir (str): Directory to save processed images.\n", " size (int): Target square dimension (default: 1024).\n", " background (str): Background style: 'black', 'white', or 'blur'.\n", " \"\"\"\n", " from PIL import Image, ImageFilter\n", " import os\n", " from tqdm import tqdm\n", "\n", " print(f\"\\n=== Preprocessing to {size}x{size} Square Images ===\")\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " image_files = sorted([\n", " f for f in os.listdir(input_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n", " ])\n", "\n", " stats = {\n", " 'total': len(image_files),\n", " 'landscape': 0,\n", " 'portrait': 0,\n", " 'square': 0,\n", " 'resized': 0,\n", " }\n", "\n", " for img_file in tqdm(image_files, desc=\"Converting to square\"):\n", " img_path = os.path.join(input_dir, img_file)\n", " img = Image.open(img_path).convert('RGB')\n", "\n", " width, height = img.size\n", "\n", " # Statistics\n", " if width > height:\n", " stats['landscape'] += 1\n", " elif width < height:\n", " stats['portrait'] += 1\n", " else:\n", " stats['square'] += 1\n", "\n", " # Resize based on the longest side\n", " max_dim = max(width, height)\n", " if max_dim != size:\n", " scale = size / max_dim\n", " new_width = int(width * scale)\n", " new_height = int(height * scale)\n", " img = img.resize((new_width, new_height), Image.LANCZOS)\n", " stats['resized'] += 1\n", " else:\n", " new_width, new_height = width, height\n", "\n", " # Create background\n", " if background == 'black':\n", " canvas = Image.new('RGB', (size, size), (0, 0, 0))\n", " elif background == 'white':\n", " canvas = Image.new('RGB', (size, size), (255, 255, 255))\n", " elif background == 'blur':\n", " # Use a blurred version of the image as background for a professional look\n", " canvas = img.resize((size, size), Image.LANCZOS)\n", " canvas = canvas.filter(ImageFilter.GaussianBlur(radius=20))\n", " else:\n", " canvas = Image.new('RGB', (size, size), (0, 0, 0))\n", "\n", " # Center the image\n", " offset_x = (size - new_width) // 2\n", " offset_y = (size - new_height) // 2\n", " canvas.paste(img, (offset_x, offset_y))\n", "\n", " # Save output\n", " output_path = os.path.join(output_dir, img_file)\n", " canvas.save(output_path, quality=95, optimize=True)\n", "\n", " print(f\"\\n✓ Preprocessing complete:\")\n", " print(f\" Total images: {stats['total']}\")\n", " print(f\" Landscape: {stats['landscape']} / Portrait: {stats['portrait']} / Square: {stats['square']}\")\n", " print(f\" Resized: {stats['resized']}\")\n", " print(f\" Output size: {size}x{size}\")\n", "\n", " return output_dir" ], "metadata": { "id": "TkVzKRqsvxFZ" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n", " \"\"\"\n", " Generates two square crops (Left & Right or Top & Bottom)\n", " from each image in a directory.\n", " \"\"\"\n", " if output_dir is None:\n", " output_dir = input_dir\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n", " print()\n", "\n", " converted_count = 0\n", " size_stats = {}\n", "\n", " for img_file in sorted(os.listdir(input_dir)):\n", " if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n", " continue\n", "\n", " input_path = os.path.join(input_dir, img_file)\n", "\n", " try:\n", " img = Image.open(input_path)\n", " original_size = img.size\n", "\n", " size_key = f\"{original_size[0]}x{original_size[1]}\"\n", " size_stats[size_key] = size_stats.get(size_key, 0) + 1\n", "\n", " # Generate 2 crops\n", " crops = generate_two_crops(img, size)\n", "\n", " base_name, ext = os.path.splitext(img_file)\n", " for mode, cropped_img in crops.items():\n", " output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n", " cropped_img.save(output_path, quality=95)\n", "\n", " converted_count += 1\n", " print(f\" ✓ {img_file}: {original_size} → 2 square images generated\")\n", "\n", " except Exception as e:\n", " print(f\" ✗ Error processing {img_file}: {e}\")\n", "\n", " print(f\"\\nProcessing complete: {converted_count} source images processed\")\n", " print(f\"Original size distribution: {size_stats}\")\n", " return converted_count\n", "\n", "\n", "def generate_two_crops(img, size):\n", " \"\"\"\n", " Crops the image into a square and returns 2 variations\n", " (Left/Right for landscape, Top/Bottom for portrait).\n", " \"\"\"\n", " width, height = img.size\n", " crop_size = min(width, height)\n", " crops = {}\n", "\n", " if width > height:\n", " # Landscape → Left & Right\n", " positions = {\n", " 'left': 0,\n", " 'right': width - crop_size\n", " }\n", " for mode, x_offset in positions.items():\n", " box = (x_offset, 0, x_offset + crop_size, crop_size)\n", " crops[mode] = img.crop(box).resize(\n", " (size, size),\n", " Image.Resampling.LANCZOS\n", " )\n", "\n", " else:\n", " # Portrait or Square → Top & Bottom\n", " positions = {\n", " 'top': 0,\n", " 'bottom': height - crop_size\n", " }\n", " for mode, y_offset in positions.items():\n", " box = (0, y_offset, crop_size, y_offset + crop_size)\n", " crops[mode] = img.crop(box).resize(\n", " (size, size),\n", " Image.Resampling.LANCZOS\n", " )\n", "\n", " return crops" ], "metadata": { "id": "A6smO9X0el3d" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n", "# ============================================================================\n", "\n", "def load_torch_image(fname, device):\n", " \"\"\"Load image as torch tensor\"\"\"\n", " from PIL import Image\n", " import torchvision.transforms as T\n", "\n", " img = Image.open(fname).convert('RGB')\n", " transform = T.Compose([\n", " T.ToTensor(),\n", " ])\n", " return transform(img).unsqueeze(0).to(device)\n", "\n", "def extract_dino_global(image_paths, model_path, device):\n", " \"\"\"Extract DINO global descriptors\"\"\"\n", " print(\"\\n=== Extracting DINO Global Features ===\")\n", "\n", " processor = AutoImageProcessor.from_pretrained(model_path)\n", " model = AutoModel.from_pretrained(model_path).eval().to(device)\n", "\n", " global_descs = []\n", " for img_path in tqdm(image_paths):\n", " img = load_torch_image(img_path, device)\n", " with torch.no_grad():\n", " inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n", " outputs = model(**inputs)\n", " desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n", " global_descs.append(desc.cpu())\n", "\n", " global_descs = torch.cat(global_descs, dim=0)\n", "\n", " del model\n", " torch.cuda.empty_cache()\n", " gc.collect()\n", "\n", " return global_descs\n", "\n", "def build_topk_pairs(global_feats, k, device):\n", " \"\"\"Build top-k similar pairs from global features\"\"\"\n", " g = global_feats.to(device)\n", " sim = g @ g.T\n", " sim.fill_diagonal_(-1)\n", "\n", " N = sim.size(0)\n", " k = min(k, N - 1)\n", "\n", " topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n", "\n", " pairs = []\n", " for i in range(N):\n", " for j in topk_indices[i]:\n", " j = j.item()\n", " if i < j:\n", " pairs.append((i, j))\n", "\n", " return list(set(pairs))\n", "\n", "def extract_aliked_features(image_paths, device):\n", " \"\"\"Extract ALIKED local features\"\"\"\n", " print(\"\\n=== Extracting ALIKED Local Features ===\")\n", "\n", " extractor = ALIKED(\n", " model_name=\"aliked-n16\",\n", " max_num_keypoints=Config.N_KEYPOINTS,\n", " detection_threshold=0.01,\n", " resize=Config.IMAGE_SIZE\n", " ).eval().to(device)\n", "\n", " features = []\n", " for img_path in tqdm(image_paths):\n", " img = load_torch_image(img_path, device)\n", " with torch.no_grad():\n", " feats = extractor.extract(img)\n", " kpts = feats['keypoints'].reshape(-1, 2).cpu()\n", " descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n", " features.append({'keypoints': kpts, 'descriptors': descs})\n", "\n", " del extractor\n", " torch.cuda.empty_cache()\n", " gc.collect()\n", "\n", " return features\n", "\n", "def verify_pairs_locally(pairs, features, device, threshold=Config.MIN_MATCHES):\n", " \"\"\"Verify pairs using local descriptor matching\"\"\"\n", " print(\"\\n=== Verifying Pairs with Local Features ===\")\n", "\n", " verified = []\n", " for i, j in tqdm(pairs):\n", " desc1 = features[i]['descriptors'].to(device)\n", " desc2 = features[j]['descriptors'].to(device)\n", "\n", " if len(desc1) == 0 or len(desc2) == 0:\n", " continue\n", "\n", " # Simple mutual nearest neighbor\n", " dist = torch.cdist(desc1, desc2, p=2)\n", " min_dist = dist.min(dim=1)[0]\n", " n_matches = (min_dist < Config.RATIO_THR).sum().item()\n", "\n", " if n_matches >= threshold:\n", " verified.append((i, j))\n", "\n", " return verified\n", "\n", "def get_image_pairs(image_paths):\n", " \"\"\"Main pair selection pipeline\"\"\"\n", " device = Config.DEVICE\n", "\n", " # 1. DINO global\n", " global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n", " pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n", "\n", " print(f\"Initial pairs from global features: {len(pairs)}\")\n", "\n", " # 2. ALIKED local\n", " features = extract_aliked_features(image_paths, device)\n", "\n", " # 3. Local verification\n", " verified_pairs = verify_pairs_locally(pairs, features, device)\n", "\n", " print(f\"Verified pairs: {len(verified_pairs)}\")\n", "\n", " return verified_pairs, features" ], "metadata": { "id": "FNjFURfYmVcL" }, "outputs": [], "execution_count": null }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 2: Feature Matching (ALIKED + LightGlue)\n", "# ============================================================================\n", "\n", "def match_pairs_lightglue(image_paths, pairs, features, output_dir):\n", " \"\"\"\n", " Match image pairs using LightGlue\n", " \"\"\"\n", " print(\"\\n=== Matching with LightGlue ===\")\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", " keypoints_path = os.path.join(output_dir, 'keypoints.h5')\n", " matches_path = os.path.join(output_dir, 'matches.h5')\n", "\n", " if os.path.exists(keypoints_path):\n", " os.remove(keypoints_path)\n", " if os.path.exists(matches_path):\n", " os.remove(matches_path)\n", "\n", " device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n", " extractor = ALIKED(max_num_keypoints=4096, detection_threshold=0.2, nms_radius=2).eval().to(device)\n", " matcher = LightGlue(features='aliked').eval().to(device)\n", "\n", " if isinstance(features, dict):\n", " all_keypoints = features['keypoints']\n", " all_descriptors = features['descriptors']\n", " elif isinstance(features, list):\n", " all_keypoints = [f['keypoints'] for f in features]\n", " all_descriptors = [f['descriptors'] for f in features]\n", " else:\n", " raise ValueError(f\"Unsupported features type: {type(features)}\")\n", "\n", " with h5py.File(keypoints_path, 'w') as f_kp:\n", " for idx, img_path in enumerate(tqdm(image_paths, desc=\"Saving keypoints\")):\n", " img_name = os.path.splitext(os.path.basename(img_path))[0]\n", "\n", " kp = all_keypoints[idx]\n", " if torch.is_tensor(kp):\n", " kp = kp.cpu().numpy()\n", " f_kp.create_dataset(img_name, data=kp)\n", "\n", " # Match pairs\n", " with h5py.File(matches_path, 'w') as f_match:\n", " for idx1, idx2 in tqdm(pairs, desc=\"Matching\"):\n", " with torch.no_grad():\n", " kp0 = all_keypoints[idx1]\n", " kp1 = all_keypoints[idx2]\n", " desc0 = all_descriptors[idx1]\n", " desc1 = all_descriptors[idx2]\n", "\n", " if isinstance(kp0, np.ndarray):\n", " kp0 = torch.from_numpy(kp0).float().to(device)\n", " kp1 = torch.from_numpy(kp1).float().to(device)\n", " desc0 = torch.from_numpy(desc0).float().to(device)\n", " desc1 = torch.from_numpy(desc1).float().to(device)\n", " else:\n", " kp0 = kp0.float().to(device)\n", " kp1 = kp1.float().to(device)\n", " desc0 = desc0.float().to(device)\n", " desc1 = desc1.float().to(device)\n", "\n", " feats0 = {\n", " 'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,\n", " 'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,\n", " }\n", " feats1 = {\n", " 'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,\n", " 'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,\n", " }\n", "\n", " matches01 = matcher({'image0': feats0, 'image1': feats1})\n", "\n", " if 'matches0' in matches01:\n", " matches0 = matches01['matches0']\n", " if isinstance(matches0, list):\n", " matches0 = matches0[0]\n", "\n", " # CUDAテンソルをCPUに移動\n", " if torch.is_tensor(matches0):\n", " matches0 = matches0.detach().cpu().numpy()\n", "\n", " valid = matches0 > -1\n", " if torch.is_tensor(valid):\n", " valid = valid.cpu().numpy()\n", "\n", " # 標準的なnumpy配列として取得\n", " valid_indices = np.where(valid)[0]\n", " valid_matches = matches0[valid]\n", "\n", " # 手動で2列の配列を構築\n", " n = len(valid_indices)\n", " matches = np.empty((n, 2), dtype=np.int64)\n", " matches[:, 0] = valid_indices\n", " matches[:, 1] = valid_matches\n", "\n", " elif 'matches' in matches01:\n", " m = matches01['matches']\n", " if torch.is_tensor(m):\n", " m = m.detach().cpu().numpy()\n", " matches = m\n", "\n", " else:\n", " continue\n", "\n", " if len(matches) > 0:\n", " img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]\n", " img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]\n", " pair_key = f\"{img_name1}_{img_name2}\"\n", " f_match.create_dataset(pair_key, data=matches)\n", "\n", " print(f\"✓ Matches saved to {matches_path}\")\n", "\n" ], "metadata": { "id": "X-PKgmdwmVcL" }, "outputs": [], "execution_count": null }, { "cell_type": "code", "source": [ "import numpy as np\n", "print(f\"✓ NumPy: {np.__version__}\")" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "Tg_SJYlwkeiD", "outputId": "9cc8b0cd-a2bf-4020-94e0-9739d4567770" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "✓ NumPy: 1.26.4\n" ] } ] }, { "cell_type": "code", "source": [ "import torch\n", "from pathlib import Path\n", "from tqdm import tqdm" ], "metadata": { "id": "7D86wFMan2X8" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# 1. まず、すべての通常のインポート\n", "# ============================================================================\n", "from dust3r.image_pairs import make_pairs\n", "from dust3r.inference import inference\n", "from dust3r.utils.image import load_images\n", "from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n", "\n", "# ============================================================================\n", "# 2. 次に、修正関数を定義してモンキーパッチ\n", "# ============================================================================\n", "import torch\n", "import numpy as np\n", "from dust3r.utils.device import to_cpu\n", "from dust3r.inference import check_if_same_size\n", "import dust3r.inference\n", "import dust3r.utils.misc\n", "\n", "# is_symmetrized関数を修正版に置き換え\n", "def is_symmetrized_fixed(gt1, gt2):\n", " \"\"\"\n", " is_symmetrizedの修正版 - IndexErrorを回避\n", " \"\"\"\n", " # instanceフィールドをチェック\n", " if 'instance' in gt1 and 'instance' in gt2:\n", " x = gt1['instance']\n", " y = gt2['instance']\n", "\n", " # リストの場合\n", " if isinstance(x, list) and isinstance(y, list):\n", " if len(x) != len(y):\n", " return False\n", " if len(x) < 2 or len(y) < 2:\n", " return False\n", " ok = True\n", " for i in range(0, len(x), 2):\n", " if i + 1 >= len(x) or i + 1 >= len(y):\n", " return False\n", " ok = ok and (x[i] == y[i + 1]) and (x[i + 1] == y[i])\n", " return ok\n", "\n", " # 文字列の場合\n", " elif isinstance(x, str) and isinstance(y, str):\n", " if len(x) != len(y):\n", " return False\n", " if len(x) < 2 or len(y) < 2:\n", " return False\n", " ok = True\n", " for i in range(0, len(x), 2):\n", " if i + 1 >= len(x) or i + 1 >= len(y):\n", " return False\n", " ok = ok and (x[i] == y[i + 1]) and (x[i + 1] == y[i])\n", " return ok\n", "\n", " return False\n", "\n", "\n", "def collate_with_cat_fixed(batch, lists=False):\n", " \"\"\"collate_with_catの修正版\"\"\"\n", " if not batch:\n", " return None\n", "\n", " if len(batch) == 1:\n", " elem = batch[0]\n", " if isinstance(elem, (list, tuple)) and len(elem) == 2:\n", " view1, view2 = elem\n", " if isinstance(view1, dict) and isinstance(view2, dict):\n", " view1 = convert_numpy_to_tensor(view1)\n", " view2 = convert_numpy_to_tensor(view2)\n", " return (view1, view2)\n", "\n", " if isinstance(batch[0], (list, tuple)):\n", " view1_list = []\n", " view2_list = []\n", "\n", " for pair in batch:\n", " if len(pair) == 2:\n", " v1 = convert_numpy_to_tensor(pair[0])\n", " v2 = convert_numpy_to_tensor(pair[1])\n", " view1_list.append(v1)\n", " view2_list.append(v2)\n", "\n", " def stack_dicts(dict_list):\n", " if not dict_list:\n", " return {}\n", "\n", " result = {}\n", " for key in dict_list[0].keys():\n", " values = [d[key] for d in dict_list]\n", "\n", " if isinstance(values[0], torch.Tensor):\n", " result[key] = torch.cat(values, dim=0)\n", " elif isinstance(values[0], np.ndarray):\n", " tensors = [torch.from_numpy(v) if isinstance(v, np.ndarray) else v for v in values]\n", " result[key] = torch.cat(tensors, dim=0)\n", " elif isinstance(values[0], (list, tuple)):\n", " result[key] = []\n", " for v in values:\n", " result[key].extend(v if isinstance(v, list) else [v])\n", " else:\n", " result[key] = values\n", "\n", " return result\n", "\n", " view1_batched = stack_dicts(view1_list)\n", " view2_batched = stack_dicts(view2_list)\n", "\n", " return (view1_batched, view2_batched)\n", "\n", " return None\n", "\n", "\n", "def convert_numpy_to_tensor(view_dict):\n", " \"\"\"辞書内のnumpy配列をTensorに変換\"\"\"\n", " result = {}\n", " for key, value in view_dict.items():\n", " if isinstance(value, np.ndarray):\n", " result[key] = torch.from_numpy(value)\n", " else:\n", " result[key] = value\n", " return result\n", "\n", "\n", "def loss_of_one_batch_fixed(batch, model, criterion, device, symmetrize_batch=False, use_amp=False, ret=None):\n", " view1, view2 = batch\n", " ignore_keys = set(['depthmap', 'dataset', 'label', 'instance', 'idx', 'true_shape', 'rng'])\n", " for view in batch:\n", " for name in view.keys():\n", " if name in ignore_keys:\n", " continue\n", " view[name] = view[name].to(device, non_blocking=True)\n", "\n", " with torch.cuda.amp.autocast(enabled=bool(use_amp)):\n", " pred1, pred2 = model(view1, view2)\n", "\n", " with torch.cuda.amp.autocast(enabled=False):\n", " loss = criterion(view1, view2, pred1, pred2) if criterion is not None else None\n", "\n", " result = dict(view1=view1, view2=view2, pred1=pred1, pred2=pred2, loss=loss)\n", " return result[ret] if ret else result\n", "\n", "\n", "@torch.no_grad()\n", "def inference_debug(pairs, model, device, batch_size=8, verbose=True):\n", " \"\"\"\n", " デバッグ機能を追加したinference関数\n", " \"\"\"\n", " if verbose:\n", " print(f'>> Inference with model on {len(pairs)} image pairs')\n", "\n", " result = []\n", "\n", " # Check if all images have the same size\n", " multiple_shapes = not (check_if_same_size(pairs))\n", " if multiple_shapes:\n", " batch_size = 1\n", "\n", " for i in tqdm(range(0, len(pairs), batch_size), disable=not verbose, desc=\"MASt3R inference\"):\n", " batch_pairs = pairs[i:i + batch_size]\n", "\n", " # 修正版のcollate関数を使用\n", " collated = collate_with_cat_fixed(batch_pairs)\n", "\n", " if collated is None:\n", " raise ValueError(f\"collate_with_cat_fixed returned None at batch {i}\")\n", "\n", " # 修正版のloss_of_one_batchを使用\n", " res = loss_of_one_batch_fixed(collated, model, None, device)\n", " result.append(to_cpu(res))\n", "\n", " # ===== ここを修正 =====\n", " # 結果の集約 - multiple_shapesに関わらず辞書形式で結合\n", " if len(result) == 0:\n", " return None\n", "\n", " # 各バッチの結果を結合\n", " combined = {}\n", "\n", " for key in result[0].keys():\n", " if isinstance(result[0][key], dict):\n", " # 辞書の場合:各フィールドを結合\n", " combined[key] = {}\n", " for field in result[0][key].keys():\n", " values = [r[key][field] for r in result]\n", "\n", " if isinstance(values[0], torch.Tensor):\n", " combined[key][field] = torch.cat(values, dim=0)\n", " elif isinstance(values[0], list):\n", " combined[key][field] = []\n", " for v in values:\n", " combined[key][field].extend(v if isinstance(v, list) else [v])\n", " else:\n", " combined[key][field] = values\n", "\n", " elif isinstance(result[0][key], torch.Tensor):\n", " values = [r[key] for r in result]\n", " combined[key] = torch.cat(values, dim=0)\n", "\n", " elif isinstance(result[0][key], list):\n", " combined[key] = []\n", " for r in result:\n", " combined[key].extend(r[key] if isinstance(r[key], list) else [r[key]])\n", "\n", " else:\n", " combined[key] = result[0][key]\n", "\n", " return combined\n", "\n", "\n", "# ============================================================================\n", "# 3. モンキーパッチを適用(これが最も重要!)\n", "# ============================================================================\n", "print(\"Applying monkey patches...\")\n", "dust3r.utils.misc.is_symmetrized = is_symmetrized_fixed\n", "dust3r.inference.inference = inference_debug\n", "inference = dust3r.inference.inference\n", "\n", "print(\"✓ Monkey-patched dust3r.utils.misc.is_symmetrized\")\n", "print(\"✓ Monkey-patched dust3r.inference.inference\")\n", "\n", "\n", "# 確認テスト\n", "print(\"\\n=== Verification ===\")\n", "test_gt1 = {'instance': '12'}\n", "test_gt2 = {'instance': '21'}\n", "try:\n", " result = dust3r.utils.misc.is_symmetrized(test_gt1, test_gt2)\n", " print(f\"✅ Monkey patch working! is_symmetrized test passed\")\n", "except IndexError as e:\n", " print(f\"❌ ERROR: Monkey patch failed! {e}\")\n", " raise\n", "\n", "# ===== すべてのdust3rモジュールでis_symmetrizedを置き換え =====\n", "import sys\n", "\n", "print(\"\\n=== Patching all loaded dust3r modules ===\")\n", "patched_count = 0\n", "\n", "for module_name, module in list(sys.modules.items()):\n", " if module is None:\n", " continue\n", "\n", " # dust3rまたはmast3r関連のモジュール\n", " if 'dust3r' in module_name or 'mast3r' in module_name:\n", " # is_symmetrized属性を持っている場合\n", " if hasattr(module, 'is_symmetrized'):\n", " old_func = module.is_symmetrized\n", " module.is_symmetrized = is_symmetrized_fixed\n", " patched_count += 1\n", " print(f\" ✓ Patched: {module_name}.is_symmetrized\")\n", "\n", " # モジュールの__dict__を直接チェック\n", " if hasattr(module, '__dict__'):\n", " for attr_name in list(module.__dict__.keys()):\n", " attr = getattr(module, attr_name, None)\n", " if callable(attr) and attr_name == 'is_symmetrized':\n", " setattr(module, attr_name, is_symmetrized_fixed)\n", "\n", "print(f\"\\n✓ Patched {patched_count} modules\")\n", "print(\"=\"*70)\n", "\n", "\n" ], "metadata": { "colab": { "base_uri": "https://localhost:8080/" }, "id": "gVeANB1q0W0p", "outputId": "c60c2b08-ee64-4d5a-ae65-06ff02ef812b" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "Applying monkey patches...\n", "✓ Monkey-patched dust3r.utils.misc.is_symmetrized\n", "✓ Monkey-patched dust3r.inference.inference\n", "\n", "=== Verification ===\n", "✅ Monkey patch working! is_symmetrized test passed\n", "\n", "=== Patching all loaded dust3r modules ===\n", " ✓ Patched: dust3r.utils.misc.is_symmetrized\n", " ✓ Patched: dust3r.model.is_symmetrized\n", " ✓ Patched: mast3r.model.is_symmetrized\n", "\n", "✓ Patched 3 modules\n", "======================================================================\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "/content/mast3r/dust3r/dust3r/cloud_opt/base_opt.py:275: FutureWarning: `torch.cuda.amp.autocast(args...)` is deprecated. Please use `torch.amp.autocast('cuda', args...)` instead.\n", " @torch.cuda.amp.autocast(enabled=False)\n" ] } ] }, { "cell_type": "code", "source": [ "def run_mast3r_reconstruction(image_paths, pairs, output_dir, model, device):\n", " \"\"\"MASt3Rで3D再構成を実行\"\"\"\n", " print(\"\\n=== Running MASt3R Reconstruction ===\")\n", "\n", " # メモリ状態を表示\n", " print_memory_status(\"Initial memory state\")\n", "\n", " print(f\"Processing {len(pairs)} pairs...\")\n", "\n", " # 画像サイズを決定(224または512)\n", " img_size = 224 # MASt3Rのデフォルト推論サイズ\n", "\n", " print(f\"Loading {len(image_paths)} images at {img_size}x{img_size}...\")\n", "\n", " # 画像を読み込み\n", " imgs = load_images(image_paths, size=img_size, verbose=True)\n", " print(f\"Loaded {len(imgs)} images\")\n", " print_memory_status(\"After loading images\")\n", "\n", " # ペアを作成\n", " print(f\"Creating {len(pairs)} image pairs...\")\n", " scene_graph = []\n", "\n", " for idx1, idx2 in tqdm(pairs, desc=\"Preparing pairs\"):\n", " # 画像インデックスが有効か確認\n", " if idx1 >= len(imgs) or idx2 >= len(imgs):\n", " print(f\"Warning: Invalid pair ({idx1}, {idx2}), skipping...\")\n", " continue\n", "\n", " # ペアを作成\n", " view1 = imgs[idx1]\n", " view2 = imgs[idx2]\n", "\n", " # viewがNoneでないか確認\n", " if view1 is None or view2 is None:\n", " print(f\"Warning: None view in pair ({idx1}, {idx2}), skipping...\")\n", " continue\n", "\n", " # viewが辞書形式か確認\n", " if not isinstance(view1, dict) or not isinstance(view2, dict):\n", " print(f\"Warning: Invalid view type in pair ({idx1}, {idx2})\")\n", " print(f\" view1 type: {type(view1)}, view2 type: {type(view2)}\")\n", " continue\n", "\n", " scene_graph.append((view1, view2))\n", "\n", " print(f\"Valid pairs: {len(scene_graph)}\")\n", "\n", " if len(scene_graph) == 0:\n", " raise ValueError(\"No valid pairs to process!\")\n", "\n", " # 最初のペアをデバッグ\n", " print(\"\\n=== Debugging first pair ===\")\n", " first_pair = scene_graph[0]\n", " print(f\"Pair type: {type(first_pair)}\")\n", " print(f\"View1 type: {type(first_pair[0])}\")\n", " print(f\"View2 type: {type(first_pair[1])}\")\n", " if isinstance(first_pair[0], dict):\n", " print(f\"View1 keys: {list(first_pair[0].keys())}\")\n", " if isinstance(first_pair[1], dict):\n", " print(f\"View2 keys: {list(first_pair[1].keys())}\")\n", "\n", " # MASt3Rで推論\n", " print(f\"\\nRunning MASt3R inference on {len(scene_graph)} pairs...\")\n", " try:\n", " pairs_output = inference(\n", " scene_graph,\n", " model,\n", " device,\n", " batch_size=1,\n", " verbose=True\n", " )\n", " except Exception as e:\n", " print(f\"Error during inference: {e}\")\n", " print(f\"Error type: {type(e)}\")\n", " import traceback\n", " traceback.print_exc()\n", " raise\n", "\n", " print(f\"Inference complete, got {len(pairs_output)} results\")\n", " print_memory_status(\"After inference\")\n", "\n", " # Global alignmentを実行\n", " print(\"\\n=== Running Global Alignment ===\")\n", " scene = global_aligner(\n", " pairs_output,\n", " device=device,\n", " mode=GlobalAlignerMode.PointCloudOptimizer,\n", " verbose=True\n", " )\n", "\n", " # 最適化\n", " print(\"Optimizing scene...\")\n", " loss = scene.compute_global_alignment(\n", " init='mst',\n", " niter=300,\n", " schedule='cosine',\n", " lr=0.01\n", " )\n", "\n", " print(f\"Optimization complete, final loss: {loss:.4f}\")\n", " print_memory_status(\"After optimization\")\n", "\n", " # COLMAP形式で保存\n", " colmap_dir = Path(output_dir) / \"colmap\"\n", " colmap_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " print(f\"\\n=== Saving to COLMAP format ===\")\n", " save_colmap_format(scene, imgs, colmap_dir)\n", "\n", " print(f\"✓ COLMAP data saved to {colmap_dir}\")\n", "\n", " return scene, colmap_dir\n", "\n", "\n" ], "metadata": { "id": "jC7gd4-ktXiz" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "def save_colmap_format(scene, imgs, output_dir):\n", " \"\"\"シーンをCOLMAP形式で保存\"\"\"\n", " from dust3r.cloud_opt.base_opt import BasePCOptimizer\n", "\n", " output_dir = Path(output_dir)\n", " output_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " # カメラパラメータを取得\n", " focals = scene.get_focals().cpu().numpy()\n", " principal_points = scene.get_principal_points().cpu().numpy()\n", " poses = scene.get_im_poses().cpu().numpy()\n", " pts3d = scene.get_pts3d().cpu().numpy()\n", "\n", " n_images = len(imgs)\n", "\n", " # cameras.txt\n", " cameras_file = output_dir / \"cameras.txt\"\n", " with open(cameras_file, 'w') as f:\n", " f.write(\"# Camera list with one line of data per camera:\\n\")\n", " f.write(\"# CAMERA_ID, MODEL, WIDTH, HEIGHT, PARAMS[]\\n\")\n", "\n", " for i in range(n_images):\n", " # SIMPLE_PINHOLE モデル (f, cx, cy)\n", " img_shape = imgs[i]['true_shape']\n", " width, height = img_shape[1], img_shape[0]\n", " fx = focals[i, 0]\n", " cx, cy = principal_points[i]\n", "\n", " f.write(f\"{i+1} SIMPLE_PINHOLE {width} {height} {fx} {cx} {cy}\\n\")\n", "\n", " print(f\"✓ Saved cameras.txt with {n_images} cameras\")\n", "\n", " # images.txt\n", " images_file = output_dir / \"images.txt\"\n", " with open(images_file, 'w') as f:\n", " f.write(\"# Image list with two lines of data per image:\\n\")\n", " f.write(\"# IMAGE_ID, QW, QX, QY, QZ, TX, TY, TZ, CAMERA_ID, NAME\\n\")\n", " f.write(\"# POINTS2D[] as (X, Y, POINT3D_ID)\\n\")\n", "\n", " for i in range(n_images):\n", " # ポーズを回転とtranslationに分解\n", " pose = poses[i]\n", " R = pose[:3, :3]\n", " t = pose[:3, 3]\n", "\n", " # 回転行列をクォータニオンに変換\n", " from scipy.spatial.transform import Rotation\n", " quat = Rotation.from_matrix(R).as_quat() # [x, y, z, w]\n", " qw, qx, qy, qz = quat[3], quat[0], quat[1], quat[2]\n", "\n", " img_name = Path(imgs[i]['filepath']).name\n", "\n", " f.write(f\"{i+1} {qw} {qx} {qy} {qz} {t[0]} {t[1]} {t[2]} {i+1} {img_name}\\n\")\n", " f.write(\"\\n\") # 2D pointsの行(空)\n", "\n", " print(f\"✓ Saved images.txt with {n_images} images\")\n", "\n", " # points3D.txt\n", " points_file = output_dir / \"points3D.txt\"\n", " with open(points_file, 'w') as f:\n", " f.write(\"# 3D point list with one line of data per point:\\n\")\n", " f.write(\"# POINT3D_ID, X, Y, Z, R, G, B, ERROR, TRACK[] as (IMAGE_ID, POINT2D_IDX)\\n\")\n", "\n", " # 全画像の3Dポイントを集約\n", " point_id = 1\n", " for i in range(n_images):\n", " pts = pts3d[i] # (H, W, 3)\n", "\n", " # 有効なポイントのみ保存\n", " valid_mask = ~np.isnan(pts).any(axis=-1)\n", " valid_pts = pts[valid_mask]\n", "\n", " for pt in valid_pts[:1000]: # 各画像から最大1000点\n", " # デフォルトカラー(グレー)\n", " f.write(f\"{point_id} {pt[0]} {pt[1]} {pt[2]} 128 128 128 0.0\\n\")\n", " point_id += 1\n", "\n", " print(f\"✓ Saved points3D.txt with {point_id-1} points\")\n", "\n", "\n", "def print_memory_status(label=\"\"):\n", " \"\"\"メモリ使用状況を表示\"\"\"\n", " import psutil\n", "\n", " if torch.cuda.is_available():\n", " allocated = torch.cuda.memory_allocated() / 1024**3\n", " reserved = torch.cuda.memory_reserved() / 1024**3\n", " print(f\"{label}:\")\n", " print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n", "\n", " cpu_percent = psutil.virtual_memory().percent\n", " print(f\"CPU Memory Usage: {cpu_percent:.1f}%\")" ], "metadata": { "id": "BJDvEDwBnlJm" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "\n", "# ============================================================================\n", "# Step 3 & 4: MASt3R Reconstruction (COLMAPの代替)\n", "# ============================================================================\n", "\n", "import struct\n", "from pathlib import Path\n", "\n", "def import_to_mast3r_and_save_colmap(\n", " image_dir,\n", " processed_image_dir,\n", " feature_dir,\n", " database_path,\n", " output_dir,\n", " pairs,\n", " single_camera=True\n", "):\n", " \"\"\"\n", " MASt3Rを使用してカメラポーズを推定し、COLMAP形式で保存\n", " \"\"\"\n", " print(\"\\n=== Running MASt3R Reconstruction ===\")\n", " print(\"Initial memory state:\")\n", " get_memory_info()\n", "\n", " # MASt3Rモデルのロード\n", " from mast3r.model import AsymmetricMASt3R\n", " device = Config.DEVICE\n", "\n", " model = AsymmetricMASt3R.from_pretrained(Config.MAST3R_MODEL).to(device)\n", " model.eval()\n", " print(f\"✓ MASt3R model loaded on {device}\")\n", "\n", " # 画像パスの取得\n", " image_paths = sorted([\n", " os.path.join(processed_image_dir, f)\n", " for f in os.listdir(processed_image_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n", " ])\n", "\n", " # MASt3Rで再構成\n", " scene, mast3r_images = run_mast3r_pairs(\n", " model, image_paths, pairs, device,\n", " batch_size=1\n", " )\n", "\n", " # モデルをメモリから削除\n", " del model\n", " clear_memory()\n", "\n", " # COLMAP形式のデータを抽出\n", " pts3d, colors, cameras, poses = extract_colmap_data(\n", " scene, image_paths, max_points=1000000\n", " )\n", "\n", " # COLMAP形式で保存\n", " sparse_dir = save_colmap_reconstruction(\n", " pts3d, colors, cameras, poses, image_paths, output_dir\n", " )\n", "\n", " print(f\"\\n✓ MASt3R reconstruction saved in COLMAP format\")\n", " print(f\" Output: {sparse_dir}\")\n", "\n", " return sparse_dir\n", "\n", "\n", "def run_mast3r_mapper(database_path, image_dir, output_dir, pairs, processed_image_dir):\n", " \"\"\"\n", " MASt3Rを使用したマッピング(COLMAPの代替)\n", " \"\"\"\n", " print(\"\\n=== MASt3R Mapper (COLMAP Alternative) ===\")\n", "\n", " # MASt3Rで再構成してCOLMAP形式で保存\n", " sparse_dir = import_to_mast3r_and_save_colmap(\n", " image_dir=image_dir,\n", " processed_image_dir=processed_image_dir,\n", " feature_dir=None,\n", " database_path=database_path,\n", " output_dir=output_dir,\n", " pairs=pairs,\n", " single_camera=True\n", " )\n", "\n", " # sparse/0 ディレクトリが存在することを確認\n", " model_dir = sparse_dir\n", " if not os.path.exists(model_dir):\n", " raise RuntimeError(f\"MASt3R reconstruction failed - directory not found: {model_dir}\")\n", "\n", " # 必要なファイルが存在することを確認\n", " required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n", " for file in required_files:\n", " file_path = os.path.join(model_dir, file)\n", " if not os.path.exists(file_path):\n", " raise FileNotFoundError(f\"Required file not found: {file}\")\n", "\n", " print(f\"\\n✓ MASt3R reconstruction complete: {model_dir}\")\n", " return model_dir\n", "\n", "\n", "def load_images_for_mast3r(image_paths, size=224):\n", " \"\"\"MASt3R用に画像をロード\"\"\"\n", " print(f\"\\n=== Loading images for MASt3R (size={size}) ===\")\n", "\n", " from dust3r.utils.image import load_images\n", "\n", " images = load_images(image_paths, size=size, verbose=True)\n", "\n", " return images\n", "\n", "\n", "\n", "\n", "\n", "def run_mast3r_pairs(model, image_paths, pairs, device='cuda', batch_size=1, chunk_size=500):\n", " \"\"\"\n", " 選択されたペアでMASt3Rを実行(画像もチャンクごとにロード、結合も最適化)\n", " \"\"\"\n", " print(\"\\n=== Running MASt3R Reconstruction ===\")\n", " print(\"Initial memory state:\")\n", " get_memory_info()\n", "\n", " from dust3r.cloud_opt import global_aligner, GlobalAlignerMode\n", " import dust3r.inference\n", " import pickle\n", " import tempfile\n", "\n", " print(f\"Processing {len(pairs)} pairs in chunks of {chunk_size}...\")\n", " print(\"Note: Images will be loaded on-demand per chunk to save memory\")\n", "\n", " # 一時ディレクトリを作成\n", " temp_dir = tempfile.mkdtemp()\n", " print(f\"Temporary directory: {temp_dir}\")\n", "\n", " # チャンクごとに処理してディスクに保存\n", " chunk_files = []\n", " num_chunks = (len(pairs) + chunk_size - 1) // chunk_size\n", "\n", " for chunk_idx in range(num_chunks):\n", " start_idx = chunk_idx * chunk_size\n", " end_idx = min(start_idx + chunk_size, len(pairs))\n", " chunk_pairs_indices = pairs[start_idx:end_idx]\n", "\n", " print(f\"\\n--- Processing chunk {chunk_idx + 1}/{num_chunks} (pairs {start_idx}-{end_idx}) ---\")\n", "\n", " # このチャンクで必要な画像インデックスを収集\n", " needed_image_indices = set()\n", " for idx1, idx2 in chunk_pairs_indices:\n", " needed_image_indices.add(idx1)\n", " needed_image_indices.add(idx2)\n", "\n", " needed_image_indices = sorted(list(needed_image_indices))\n", " print(f\"Loading {len(needed_image_indices)} unique images for this chunk...\")\n", "\n", " # 必要な画像だけロード\n", " needed_image_paths = [image_paths[i] for i in needed_image_indices]\n", " chunk_images = load_images_for_mast3r(needed_image_paths, size=Config.MAST3R_IMAGE_SIZE)\n", "\n", " # インデックスマッピングを作成(元のインデックス → チャンク内インデックス)\n", " index_mapping = {orig_idx: new_idx for new_idx, orig_idx in enumerate(needed_image_indices)}\n", "\n", " print(f\"Memory after loading chunk images:\")\n", " get_memory_info()\n", "\n", " # 画像ペアを作成(インデックスを変換)\n", " mast3r_pairs = []\n", " for idx1, idx2 in tqdm(chunk_pairs_indices, desc=f\"Preparing chunk {chunk_idx + 1}\"):\n", " new_idx1 = index_mapping[idx1]\n", " new_idx2 = index_mapping[idx2]\n", " mast3r_pairs.append([chunk_images[new_idx1], chunk_images[new_idx2]])\n", "\n", " # 推論を実行\n", " print(f\"Running MASt3R inference on {len(mast3r_pairs)} pairs...\")\n", " output = dust3r.inference.inference(mast3r_pairs, model, device, batch_size=batch_size, verbose=True)\n", "\n", " # ディスクに保存\n", " chunk_file = os.path.join(temp_dir, f'chunk_{chunk_idx}.pkl')\n", " with open(chunk_file, 'wb') as f:\n", " pickle.dump(output, f)\n", " chunk_files.append(chunk_file)\n", "\n", " del mast3r_pairs\n", " del chunk_images # 画像も削除\n", " del output\n", " clear_memory()\n", "\n", " print(f\"Chunk {chunk_idx + 1} saved to disk. Memory state:\")\n", " get_memory_info()\n", "\n", " # ディスクから読み込んで結合(メモリ効率化版)\n", " print(\"\\n=== Combining all chunks from disk ===\")\n", "\n", " # まず最初の2チャンクを結合\n", " print(f\"Loading and combining chunks 1-2...\")\n", " with open(chunk_files[0], 'rb') as f:\n", " combined_output = pickle.load(f)\n", " os.remove(chunk_files[0])\n", "\n", " with open(chunk_files[1], 'rb') as f:\n", " chunk_output = pickle.load(f)\n", "\n", " for key in combined_output.keys():\n", " if isinstance(combined_output[key], dict):\n", " for field in combined_output[key].keys():\n", " if isinstance(combined_output[key][field], torch.Tensor):\n", " combined_output[key][field] = torch.cat([\n", " combined_output[key][field],\n", " chunk_output[key][field]\n", " ], dim=0)\n", " elif isinstance(combined_output[key][field], list):\n", " combined_output[key][field].extend(chunk_output[key][field])\n", " elif isinstance(combined_output[key], torch.Tensor):\n", " combined_output[key] = torch.cat([\n", " combined_output[key],\n", " chunk_output[key]\n", " ], dim=0)\n", " elif isinstance(combined_output[key], list):\n", " combined_output[key].extend(chunk_output[key])\n", "\n", " del chunk_output\n", " os.remove(chunk_files[1])\n", " clear_memory()\n", "\n", " # 残りのチャンクを1つずつ結合\n", " for idx in range(2, len(chunk_files)):\n", " print(f\"Loading and combining chunk {idx + 1}/{len(chunk_files)}...\")\n", "\n", " with open(chunk_files[idx], 'rb') as f:\n", " chunk_output = pickle.load(f)\n", "\n", " for key in combined_output.keys():\n", " if isinstance(combined_output[key], dict):\n", " for field in combined_output[key].keys():\n", " if isinstance(combined_output[key][field], torch.Tensor):\n", " # メモリ効率化: 結合後に元のTensorを削除\n", " old_tensor = combined_output[key][field]\n", " combined_output[key][field] = torch.cat([\n", " old_tensor,\n", " chunk_output[key][field]\n", " ], dim=0)\n", " del old_tensor\n", " elif isinstance(combined_output[key][field], list):\n", " combined_output[key][field].extend(chunk_output[key][field])\n", "\n", " elif isinstance(combined_output[key], torch.Tensor):\n", " old_tensor = combined_output[key]\n", " combined_output[key] = torch.cat([\n", " old_tensor,\n", " chunk_output[key]\n", " ], dim=0)\n", " del old_tensor\n", "\n", " elif isinstance(combined_output[key], list):\n", " combined_output[key].extend(chunk_output[key])\n", "\n", " del chunk_output\n", " os.remove(chunk_files[idx])\n", " clear_memory()\n", "\n", " # 進捗確認\n", " if (idx + 1) % 3 == 0:\n", " print(f\" Memory after combining {idx + 1} chunks:\")\n", " get_memory_info()\n", "\n", " os.rmdir(temp_dir)\n", "\n", " print(f\"✓ Combined output keys: {list(combined_output.keys())}\")\n", " print(\"After combining all chunks:\")\n", " get_memory_info()\n", "\n", " print(\"✓ MASt3R inference complete\")\n", "\n", " # 最後にグローバルアライメント用に全画像をロード\n", " print(\"\\nLoading all images for global alignment...\")\n", " images = load_images_for_mast3r(image_paths, size=Config.MAST3R_IMAGE_SIZE)\n", " print(\"Memory after loading all images:\")\n", " get_memory_info()\n", "\n", " # グローバルアライメント\n", " print(\"\\nRunning global alignment...\")\n", " scene = global_aligner(\n", " combined_output,\n", " device=device,\n", " mode=GlobalAlignerMode.PointCloudOptimizer\n", " )\n", "\n", " del combined_output\n", " clear_memory()\n", "\n", " print(\"Computing global alignment...\")\n", " loss = scene.compute_global_alignment(\n", " init=\"mst\",\n", " niter=150,\n", " schedule='cosine',\n", " lr=0.01\n", " )\n", "\n", " print(f\"✓ Global alignment complete (final loss: {loss:.6f})\")\n", " print(\"Final memory state:\")\n", " get_memory_info()\n", "\n", " return scene, images\n", "\n", "\n", "\n", "### revised for mast3r\n", "def extract_colmap_data(scene, image_paths, max_points=1000000):\n", " \"\"\"\n", " Extract COLMAP-compatible camera parameters and 3D points from MASt3R scene\n", "\n", " Args:\n", " scene: MASt3R scene object\n", " image_paths: List of image paths\n", " max_points: Maximum number of 3D points to extract (default: 1M)\n", " \"\"\"\n", " print(\"\\n=== Extracting COLMAP-compatible data ===\")\n", "\n", " # Extract point cloud\n", " pts_all = scene.get_pts3d()\n", " print(f\"pts_all type: {type(pts_all)}\")\n", "\n", " if isinstance(pts_all, list):\n", " print(f\"pts_all is a list with {len(pts_all)} elements\")\n", " if len(pts_all) > 0:\n", " print(f\"First element type: {type(pts_all[0])}\")\n", " if hasattr(pts_all[0], 'shape'):\n", " print(f\"First element shape: {pts_all[0].shape}\")\n", "\n", " pts_all = torch.stack([p if isinstance(p, torch.Tensor) else torch.tensor(p)\n", " for p in pts_all])\n", " print(f\"pts_all shape after conversion: {pts_all.shape}\")\n", "\n", " if len(pts_all.shape) == 4:\n", " print(f\"Found batched point cloud: {pts_all.shape}\")\n", " B, H, W, _ = pts_all.shape\n", " pts3d = pts_all.reshape(-1, 3).detach().cpu().numpy()\n", "\n", " # Extract colors\n", " colors = []\n", " for img_path in image_paths:\n", " img = Image.open(img_path).resize((W, H))\n", " colors.append(np.array(img))\n", " colors = np.stack(colors).reshape(-1, 3) / 255.0\n", " else:\n", " pts3d = pts_all.detach().cpu().numpy() if isinstance(pts_all, torch.Tensor) else pts_all\n", " colors = np.ones((len(pts3d), 3)) * 0.5\n", "\n", " print(f\"✓ Extracted {len(pts3d)} 3D points from {len(image_paths)} images\")\n", "\n", " # **DOWNSAMPLE POINTS TO REDUCE MEMORY USAGE**\n", " if len(pts3d) > max_points:\n", " print(f\"\\n⚠ Downsampling from {len(pts3d)} to {max_points} points to reduce memory usage...\")\n", "\n", " # Remove invalid points first\n", " valid_mask = ~(np.isnan(pts3d).any(axis=1) | np.isinf(pts3d).any(axis=1))\n", " pts3d_valid = pts3d[valid_mask]\n", " colors_valid = colors[valid_mask]\n", "\n", " # Random sampling\n", " indices = np.random.choice(len(pts3d_valid), size=max_points, replace=False)\n", " pts3d = pts3d_valid[indices]\n", " colors = colors_valid[indices]\n", "\n", " print(f\"✓ Downsampled to {len(pts3d)} points\")\n", "\n", " # Extract camera parameters\n", " print(\"Extracting camera parameters...\")\n", "\n", " # 【重要】MASt3Rのポーズはcamera-to-world形式\n", " # COLMAPはworld-to-camera形式を要求するので逆行列が必要\n", " poses_c2w = scene.get_im_poses().detach().cpu().numpy()\n", " print(f\"Retrieved camera-to-world poses: shape {poses_c2w.shape}\")\n", "\n", " # camera-to-world を world-to-camera に変換\n", " poses = []\n", " for i, pose_c2w in enumerate(poses_c2w):\n", " # 4x4行列の逆行列を計算\n", " pose_w2c = np.linalg.inv(pose_c2w)\n", " poses.append(pose_w2c)\n", "\n", " poses = np.array(poses)\n", " print(f\"Converted to world-to-camera poses for COLMAP\")\n", "\n", " # 焦点距離と主点を取得\n", " focals = scene.get_focals().detach().cpu().numpy()\n", " pp = scene.get_principal_points().detach().cpu().numpy()\n", " print(f\"Focals shape: {focals.shape}\")\n", " print(f\"Principal points shape: {pp.shape}\")\n", "\n", " # MASt3Rの処理サイズ(通常224x224)\n", " mast3r_size = 224.0\n", "\n", " cameras = []\n", " for i, img_path in enumerate(image_paths):\n", " img = Image.open(img_path)\n", " W, H = img.size\n", "\n", " # 元画像サイズとのスケール比\n", " scale = W / mast3r_size\n", "\n", " # focalsは[N,1]の形式(fx=fyの等方性カメラ)\n", " if focals.shape[1] == 1:\n", " focal_mast3r = float(focals[i, 0])\n", " fx = fy = focal_mast3r * scale\n", " else:\n", " fx = float(focals[i, 0]) * scale\n", " fy = float(focals[i, 1]) * scale\n", "\n", " # 主点もスケーリング\n", " cx = float(pp[i, 0]) * scale\n", " cy = float(pp[i, 1]) * scale\n", "\n", " camera = {\n", " 'camera_id': i + 1,\n", " 'model': 'PINHOLE',\n", " 'width': W,\n", " 'height': H,\n", " 'params': [fx, fy, cx, cy]\n", " }\n", " cameras.append(camera)\n", "\n", " if i == 0:\n", " print(f\"\\nExample camera 0:\")\n", " print(f\" Image size: {W}x{H}\")\n", " print(f\" MASt3R focal: {focal_mast3r:.2f}, pp: ({pp[i,0]:.2f}, {pp[i,1]:.2f})\")\n", " print(f\" Scaled fx={fx:.2f}, fy={fy:.2f}, cx={cx:.2f}, cy={cy:.2f}\")\n", " print(f\" Pose (first row): {poses[i][0]}\")\n", "\n", " print(f\"\\n✓ Extracted {len(cameras)} cameras and {len(poses)} poses\")\n", "\n", " return pts3d, colors, cameras, poses\n", "\n", "\n", "\n", "\n", "def save_colmap_reconstruction(pts3d, colors, cameras, poses, image_paths, output_dir):\n", " \"\"\"COLMAP形式で再構成を保存\"\"\"\n", " print(\"\\n=== Saving COLMAP reconstruction ===\")\n", "\n", " sparse_dir = Path(output_dir) / 'sparse' / '0'\n", " sparse_dir.mkdir(parents=True, exist_ok=True)\n", "\n", " print(f\" Writing COLMAP files to {sparse_dir}...\")\n", "\n", " write_cameras_binary(cameras, sparse_dir / 'cameras.bin')\n", " print(f\" ✓ Wrote {len(cameras)} cameras\")\n", "\n", " write_images_binary(image_paths, cameras, poses, sparse_dir / 'images.bin')\n", " print(f\" ✓ Wrote {len(image_paths)} images\")\n", "\n", " num_points = write_points3d_binary(pts3d, colors, sparse_dir / 'points3D.bin')\n", " print(f\" ✓ Wrote {num_points} 3D points\")\n", "\n", " print(f\"\\n✓ COLMAP reconstruction saved to {sparse_dir}\")\n", "\n", " return sparse_dir\n", "\n", "\n", "def write_cameras_binary(cameras, output_file):\n", " \"\"\"cameras.binをCOLMAPバイナリ形式で書き込み\"\"\"\n", " with open(output_file, 'wb') as f:\n", " f.write(struct.pack('Q', len(cameras)))\n", "\n", " for i, cam in enumerate(cameras):\n", " camera_id = cam.get('camera_id', i + 1)\n", " model_id = 1\n", " width = cam['width']\n", " height = cam['height']\n", " params = cam['params']\n", "\n", " f.write(struct.pack('i', camera_id))\n", " f.write(struct.pack('i', model_id))\n", " f.write(struct.pack('Q', width))\n", " f.write(struct.pack('Q', height))\n", "\n", " for param in params[:4]:\n", " f.write(struct.pack('d', param))\n", "\n", "\n", "def write_images_binary(image_paths, cameras, poses, output_file):\n", " \"\"\"images.binをCOLMAPバイナリ形式で書き込み\"\"\"\n", " with open(output_file, 'wb') as f:\n", " f.write(struct.pack('Q', len(image_paths)))\n", "\n", " for i, (img_path, pose) in enumerate(zip(image_paths, poses)):\n", " image_id = i + 1\n", " camera_id = cameras[i].get('camera_id', i + 1)\n", " image_name = os.path.basename(img_path)\n", "\n", " R = pose[:3, :3]\n", " t = pose[:3, 3]\n", "\n", " qvec = rotmat2qvec(R)\n", " tvec = t\n", "\n", " f.write(struct.pack('i', image_id))\n", "\n", " for q in qvec:\n", " f.write(struct.pack('d', float(q)))\n", "\n", " for tv in tvec:\n", " f.write(struct.pack('d', float(tv)))\n", "\n", " f.write(struct.pack('i', camera_id))\n", " f.write(image_name.encode('utf-8') + b'\\x00')\n", " f.write(struct.pack('Q', 0))\n", "\n", "\n", "def write_points3d_binary(pts3d, colors, output_file):\n", " \"\"\"points3D.binをCOLMAPバイナリ形式で書き込み\"\"\"\n", " valid_indices = []\n", " for i, pt in enumerate(pts3d):\n", " if not (np.isnan(pt).any() or np.isinf(pt).any()):\n", " valid_indices.append(i)\n", "\n", " with open(output_file, 'wb') as f:\n", " f.write(struct.pack('Q', len(valid_indices)))\n", "\n", " for idx, point_id in enumerate(valid_indices):\n", " pt = pts3d[point_id]\n", " color = colors[point_id]\n", "\n", " f.write(struct.pack('Q', point_id))\n", "\n", " for coord in pt:\n", " f.write(struct.pack('d', float(coord)))\n", "\n", " col_int = (color * 255).astype(np.uint8)\n", " for c in col_int:\n", " f.write(struct.pack('B', int(c)))\n", "\n", " f.write(struct.pack('d', 0.0))\n", " f.write(struct.pack('Q', 0))\n", "\n", " if (idx + 1) % 1000000 == 0:\n", " print(f\" Wrote {idx + 1} / {len(valid_indices)} points...\")\n", "\n", " return len(valid_indices)\n", "\n", "\n", "def rotmat2qvec(R):\n", " \"\"\"回転行列をクォータニオンに変換\"\"\"\n", " R = np.asarray(R, dtype=np.float64)\n", " trace = np.trace(R)\n", "\n", " if trace > 0:\n", " s = 0.5 / np.sqrt(trace + 1.0)\n", " w = 0.25 / s\n", " x = (R[2, 1] - R[1, 2]) * s\n", " y = (R[0, 2] - R[2, 0]) * s\n", " z = (R[1, 0] - R[0, 1]) * s\n", " elif R[0, 0] > R[1, 1] and R[0, 0] > R[2, 2]:\n", " s = 2.0 * np.sqrt(1.0 + R[0, 0] - R[1, 1] - R[2, 2])\n", " w = (R[2, 1] - R[1, 2]) / s\n", " x = 0.25 * s\n", " y = (R[0, 1] + R[1, 0]) / s\n", " z = (R[0, 2] + R[2, 0]) / s\n", " elif R[1, 1] > R[2, 2]:\n", " s = 2.0 * np.sqrt(1.0 + R[1, 1] - R[0, 0] - R[2, 2])\n", " w = (R[0, 2] - R[2, 0]) / s\n", " x = (R[0, 1] + R[1, 0]) / s\n", " y = 0.25 * s\n", " z = (R[1, 2] + R[2, 1]) / s\n", " else:\n", " s = 2.0 * np.sqrt(1.0 + R[2, 2] - R[0, 0] - R[1, 1])\n", " w = (R[1, 0] - R[0, 1]) / s\n", " x = (R[0, 2] + R[2, 0]) / s\n", " y = (R[1, 2] + R[2, 1]) / s\n", " z = 0.25 * s\n", "\n", " qvec = np.array([w, x, y, z], dtype=np.float64)\n", " qvec = qvec / np.linalg.norm(qvec)\n", "\n", " return qvec\n", "\n", "\n", "# メモリ管理ユーティリティ(必要に応じて追加)\n", "def clear_memory():\n", " \"\"\"GPUとCPUメモリを積極的にクリア\"\"\"\n", " gc.collect()\n", " if torch.cuda.is_available():\n", " torch.cuda.empty_cache()\n", " torch.cuda.synchronize()\n", "\n", "\n", "def get_memory_info():\n", " \"\"\"現在のメモリ使用状況を取得\"\"\"\n", " if torch.cuda.is_available():\n", " allocated = torch.cuda.memory_allocated() / 1024**3\n", " reserved = torch.cuda.memory_reserved() / 1024**3\n", " print(f\"GPU Memory - Allocated: {allocated:.2f}GB, Reserved: {reserved:.2f}GB\")\n", "\n", " import psutil\n", " cpu_mem = psutil.virtual_memory().percent\n", " print(f\"CPU Memory Usage: {cpu_mem:.1f}%\")" ], "metadata": { "id": "imNgPK2Phwi8" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [], "metadata": { "id": "0LHFaucWicrB" } }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 3: Import to COLMAP (too be removed)\n", "# ============================================================================\n", "\n", "def import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):\n", " \"\"\"\n", " Import features and matches to COLMAP database\n", "\n", " Args:\n", " image_dir (str): Directory containing the images.\n", " feature_dir (str): Directory to save/load extracted features.\n", " database_path (str): Path to the database file.\n", " single_camera (bool): Set to True if all images have the same dimensions (e.g., pre-resized).\n", " \"\"\"\n", " print(\"\\n=== Creating COLMAP Database ===\")\n", "\n", " if os.path.exists(database_path):\n", " os.remove(database_path)\n", " print(f\"✓ Removed existing database\")\n", "\n", " db = COLMAPDatabase.connect(database_path)\n", " db.create_tables()\n", "\n", " print(f\"Single camera mode: {single_camera}\")\n", "\n", " image_files = [f for f in os.listdir(image_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))]\n", " if not image_files:\n", " raise ValueError(f\"No images found in {image_dir}\")\n", "\n", " first_image = sorted(image_files)[0]\n", " img_ext = os.path.splitext(first_image)[1]\n", " print(f\"Detected image extension: '{img_ext}'\")\n", "\n", " fname_to_id = add_keypoints(\n", " db,\n", " feature_dir,\n", " image_dir,\n", " img_ext,\n", " 'PINHOLE',\n", " single_camera=single_camera\n", " )\n", "\n", " add_matches(db, feature_dir, fname_to_id)\n", " db.commit()\n", " db.close()\n", "\n", " print(f\"✓ Database created: {database_path}\")\n", "\n", "# ============================================================================\n", "# Step 4: Run COLMAP Mapper\n", "# ============================================================================\n", "\n", "def run_colmap_mapper(database_path, image_dir, output_dir):\n", " \"\"\"\n", " Run COLMAP mapper with verbose output\n", " \"\"\"\n", " print(\"\\n=== Running COLMAP Reconstruction ===\")\n", " os.makedirs(output_dir, exist_ok=True)\n", " cmd = [\n", " 'colmap', 'mapper',\n", " '--database_path', database_path,\n", " '--image_path', image_dir,\n", " '--output_path', output_dir,\n", " '--Mapper.ba_refine_focal_length', '0',\n", " '--Mapper.ba_refine_principal_point', '0',\n", " '--Mapper.ba_refine_extra_params', '0',\n", " '--Mapper.min_num_matches', '15',\n", " '--Mapper.init_min_num_inliers', '50',\n", " '--Mapper.max_num_models', '1',\n", " '--Mapper.num_threads', '16',\n", " ]\n", " print(f\"Command: {' '.join(cmd)}\\n\")\n", "\n", " process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n", " for line in process.stdout:\n", " print(line, end='')\n", " process.wait()\n", " if process.returncode == 0:\n", " model_dir = os.path.join(output_dir, '0')\n", " if os.path.exists(model_dir):\n", " print(f\"\\n✓ COLMAP reconstruction complete: {model_dir}\")\n", " return model_dir\n", " raise RuntimeError(\"COLMAP reconstruction failed\")" ], "metadata": { "id": "NJedFruCmVcL" }, "outputs": [], "execution_count": null }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# Step 5: Convert to Gaussian Splatting Format (if needed)\n", "# ============================================================================\n", "\n", "def convert_to_gs_format(colmap_model_dir, output_dir):\n", " \"\"\"\n", " Verify COLMAP output and prepare paths for Gaussian Splatting.\n", "\n", " Args:\n", " colmap_model_dir (str): Path to the COLMAP sparse/0 directory.\n", " Example: /content/output/colmap/sparse/0\n", " output_dir (str): Base output directory.\n", "\n", " Returns:\n", " colmap_parent_dir (str): The path to be passed to Gaussian Splatting.\n", " Example: /content/output/colmap (Parent directory containing 'sparse/')\n", " \"\"\"\n", " print(\"\\n=== Verifying COLMAP Model for Gaussian Splatting ===\")\n", "\n", " import pycolmap\n", " reconstruction = pycolmap.Reconstruction(colmap_model_dir)\n", "\n", " print(f\"Registered images: {len(reconstruction.images)}\")\n", " print(f\"3D points: {len(reconstruction.points3D)}\")\n", "\n", " # Check for files required by Gaussian Splatting\n", " required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n", " for file in required_files:\n", " file_path = os.path.join(colmap_model_dir, file)\n", " if not os.path.exists(file_path):\n", " raise FileNotFoundError(f\"Required file not found: {file}\")\n", " print(f\" ✓ {file}\")\n", "\n", " # Return the grandparent directory of sparse/0\n", " # /content/output/colmap/sparse/0 -> /content/output/colmap\n", " colmap_parent_dir = os.path.dirname(os.path.dirname(colmap_model_dir))\n", "\n", " print(f\"\\n✓ COLMAP model ready for Gaussian Splatting\")\n", " print(f\" Source path: {colmap_parent_dir}\")\n", "\n", " return colmap_parent_dir" ], "metadata": { "id": "4IioqnC1mVcM" }, "outputs": [], "execution_count": null }, { "cell_type": "code", "source": [ "def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=6000):\n", " \"\"\"\n", " Gaussian Splattingモデルをトレーニング\n", " \"\"\"\n", " print(\"\\n=== Training Gaussian Splatting ===\")\n", "\n", " # 環境の修正\n", " print(\"Checking and fixing Python environment...\")\n", " import subprocess\n", " subprocess.run([\"pip\", \"install\", \"--upgrade\", \"--force-reinstall\", \"-q\", \"scipy\", \"scikit-learn\"], check=True)\n", " print(\"✓ Environment fixed\")\n", "\n", " # COLMAPのsparseディレクトリを確認\n", " sparse_dir = os.path.join(colmap_dir, 'sparse', '0')\n", " if not os.path.exists(sparse_dir):\n", " raise FileNotFoundError(f\"COLMAP sparse directory not found: {sparse_dir}\")\n", "\n", " print(f\"COLMAP sparse model: {sparse_dir}\")\n", " print(f\"Training images: {image_dir}\")\n", " print(f\"Output: {output_dir}\")\n", " print(f\"Iterations: {iterations}\")\n", "\n", " os.makedirs(output_dir, exist_ok=True)\n", "\n", " # Gaussian Splattingのディレクトリに移動\n", " original_dir = os.getcwd()\n", " os.chdir(\"/content/gaussian-splatting\")\n", "\n", " # トレーニングコマンド\n", " cmd = [\n", " \"python\", \"train.py\",\n", " \"-s\", colmap_dir,\n", " \"--images\", image_dir,\n", " \"-m\", output_dir,\n", " \"--iterations\", str(iterations),\n", " \"--test_iterations\", str(iterations // 2), str(iterations),\n", " \"--save_iterations\", str(iterations // 2), str(iterations)\n", " ]\n", "\n", " print(f\"\\nCommand: {' '.join(cmd)}\\n\")\n", "\n", " # 実行\n", " result = subprocess.run(cmd, capture_output=True, text=True)\n", "\n", " # 元のディレクトリに戻る\n", " os.chdir(original_dir)\n", "\n", " print(\"STDOUT:\", result.stdout)\n", " if result.stderr:\n", " print(\"STDERR:\", result.stderr)\n", "\n", " if result.returncode != 0:\n", " raise RuntimeError(\"Gaussian Splatting training failed\")\n", "\n", " # 生成されたPLYファイルの存在確認\n", " ply_path = os.path.join(output_dir, f\"point_cloud/iteration_{iterations}/point_cloud.ply\")\n", " if not os.path.exists(ply_path):\n", " raise FileNotFoundError(f\"Expected output file not found: {ply_path}\")\n", "\n", " print(f\"\\n✓ Gaussian Splatting training complete!\")\n", " print(f\" Model saved to: {output_dir}\")\n", " print(f\" Point cloud: {ply_path}\")\n", "\n", " return output_dir" ], "metadata": { "id": "EiHoRSfzQ01b" }, "execution_count": null, "outputs": [] }, { "cell_type": "markdown", "source": [ "# **main**" ], "metadata": { "id": "IqNcsheVywit" } }, { "cell_type": "code", "source": [ "from PIL import Image, ImageFilter" ], "metadata": { "id": "yeP98DO30gNl" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "import numpy as np\n", "print(f\"✓ NumPy: {np.__version__}\")" ], "metadata": { "id": "D0r5QQNg2GPl", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "74932505-08da-43f9-d0d3-8831e52569f7" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "✓ NumPy: 1.26.4\n" ] } ] }, { "cell_type": "code", "source": [ "# ============================================================================\n", "# メインパイプライン関数(修正版)\n", "# ============================================================================\n", "def main_pipeline(image_dir, output_dir, square_size=512, iterations=6000):\n", " \"\"\"\n", " 完全なパイプライン: Images → Square Processing → MASt3R → Gaussian Splatting\n", " 変更点:\n", " - Step 3: import_to_colmap を import_to_mast3r_and_save_colmap に置き換え\n", " - Step 4: run_colmap_mapper を run_mast3r_mapper に置き換え\n", " \"\"\"\n", " print(\"=\"*70)\n", " print(\"Gaussian Splatting Preparation Pipeline (MASt3R Version)\")\n", " print(\"=\"*70)\n", "\n", " # Step 0: 画像を正方形フォーマットに標準化\n", " processed_image_dir = os.path.join(output_dir, \"processed_images\")\n", " normalize_image_sizes_biplet(\n", " input_dir=image_dir,\n", " output_dir=processed_image_dir,\n", " size=square_size\n", " )\n", "\n", " # パスの設定\n", " feature_dir = os.path.join(output_dir, 'features')\n", " colmap_dir = os.path.join(output_dir, 'colmap')\n", " database_path = os.path.join(colmap_dir, 'database.db')\n", " sparse_dir = os.path.join(colmap_dir, 'sparse')\n", " os.makedirs(output_dir, exist_ok=True)\n", " os.makedirs(colmap_dir, exist_ok=True)\n", "\n", " # 画像パスを取得\n", " image_paths = sorted([\n", " os.path.join(processed_image_dir, f)\n", " for f in os.listdir(processed_image_dir)\n", " if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n", " ])\n", " print(f\"\\n📸 Found {len(image_paths)} images\")\n", "\n", " # Step 1: 画像ペアを生成\n", " pairs, features = get_image_pairs(image_paths)\n", "\n", " # Step 2: LightGlueで特徴マッチング\n", " match_pairs_lightglue(image_paths, pairs, features, feature_dir)\n", "\n", " ######### Step 3 and Step 4: MASt3R for reconstruction (not COLMAP) #########\n", " # Step 3 & 4: MASt3Rでカメラポーズを推定し、COLMAP形式で保存\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"Step 3 & 4: MASt3R Reconstruction → COLMAP Format\")\n", " print(\"=\"*70)\n", "\n", " model_dir = run_mast3r_mapper(\n", " database_path=database_path,\n", " image_dir=image_dir,\n", " output_dir=colmap_dir,\n", " pairs=pairs,\n", " processed_image_dir=processed_image_dir\n", " )\n", " ###############################################################################\n", "\n", " # Step 5: Gaussian Splattingの準備を確認\n", " colmap_parent = convert_to_gs_format(model_dir, output_dir)\n", "\n", " # Step 6: Gaussian Splattingモデルをトレーニング\n", " gs_output = train_gaussian_splatting(\n", " colmap_dir=colmap_parent,\n", " image_dir=processed_image_dir,\n", " output_dir=output_dir,\n", " iterations=iterations\n", " )\n", "\n", " print(\"\\n\" + \"=\"*70)\n", " print(\"✅ Full Pipeline Successfully Completed!\")\n", " print(\"=\"*70)\n", " print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n", "\n", " return gs_output\n" ], "metadata": { "id": "Ppkm3NVGwtjO" }, "execution_count": null, "outputs": [] }, { "cell_type": "code", "source": [ "# 使用例\n", "if __name__ == \"__main__\":\n", " # 実際のデータセットで実行する場合の推奨設定\n", " IMAGE_DIR = \"/content/drive/MyDrive/your_folder/fountain\"\n", " OUTPUT_DIR = \"/content/output\"\n", "\n", " # 本番用の設定:\n", " # - square_size: 1024 (高品質) または 512 (バランス)\n", " # - iterations: 6000 (推奨) または 30000 (高品質だが時間がかかる)\n", "\n", " gs_output = main_pipeline(\n", " IMAGE_DIR,\n", " OUTPUT_DIR,\n", " square_size=512,\n", " iterations=1000,\n", " )" ], "metadata": { "id": "66uQIWbs2a1t", "colab": { "base_uri": "https://localhost:8080/" }, "outputId": "481e53ad-6a09-4ed1-b5ac-0d071350e1d4" }, "execution_count": null, "outputs": [ { "output_type": "stream", "name": "stdout", "text": [ "======================================================================\n", "Gaussian Splatting Preparation Pipeline (MASt3R Version)\n", "======================================================================\n", "Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\n", "\n", " ✓ image_001.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_002.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_003.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_004.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_005.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_006.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_007.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_008.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_009.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_010.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_011.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_012.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_013.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_014.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_015.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_016.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_017.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_018.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_019.jpeg: (1440, 1920) → 2 square images generated\n", " ✓ image_020.jpeg: (1440, 1920) → 2 square images generated\n", "\n", "Processing complete: 20 source images processed\n", "Original size distribution: {'1440x1920': 20}\n", "\n", "📸 Found 40 images\n", "\n", "=== Extracting DINO Global Features ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 40/40 [00:02<00:00, 19.88it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Initial pairs from global features: 780\n", "\n", "=== Extracting ALIKED Local Features ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 40/40 [00:03<00:00, 11.95it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "\n", "=== Verifying Pairs with Local Features ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 780/780 [00:09<00:00, 82.32it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Verified pairs: 780\n", "\n", "=== Matching with LightGlue ===\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Saving keypoints: 100%|██████████| 40/40 [00:00<00:00, 3809.45it/s]\n", "Matching: 100%|██████████| 780/780 [02:16<00:00, 5.71it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "✓ Matches saved to /content/output/features/matches.h5\n", "\n", "======================================================================\n", "Step 3 & 4: MASt3R Reconstruction → COLMAP Format\n", "======================================================================\n", "\n", "=== MASt3R Mapper (COLMAP Alternative) ===\n", "\n", "=== Running MASt3R Reconstruction ===\n", "Initial memory state:\n", "GPU Memory - Allocated: 0.36GB, Reserved: 4.76GB\n", "CPU Memory Usage: 18.7%\n", "... loading model from /content/mast3r/checkpoints/MASt3R_ViTLarge_BaseDecoder_512_catmlpdpt_metric.pth\n", "instantiating : AsymmetricMASt3R(enc_depth=24, dec_depth=12, enc_embed_dim=1024, dec_embed_dim=768, enc_num_heads=16, dec_num_heads=12, pos_embed='RoPE100',img_size=(512, 512), head_type='catmlp+dpt', output_mode='pts3d+desc24', depth_mode=('exp', -inf, inf), conf_mode=('exp', 1, inf), patch_embed_cls='PatchEmbedDust3R', two_confs=True, desc_conf_mode=('exp', 0, inf), landscape_only=False)\n", "\n", "✓ MASt3R model loaded on cuda\n", "\n", "=== Running MASt3R Reconstruction ===\n", "Initial memory state:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 4.76GB\n", "CPU Memory Usage: 18.7%\n", "Processing 780 pairs in chunks of 500...\n", "Note: Images will be loaded on-demand per chunk to save memory\n", "Temporary directory: /tmp/tmpyq4aja7j\n", "\n", "--- Processing chunk 1/2 (pairs 0-500) ---\n", "Loading 40 unique images for this chunk...\n", "\n", "=== Loading images for MASt3R (size=224) ===\n", ">> Loading a list of 40 images\n", " - adding /content/output/processed_images/image_001_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_001_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_top.jpeg with resolution 512x512 --> 224x224\n", " (Found 40 images)\n", "Memory after loading chunk images:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 4.76GB\n", "CPU Memory Usage: 18.7%\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Preparing chunk 1: 100%|██████████| 500/500 [00:00<00:00, 608575.74it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Running MASt3R inference on 500 pairs...\n", ">> Inference with model on 500 image pairs\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "\rMASt3R inference: 0%| | 0/500 [00:00> Loading a list of 40 images\n", " - adding /content/output/processed_images/image_001_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_001_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_top.jpeg with resolution 512x512 --> 224x224\n", " (Found 40 images)\n", "Memory after loading chunk images:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 2.97GB\n", "CPU Memory Usage: 19.4%\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "Preparing chunk 2: 100%|██████████| 280/280 [00:00<00:00, 841264.41it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Running MASt3R inference on 280 pairs...\n", ">> Inference with model on 280 image pairs\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "MASt3R inference: 100%|██████████| 280/280 [01:03<00:00, 4.42it/s]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "Chunk 2 saved to disk. Memory state:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 2.97GB\n", "CPU Memory Usage: 19.4%\n", "\n", "=== Combining all chunks from disk ===\n", "Loading and combining chunks 1-2...\n", "✓ Combined output keys: ['view1', 'view2', 'pred1', 'pred2', 'loss']\n", "After combining all chunks:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 2.97GB\n", "CPU Memory Usage: 35.8%\n", "✓ MASt3R inference complete\n", "\n", "Loading all images for global alignment...\n", "\n", "=== Loading images for MASt3R (size=224) ===\n", ">> Loading a list of 40 images\n", " - adding /content/output/processed_images/image_001_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_001_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_002_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_003_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_004_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_005_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_006_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_007_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_008_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_009_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_010_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_011_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_012_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_013_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_014_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_015_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_016_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_017_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_018_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_019_top.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_bottom.jpeg with resolution 512x512 --> 224x224\n", " - adding /content/output/processed_images/image_020_top.jpeg with resolution 512x512 --> 224x224\n", " (Found 40 images)\n", "Memory after loading all images:\n", "GPU Memory - Allocated: 2.93GB, Reserved: 2.97GB\n", "CPU Memory Usage: 35.8%\n", "\n", "Running global alignment...\n", "Computing global alignment...\n", " init edge (15*,28*) score=39.758304595947266\n", " init edge (15,36*) score=35.8395881652832\n", " init edge (8*,36) score=35.50431442260742\n", " init edge (8,14*) score=32.074405670166016\n", " init edge (0*,36) score=31.76812744140625\n", " init edge (0,2*) score=28.978759765625\n", " init edge (10*,14) score=26.006826400756836\n", " init edge (18*,36) score=36.027557373046875\n", " init edge (18,23*) score=35.86989212036133\n", " init edge (18,33*) score=34.71807098388672\n", " init edge (18,19*) score=31.030723571777344\n", " init edge (18,25*) score=30.753198623657227\n", " init edge (19,38*) score=30.725109100341797\n", " init edge (18,29*) score=29.827274322509766\n", " init edge (2,37*) score=29.377527236938477\n", " init edge (18,39*) score=25.59001922607422\n", " init edge (18,22*) score=36.5020751953125\n", " init edge (11*,25) score=33.03491973876953\n", " init edge (22,32*) score=32.221405029296875\n", " init edge (21*,38) score=31.957304000854492\n", " init edge (7*,21) score=31.389163970947266\n", " init edge (9*,25) score=31.00221061706543\n", " init edge (6*,7) score=28.57666015625\n", " init edge (3*,22) score=28.573650360107422\n", " init edge (4*,21) score=27.17515754699707\n", " init edge (21,27*) score=33.37508010864258\n", " init edge (7,17*) score=32.06692886352539\n", " init edge (13*,27) score=31.686389923095703\n", " init edge (17,35*) score=29.423725128173828\n", " init edge (12*,35) score=29.047630310058594\n", " init edge (16*,17) score=23.028743743896484\n", " init edge (5*,27) score=39.58829116821289\n", " init edge (5,31*) score=32.39575958251953\n", " init edge (5,26*) score=32.27633285522461\n", " init edge (30*,31) score=28.815805435180664\n", " init edge (5,24*) score=25.976438522338867\n", " init edge (5,34*) score=25.234073638916016\n", " init edge (1*,24) score=23.33244514465332\n", " init edge (1,20*) score=25.115131378173828\n", " init loss = 0.8389807939529419\n", "Global alignement - optimizing for:\n", "['pw_poses', 'im_depthmaps', 'im_poses', 'im_focals']\n" ] }, { "output_type": "stream", "name": "stderr", "text": [ "100%|██████████| 150/150 [00:49<00:00, 3.03it/s, lr=2.09647e-06 loss=0.368302]\n" ] }, { "output_type": "stream", "name": "stdout", "text": [ "✓ Global alignment complete (final loss: 0.368302)\n", "Final memory state:\n", "GPU Memory - Allocated: 5.30GB, Reserved: 9.77GB\n", "CPU Memory Usage: 19.4%\n", "\n", "=== Extracting COLMAP-compatible data ===\n", "pts_all type: \n", "pts_all is a list with 40 elements\n", "First element type: \n", "First element shape: torch.Size([224, 224, 3])\n", "pts_all shape after conversion: torch.Size([40, 224, 224, 3])\n", "Found batched point cloud: torch.Size([40, 224, 224, 3])\n", "✓ Extracted 2007040 3D points from 40 images\n", "\n", "⚠ Downsampling from 2007040 to 1000000 points...\n", "✓ Downsampled to 1000000 points\n", "Extracting camera parameters...\n", "Retrieved all poses at once: shape torch.Size([40, 4, 4])\n", "✓ Extracted 40 cameras and 40 poses\n", "\n", "=== Saving COLMAP reconstruction ===\n", " Writing COLMAP files to /content/output/colmap/sparse/0...\n", " ✓ Wrote 40 cameras\n", " ✓ Wrote 40 images\n", " Wrote 1000000 / 1000000 points...\n", " ✓ Wrote 1000000 3D points\n", "\n", "✓ COLMAP reconstruction saved to /content/output/colmap/sparse/0\n", "\n", "✓ MASt3R reconstruction saved in COLMAP format\n", " Output: /content/output/colmap/sparse/0\n", "\n", "✓ MASt3R reconstruction complete: /content/output/colmap/sparse/0\n", "\n", "=== Verifying COLMAP Model for Gaussian Splatting ===\n", "Registered images: 40\n", "3D points: 1000000\n", " ✓ cameras.bin\n", " ✓ images.bin\n", " ✓ points3D.bin\n", "\n", "✓ COLMAP model ready for Gaussian Splatting\n", " Source path: /content/output/colmap\n", "\n", "=== Training Gaussian Splatting ===\n", "Checking and fixing Python environment...\n", "✓ Environment fixed\n", "COLMAP sparse model: /content/output/colmap/sparse/0\n", "Training images: /content/output/processed_images\n", "Output: /content/output\n", "Iterations: 1000\n", "\n", "Command: python train.py -s /content/output/colmap --images /content/output/processed_images -m /content/output --iterations 1000 --test_iterations 500 1000 --save_iterations 500 1000\n", "\n", "STDOUT: Optimizing /content/output\n", "Output folder: /content/output [18/01 14:11:04]\n", "\n", "Reading camera 1/40\n", "Reading camera 2/40\n", "Reading camera 3/40\n", "Reading camera 4/40\n", "Reading camera 5/40\n", "Reading camera 6/40\n", "Reading camera 7/40\n", "Reading camera 8/40\n", "Reading camera 9/40\n", "Reading camera 10/40\n", "Reading camera 11/40\n", "Reading camera 12/40\n", "Reading camera 13/40\n", "Reading camera 14/40\n", "Reading camera 15/40\n", "Reading camera 16/40\n", "Reading camera 17/40\n", "Reading camera 18/40\n", "Reading camera 19/40\n", "Reading camera 20/40\n", "Reading camera 21/40\n", "Reading camera 22/40\n", "Reading camera 23/40\n", "Reading camera 24/40\n", "Reading camera 25/40\n", "Reading camera 26/40\n", "Reading camera 27/40\n", "Reading camera 28/40\n", "Reading camera 29/40\n", "Reading camera 30/40\n", "Reading camera 31/40\n", "Reading camera 32/40\n", "Reading camera 33/40\n", "Reading camera 34/40\n", "Reading camera 35/40\n", "Reading camera 36/40\n", "Reading camera 37/40\n", "Reading camera 38/40\n", "Reading camera 39/40\n", "Reading camera 40/40 [18/01 14:11:04]\n", "Converting point3d.bin to .ply, will happen only the first time you open the scene. [18/01 14:11:04]\n", "Loading Training Cameras [18/01 14:11:10]\n", "Loading Test Cameras [18/01 14:11:10]\n", "Number of points at initialisation : 1000000 [18/01 14:11:10]\n", "\n", "[ITER 500] Evaluating train: L1 0.20798935294151308 PSNR 11.923607444763185 [18/01 14:12:00]\n", "\n", "[ITER 500] Saving Gaussians [18/01 14:12:00]\n", "\n", "[ITER 1000] Evaluating train: L1 0.18404637277126312 PSNR 12.721748352050781 [18/01 14:12:55]\n", "\n", "[ITER 1000] Saving Gaussians [18/01 14:12:55]\n", "\n", "Training complete. [18/01 14:13:03]\n", "\n", "STDERR: 2026-01-18 14:10:58.333133: E external/local_xla/xla/stream_executor/cuda/cuda_fft.cc:467] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "WARNING: All log messages before absl::InitializeLog() is called are written to STDERR\n", "E0000 00:00:1768745458.353767 14215 cuda_dnn.cc:8579] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "E0000 00:00:1768745458.360046 14215 cuda_blas.cc:1407] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "W0000 00:00:1768745458.376403 14215 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1768745458.376429 14215 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1768745458.376431 14215 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "W0000 00:00:1768745458.376433 14215 computation_placer.cc:177] computation placer already registered. Please check linkage and avoid linking the same target more than once.\n", "\n", "Training progress: 0%| | 0/1000 [00:00