{"metadata":{"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"colab":{"provenance":[],"gpuType":"T4"},"accelerator":"GPU","kaggle":{"accelerator":"nvidiaTeslaT4","dataSources":[{"sourceId":34970,"databundleVersionId":3445386,"isSourceIdPinned":false,"sourceType":"competition"}],"dockerImageVersionId":31236,"isInternetEnabled":true,"language":"python","sourceType":"notebook","isGpuEnabled":true}},"nbformat_minor":4,"nbformat":4,"cells":[{"cell_type":"markdown","source":"# **British Museum: dino-lightglue-colmap-gs-12** \n#### **Gaussian Splat via Biplet-Square Normalization, DINO, ALIKED, LIGHTGLUE and COLMAP**
\n","metadata":{"id":"qDQLX3PArmh8"}},{"cell_type":"markdown","source":"**2026/01/11 11:30**
\nN=176, square_size=700\n","metadata":{}},{"cell_type":"markdown","source":"https://huggingface.co/datasets/stpete2/ipynb/resolve/main/dino_lightglue_colmap_gs_12oo.ipynb
\ncolab ntebook to kaggle notebook ","metadata":{}},{"cell_type":"markdown","source":"# **setup**","metadata":{"id":"vXt8y7QyyRn9"}},{"cell_type":"code","source":"import os\nimport sys\nimport subprocess\nfrom PIL import Image\n\ndef run_cmd(cmd, check=True, capture=False):\n \"\"\"Run command with better error handling\"\"\"\n print(f\"Running: {' '.join(cmd)}\")\n result = subprocess.run(\n cmd,\n capture_output=capture,\n text=True,\n check=False\n )\n if check and result.returncode != 0:\n print(f\"β Command failed with code {result.returncode}\")\n if capture:\n print(f\"STDOUT: {result.stdout}\")\n print(f\"STDERR: {result.stderr}\")\n return result\n\n\ndef setup_environment():\n \"\"\"\n Colab environment setup for Gaussian Splatting + LightGlue + pycolmap\n Python 3.12 compatible version (v8)\n \"\"\"\n\n print(\"π Setting up COLAB environment (v8 - Python 3.12 compatible)\")\n\n WORK_DIR = \"gaussian-splatting\"\n\n # =====================================================================\n # STEP 0: NumPy FIX (Python 3.12 compatible)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 0: Fix NumPy (Python 3.12 compatible)\")\n print(\"=\"*70)\n\n # Python 3.12 requires numpy >= 1.26\n run_cmd([sys.executable, \"-m\", \"pip\", \"uninstall\", \"-y\", \"numpy\"])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"numpy==1.26.4\"])\n\n # sanity check\n run_cmd([sys.executable, \"-c\", \"import numpy; print('NumPy:', numpy.__version__)\"])\n\n # =====================================================================\n # STEP 1: System packages (Colab)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 1: System packages\")\n print(\"=\"*70)\n\n run_cmd([\"apt-get\", \"update\", \"-qq\"])\n run_cmd([\n \"apt-get\", \"install\", \"-y\", \"-qq\",\n \"colmap\",\n \"build-essential\",\n \"cmake\",\n \"git\",\n \"libopenblas-dev\",\n \"xvfb\"\n ])\n\n # virtual display (COLMAP / OpenCV safety)\n os.environ[\"QT_QPA_PLATFORM\"] = \"offscreen\"\n os.environ[\"DISPLAY\"] = \":99\"\n subprocess.Popen(\n [\"Xvfb\", \":99\", \"-screen\", \"0\", \"1024x768x24\"],\n stdout=subprocess.DEVNULL,\n stderr=subprocess.DEVNULL\n )\n\n # =====================================================================\n # STEP 2: Clone Gaussian Splatting\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 2: Clone Gaussian Splatting\")\n print(\"=\"*70)\n\n if not os.path.exists(WORK_DIR):\n run_cmd([\n \"git\", \"clone\", \"--recursive\",\n \"https://github.com/graphdeco-inria/gaussian-splatting.git\",\n WORK_DIR\n ])\n else:\n print(\"β Repository already exists\")\n\n # =====================================================================\n # STEP 3: Python packages (FIXED ORDER & VERSIONS)\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 3: Python packages (VERBOSE MODE)\")\n print(\"=\"*70)\n\n # ---- PyTorch (Colab CUDAε―ΎεΏ) ----\n print(\"\\nπ¦ Installing PyTorch...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"torch\", \"torchvision\", \"torchaudio\"\n ])\n\n # ---- Core utils ----\n print(\"\\nπ¦ Installing core utilities...\")\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"opencv-python\",\n \"pillow\",\n \"imageio\",\n \"imageio-ffmpeg\",\n \"plyfile\",\n \"tqdm\",\n \"tensorboard\"\n ])\n\n # ---- transformers (NumPy 1.26 compatible) ----\n print(\"\\nπ¦ Installing transformers (NumPy 1.26 compatible)...\")\n # Install transformers with proper dependencies\n run_cmd([\n sys.executable, \"-m\", \"pip\", \"install\",\n \"transformers==4.40.0\"\n ])\n\n # ---- LightGlue stack (GITHUB INSTALL) ----\n print(\"\\nπ¦ Installing LightGlue stack...\")\n\n # Install kornia first\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"kornia\"])\n\n # Install h5py (sometimes needed)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"h5py\"])\n\n # Install matplotlib (LightGlue dependency)\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"matplotlib\"])\n\n # Install LightGlue directly from GitHub (more reliable)\n print(\" Installing LightGlue from GitHub...\")\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\",\n \"git+https://github.com/cvg/LightGlue.git\"])\n\n # Install pycolmap\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", \"pycolmap\"])\n\n # =====================================================================\n # STEP 4: Build GS submodules\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 4: Build Gaussian Splatting submodules\")\n print(\"=\"*70)\n\n submodules = {\n \"diff-gaussian-rasterization\":\n \"https://github.com/graphdeco-inria/diff-gaussian-rasterization.git\",\n \"simple-knn\":\n \"https://github.com/camenduru/simple-knn.git\"\n }\n\n for name, repo in submodules.items():\n print(f\"\\nπ¦ Installing {name}...\")\n path = os.path.join(WORK_DIR, \"submodules\", name)\n if not os.path.exists(path):\n run_cmd([\"git\", \"clone\", repo, path])\n run_cmd([sys.executable, \"-m\", \"pip\", \"install\", path])\n\n # =====================================================================\n # STEP 5: Detailed Verification\n # =====================================================================\n print(\"\\n\" + \"=\"*70)\n print(\"STEP 5: Detailed Verification\")\n print(\"=\"*70)\n\n # NumPy (verify version first)\n print(\"\\nπ Testing NumPy...\")\n try:\n import numpy as np\n print(f\" β NumPy: {np.__version__}\")\n except Exception as e:\n print(f\" β NumPy failed: {e}\")\n\n # PyTorch\n print(\"\\nπ Testing PyTorch...\")\n try:\n import torch\n print(f\" β PyTorch: {torch.__version__}\")\n print(f\" β CUDA available: {torch.cuda.is_available()}\")\n if torch.cuda.is_available():\n print(f\" β CUDA version: {torch.version.cuda}\")\n except Exception as e:\n print(f\" β PyTorch failed: {e}\")\n\n # transformers\n print(\"\\nπ Testing transformers...\")\n try:\n import transformers\n print(f\" β transformers version: {transformers.__version__}\")\n from transformers import AutoModel\n print(f\" β AutoModel import: OK\")\n except Exception as e:\n print(f\" β transformers failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"import transformers; print(transformers.__version__)\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n\n # LightGlue\n print(\"\\nπ Testing LightGlue...\")\n try:\n from lightglue import LightGlue, ALIKED\n print(f\" β LightGlue: OK\")\n print(f\" β ALIKED: OK\")\n except Exception as e:\n print(f\" β LightGlue failed: {e}\")\n print(f\" Attempting detailed diagnosis...\")\n result = run_cmd([\n sys.executable, \"-c\",\n \"from lightglue import LightGlue\"\n ], capture=True)\n print(f\" Output: {result.stdout}\")\n print(f\" Error: {result.stderr}\")\n\n # pycolmap\n print(\"\\nπ Testing pycolmap...\")\n try:\n import pycolmap\n print(f\" β pycolmap: OK\")\n except Exception as e:\n print(f\" β pycolmap failed: {e}\")\n\n # kornia\n print(\"\\nπ Testing kornia...\")\n try:\n import kornia\n print(f\" β kornia: {kornia.__version__}\")\n except Exception as e:\n print(f\" β kornia failed: {e}\")\n\n print(\"\\n\" + \"=\"*70)\n print(\"β
SETUP COMPLETE\")\n print(\"=\"*70)\n print(f\"Working dir: {WORK_DIR}\")\n\n return WORK_DIR\n\n\nif __name__ == \"__main__\":\n setup_environment()","metadata":{"id":"z6cBHbABzZ0F"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"import os\n\n%cd gaussian-splatting\n\nfiles = ['database.py', 'h5_to_db.py', 'metric.py']\nbase_url = 'https://huggingface.co/stpete2/imc25_utils/resolve/main/'\n\nfor file in files:\n if not os.path.exists(file):\n !wget -q {base_url + file}\n print(f\"β {file} download complete\")\n else:\n print(f\"β {file} already exists\")\n","metadata":{"id":"eJrkKiCLzt1G"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"# **install libraries**","metadata":{"id":"DwyCRLt4yYfx"}},{"cell_type":"code","source":"from database import COLMAPDatabase, image_ids_to_pair_id\nfrom h5_to_db import add_keypoints, add_matches\nfrom metric import *","metadata":{"id":"WVr8ggyVuq6q"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"\"\"\"\nGaussian Splatting Pipeline\nSimple and robust pipeline: LightGlue β COLMAP β Gaussian Splatting\n\"\"\"\n\nimport os\nimport sys\nimport gc\nimport h5py\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nfrom pathlib import Path\nimport subprocess\n\n# LightGlue\nfrom lightglue import ALIKED, LightGlue\nfrom lightglue.utils import load_image\n\n# Transformers for DINO\nfrom transformers import AutoImageProcessor, AutoModel\n\n\n# ============================================================================\n# Configuration\n# ============================================================================\nclass Config:\n # Feature extraction\n N_KEYPOINTS = 8192\n IMAGE_SIZE = 1024\n\n # Pair selection\n GLOBAL_TOPK = 200\n MIN_MATCHES = 10\n RATIO_THR = 1.2\n\n # Paths\n DINO_MODEL = \"facebook/dinov2-base\" # Change if using local path\n\n # Device\n DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')","metadata":{"id":"7NfrJdMvrPZn"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 0: images_square\n# ============================================================================\n\ndef preprocess_images_square(input_dir, output_dir, size=1024, background='black'):\n \"\"\"\n Standardize all images to a square format (maintaining aspect ratio with padding).\n\n Args:\n input_dir (str): Directory containing input images.\n output_dir (str): Directory to save processed images.\n size (int): Target square dimension (default: 1024).\n background (str): Background style: 'black', 'white', or 'blur'.\n \"\"\"\n from PIL import Image, ImageFilter\n import os\n from tqdm import tqdm\n\n print(f\"\\n=== Preprocessing to {size}x{size} Square Images ===\")\n\n os.makedirs(output_dir, exist_ok=True)\n\n image_files = sorted([\n f for f in os.listdir(input_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n ])\n\n stats = {\n 'total': len(image_files),\n 'landscape': 0,\n 'portrait': 0,\n 'square': 0,\n 'resized': 0,\n }\n\n for img_file in tqdm(image_files, desc=\"Converting to square\"):\n img_path = os.path.join(input_dir, img_file)\n img = Image.open(img_path).convert('RGB')\n\n width, height = img.size\n\n # Statistics\n if width > height:\n stats['landscape'] += 1\n elif width < height:\n stats['portrait'] += 1\n else:\n stats['square'] += 1\n\n # Resize based on the longest side\n max_dim = max(width, height)\n if max_dim != size:\n scale = size / max_dim\n new_width = int(width * scale)\n new_height = int(height * scale)\n img = img.resize((new_width, new_height), Image.LANCZOS)\n stats['resized'] += 1\n else:\n new_width, new_height = width, height\n\n # Create background\n if background == 'black':\n canvas = Image.new('RGB', (size, size), (0, 0, 0))\n elif background == 'white':\n canvas = Image.new('RGB', (size, size), (255, 255, 255))\n elif background == 'blur':\n # Use a blurred version of the image as background for a professional look\n canvas = img.resize((size, size), Image.LANCZOS)\n canvas = canvas.filter(ImageFilter.GaussianBlur(radius=20))\n else:\n canvas = Image.new('RGB', (size, size), (0, 0, 0))\n\n # Center the image\n offset_x = (size - new_width) // 2\n offset_y = (size - new_height) // 2\n canvas.paste(img, (offset_x, offset_y))\n\n # Save output\n output_path = os.path.join(output_dir, img_file)\n canvas.save(output_path, quality=95, optimize=True)\n\n print(f\"\\nβ Preprocessing complete:\")\n print(f\" Total images: {stats['total']}\")\n print(f\" Landscape: {stats['landscape']} / Portrait: {stats['portrait']} / Square: {stats['square']}\")\n print(f\" Resized: {stats['resized']}\")\n print(f\" Output size: {size}x{size}\")\n\n return output_dir","metadata":{"id":"TkVzKRqsvxFZ"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def normalize_image_sizes_biplet(input_dir, output_dir=None, size=1024):\n \"\"\"\n Generates two square crops (Left & Right or Top & Bottom)\n from each image in a directory.\n \"\"\"\n if output_dir is None:\n output_dir = input_dir\n\n os.makedirs(output_dir, exist_ok=True)\n\n print(f\"Generating 2 cropped squares (Left/Right or Top/Bottom) for each image...\")\n print()\n\n converted_count = 0\n size_stats = {}\n\n for img_file in sorted(os.listdir(input_dir)):\n if not img_file.lower().endswith(('.jpg', '.jpeg', '.png')):\n continue\n\n input_path = os.path.join(input_dir, img_file)\n\n try:\n img = Image.open(input_path)\n original_size = img.size\n\n size_key = f\"{original_size[0]}x{original_size[1]}\"\n size_stats[size_key] = size_stats.get(size_key, 0) + 1\n\n # Generate 2 crops\n crops = generate_two_crops(img, size)\n\n base_name, ext = os.path.splitext(img_file)\n for mode, cropped_img in crops.items():\n output_path = os.path.join(output_dir, f\"{base_name}_{mode}{ext}\")\n cropped_img.save(output_path, quality=95)\n\n converted_count += 1\n print(f\" β {img_file}: {original_size} β 2 square images generated\")\n\n except Exception as e:\n print(f\" β Error processing {img_file}: {e}\")\n\n print(f\"\\nProcessing complete: {converted_count} source images processed\")\n print(f\"Original size distribution: {size_stats}\")\n return converted_count\n\n\ndef generate_two_crops(img, size):\n \"\"\"\n Crops the image into a square and returns 2 variations\n (Left/Right for landscape, Top/Bottom for portrait).\n \"\"\"\n width, height = img.size\n crop_size = min(width, height)\n crops = {}\n\n if width > height:\n # Landscape β Left & Right\n positions = {\n 'left': 0,\n 'right': width - crop_size\n }\n for mode, x_offset in positions.items():\n box = (x_offset, 0, x_offset + crop_size, crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n else:\n # Portrait or Square β Top & Bottom\n positions = {\n 'top': 0,\n 'bottom': height - crop_size\n }\n for mode, y_offset in positions.items():\n box = (0, y_offset, crop_size, y_offset + crop_size)\n crops[mode] = img.crop(box).resize(\n (size, size),\n Image.Resampling.LANCZOS\n )\n\n return crops","metadata":{"trusted":true},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 1: Image Pair Selection (DINO + ALIKED local verify)\n# ============================================================================\n\ndef load_torch_image(fname, device):\n \"\"\"Load image as torch tensor\"\"\"\n from PIL import Image\n import torchvision.transforms as T\n\n img = Image.open(fname).convert('RGB')\n transform = T.Compose([\n T.ToTensor(),\n ])\n return transform(img).unsqueeze(0).to(device)\n\ndef extract_dino_global(image_paths, model_path, device):\n \"\"\"Extract DINO global descriptors\"\"\"\n print(\"\\n=== Extracting DINO Global Features ===\")\n\n processor = AutoImageProcessor.from_pretrained(model_path)\n model = AutoModel.from_pretrained(model_path).eval().to(device)\n\n global_descs = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n inputs = processor(images=img, return_tensors=\"pt\", do_rescale=False).to(device)\n outputs = model(**inputs)\n desc = F.normalize(outputs.last_hidden_state[:, 1:].max(dim=1)[0], dim=1, p=2)\n global_descs.append(desc.cpu())\n\n global_descs = torch.cat(global_descs, dim=0)\n\n del model\n torch.cuda.empty_cache()\n gc.collect()\n\n return global_descs\n\ndef build_topk_pairs(global_feats, k, device):\n \"\"\"Build top-k similar pairs from global features\"\"\"\n g = global_feats.to(device)\n sim = g @ g.T\n sim.fill_diagonal_(-1)\n\n N = sim.size(0)\n k = min(k, N - 1)\n\n topk_indices = torch.topk(sim, k, dim=1).indices.cpu()\n\n pairs = []\n for i in range(N):\n for j in topk_indices[i]:\n j = j.item()\n if i < j:\n pairs.append((i, j))\n\n return list(set(pairs))\n\ndef extract_aliked_features(image_paths, device):\n \"\"\"Extract ALIKED local features\"\"\"\n print(\"\\n=== Extracting ALIKED Local Features ===\")\n\n extractor = ALIKED(\n model_name=\"aliked-n16\",\n max_num_keypoints=Config.N_KEYPOINTS,\n detection_threshold=0.01,\n resize=Config.IMAGE_SIZE\n ).eval().to(device)\n\n features = []\n for img_path in tqdm(image_paths):\n img = load_torch_image(img_path, device)\n with torch.no_grad():\n feats = extractor.extract(img)\n kpts = feats['keypoints'].reshape(-1, 2).cpu()\n descs = feats['descriptors'].reshape(len(kpts), -1).cpu()\n features.append({'keypoints': kpts, 'descriptors': descs})\n\n del extractor\n torch.cuda.empty_cache()\n gc.collect()\n\n return features\n\ndef verify_pairs_locally(pairs, features, device, threshold=Config.MIN_MATCHES):\n \"\"\"Verify pairs using local descriptor matching\"\"\"\n print(\"\\n=== Verifying Pairs with Local Features ===\")\n\n verified = []\n for i, j in tqdm(pairs):\n desc1 = features[i]['descriptors'].to(device)\n desc2 = features[j]['descriptors'].to(device)\n\n if len(desc1) == 0 or len(desc2) == 0:\n continue\n\n # Simple mutual nearest neighbor\n dist = torch.cdist(desc1, desc2, p=2)\n min_dist = dist.min(dim=1)[0]\n n_matches = (min_dist < Config.RATIO_THR).sum().item()\n\n if n_matches >= threshold:\n verified.append((i, j))\n\n return verified\n\ndef get_image_pairs(image_paths):\n \"\"\"Main pair selection pipeline\"\"\"\n device = Config.DEVICE\n\n # 1. DINO global\n global_feats = extract_dino_global(image_paths, Config.DINO_MODEL, device)\n pairs = build_topk_pairs(global_feats, Config.GLOBAL_TOPK, device)\n\n print(f\"Initial pairs from global features: {len(pairs)}\")\n\n # 2. ALIKED local\n features = extract_aliked_features(image_paths, device)\n\n # 3. Local verification\n verified_pairs = verify_pairs_locally(pairs, features, device)\n\n print(f\"Verified pairs: {len(verified_pairs)}\")\n\n return verified_pairs, features","metadata":{"id":"FNjFURfYmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 2: Feature Matching (ALIKED + LightGlue)\n# ============================================================================\n\ndef match_pairs_lightglue(image_paths, pairs, features, output_dir):\n \"\"\"\n Match image pairs using LightGlue\n \"\"\"\n print(\"\\n=== Matching with LightGlue ===\")\n\n os.makedirs(output_dir, exist_ok=True)\n keypoints_path = os.path.join(output_dir, 'keypoints.h5')\n matches_path = os.path.join(output_dir, 'matches.h5')\n\n if os.path.exists(keypoints_path):\n os.remove(keypoints_path)\n if os.path.exists(matches_path):\n os.remove(matches_path)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n extractor = ALIKED(max_num_keypoints=4096, detection_threshold=0.2, nms_radius=2).eval().to(device)\n matcher = LightGlue(features='aliked').eval().to(device)\n\n if isinstance(features, dict):\n all_keypoints = features['keypoints']\n all_descriptors = features['descriptors']\n elif isinstance(features, list):\n all_keypoints = [f['keypoints'] for f in features]\n all_descriptors = [f['descriptors'] for f in features]\n else:\n raise ValueError(f\"Unsupported features type: {type(features)}\")\n\n with h5py.File(keypoints_path, 'w') as f_kp:\n for idx, img_path in enumerate(tqdm(image_paths, desc=\"Saving keypoints\")):\n img_name = os.path.splitext(os.path.basename(img_path))[0]\n\n kp = all_keypoints[idx]\n if torch.is_tensor(kp):\n kp = kp.cpu().numpy()\n f_kp.create_dataset(img_name, data=kp)\n\n # Match pairs\n with h5py.File(matches_path, 'w') as f_match:\n for idx1, idx2 in tqdm(pairs, desc=\"Matching\"):\n with torch.no_grad():\n kp0 = all_keypoints[idx1]\n kp1 = all_keypoints[idx2]\n desc0 = all_descriptors[idx1]\n desc1 = all_descriptors[idx2]\n\n if isinstance(kp0, np.ndarray):\n kp0 = torch.from_numpy(kp0).float().to(device)\n kp1 = torch.from_numpy(kp1).float().to(device)\n desc0 = torch.from_numpy(desc0).float().to(device)\n desc1 = torch.from_numpy(desc1).float().to(device)\n else:\n kp0 = kp0.float().to(device)\n kp1 = kp1.float().to(device)\n desc0 = desc0.float().to(device)\n desc1 = desc1.float().to(device)\n\n feats0 = {\n 'keypoints': kp0.unsqueeze(0) if kp0.dim() == 2 else kp0,\n 'descriptors': desc0.unsqueeze(0) if desc0.dim() == 2 else desc0,\n }\n feats1 = {\n 'keypoints': kp1.unsqueeze(0) if kp1.dim() == 2 else kp1,\n 'descriptors': desc1.unsqueeze(0) if desc1.dim() == 2 else desc1,\n }\n\n matches01 = matcher({'image0': feats0, 'image1': feats1})\n\n if 'matches0' in matches01:\n matches0 = matches01['matches0'].cpu().numpy()\n if matches0.ndim > 1:\n matches0 = matches0[0]\n valid = matches0 > -1\n matches = np.stack([np.where(valid)[0], matches0[valid]], axis=1)\n elif 'matches' in matches01:\n m = matches01['matches']\n if isinstance(m, list):\n matches = np.array(m)\n elif hasattr(m, 'cpu'):\n matches = m.cpu().numpy()\n else:\n matches = np.array(m)\n else:\n continue\n\n if len(matches) > 0:\n img_name1 = os.path.splitext(os.path.basename(image_paths[idx1]))[0]\n img_name2 = os.path.splitext(os.path.basename(image_paths[idx2]))[0]\n pair_key = f\"{img_name1}_{img_name2}\"\n f_match.create_dataset(pair_key, data=matches)\n\n print(f\"β Matches saved to {matches_path}\")\n","metadata":{"id":"X-PKgmdwmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 3: Import to COLMAP\n# ============================================================================\n\ndef import_to_colmap(image_dir, feature_dir, database_path, single_camera=True):\n \"\"\"\n Import features and matches to COLMAP database\n\n Args:\n image_dir (str): Directory containing the images.\n feature_dir (str): Directory to save/load extracted features.\n database_path (str): Path to the database file.\n single_camera (bool): Set to True if all images have the same dimensions (e.g., pre-resized).\n \"\"\"\n print(\"\\n=== Creating COLMAP Database ===\")\n\n if os.path.exists(database_path):\n os.remove(database_path)\n print(f\"β Removed existing database\")\n\n db = COLMAPDatabase.connect(database_path)\n db.create_tables()\n\n print(f\"Single camera mode: {single_camera}\")\n\n image_files = [f for f in os.listdir(image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))]\n if not image_files:\n raise ValueError(f\"No images found in {image_dir}\")\n\n first_image = sorted(image_files)[0]\n img_ext = os.path.splitext(first_image)[1]\n print(f\"Detected image extension: '{img_ext}'\")\n\n fname_to_id = add_keypoints(\n db,\n feature_dir,\n image_dir,\n img_ext,\n 'PINHOLE',\n single_camera=single_camera\n )\n\n add_matches(db, feature_dir, fname_to_id)\n db.commit()\n db.close()\n\n print(f\"β Database created: {database_path}\")\n\n# ============================================================================\n# Step 4: Run COLMAP Mapper\n# ============================================================================\n\ndef run_colmap_mapper(database_path, image_dir, output_dir):\n \"\"\"\n Run COLMAP mapper with verbose output\n \"\"\"\n print(\"\\n=== Running COLMAP Reconstruction ===\")\n os.makedirs(output_dir, exist_ok=True)\n cmd = [\n 'colmap', 'mapper',\n '--database_path', database_path,\n '--image_path', image_dir,\n '--output_path', output_dir,\n '--Mapper.ba_refine_focal_length', '0',\n '--Mapper.ba_refine_principal_point', '0',\n '--Mapper.ba_refine_extra_params', '0',\n '--Mapper.min_num_matches', '15',\n '--Mapper.init_min_num_inliers', '50',\n '--Mapper.max_num_models', '1',\n '--Mapper.num_threads', '16',\n ]\n print(f\"Command: {' '.join(cmd)}\\n\")\n\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True)\n for line in process.stdout:\n print(line, end='')\n process.wait()\n if process.returncode == 0:\n model_dir = os.path.join(output_dir, '0')\n if os.path.exists(model_dir):\n print(f\"\\nβ COLMAP reconstruction complete: {model_dir}\")\n return model_dir\n raise RuntimeError(\"COLMAP reconstruction failed\")","metadata":{"id":"NJedFruCmVcL"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"# ============================================================================\n# Step 5: Convert to Gaussian Splatting Format (if needed)\n# ============================================================================\n\ndef convert_to_gs_format(colmap_model_dir, output_dir):\n \"\"\"\n Verify COLMAP output and prepare paths for Gaussian Splatting.\n\n Args:\n colmap_model_dir (str): Path to the COLMAP sparse/0 directory.\n Example: output/colmap/sparse/0\n output_dir (str): Base output directory.\n\n Returns:\n colmap_parent_dir (str): The path to be passed to Gaussian Splatting.\n Example: output/colmap (Parent directory containing 'sparse/')\n \"\"\"\n print(\"\\n=== Verifying COLMAP Model for Gaussian Splatting ===\")\n\n import pycolmap\n reconstruction = pycolmap.Reconstruction(colmap_model_dir)\n\n print(f\"Registered images: {len(reconstruction.images)}\")\n print(f\"3D points: {len(reconstruction.points3D)}\")\n\n # Check for files required by Gaussian Splatting\n required_files = ['cameras.bin', 'images.bin', 'points3D.bin']\n for file in required_files:\n file_path = os.path.join(colmap_model_dir, file)\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"Required file not found: {file}\")\n print(f\" β {file}\")\n\n # Return the grandparent directory of sparse/0\n # output/colmap/sparse/0 -> output/colmap\n colmap_parent_dir = os.path.dirname(os.path.dirname(colmap_model_dir))\n\n print(f\"\\nβ COLMAP model ready for Gaussian Splatting\")\n print(f\" Source path: {colmap_parent_dir}\")\n\n return colmap_parent_dir","metadata":{"id":"4IioqnC1mVcM"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def train_gaussian_splatting(colmap_dir, image_dir, output_dir, iterations=30000):\n \"\"\"\n Train a Gaussian Splatting model.\n\n Args:\n colmap_dir (str): COLMAP parent directory (the directory containing 'sparse/').\n Example: output/colmap\n image_dir (str): Directory containing training images.\n Example: output/processed_images\n output_dir (str): Base directory for Gaussian Splatting output.\n iterations (int): Total number of training iterations.\n\n Returns:\n gs_output_dir (str): Path to the generated Gaussian Splatting output.\n \"\"\"\n print(\"\\n=== Training Gaussian Splatting ===\")\n\n gs_output_dir = os.path.join(output_dir, 'gs_output')\n os.makedirs(gs_output_dir, exist_ok=True)\n\n # Verify the Gaussian Splatting directory structure\n sparse_dir = os.path.join(colmap_dir, 'sparse', '0')\n if not os.path.exists(sparse_dir):\n raise FileNotFoundError(f\"COLMAP sparse directory not found: {sparse_dir}\")\n\n print(f\"COLMAP sparse model: {sparse_dir}\")\n print(f\"Training images: {image_dir}\")\n print(f\"Output: {gs_output_dir}\")\n print(f\"Iterations: {iterations}\")\n\n # Gaussian Splatting command\n cmd = [\n 'python', 'train.py',\n '-s', colmap_dir, # Source directory (must contain 'sparse/')\n '--images', image_dir, # Explicitly specify the images directory\n '-m', gs_output_dir, # Model output directory\n '--iterations', str(iterations),\n '--test_iterations', str(iterations//2), str(iterations),\n '--save_iterations', str(iterations//2), str(iterations),\n ]\n\n print(f\"\\nCommand: {' '.join(cmd)}\\n\")\n\n result = subprocess.run(cmd, capture_output=True, text=True)\n\n print(result.stdout)\n if result.stderr:\n print(\"STDERR:\", result.stderr)\n\n if result.returncode != 0:\n raise RuntimeError(\"Gaussian Splatting training failed\")\n\n # Check for the existence of the generated PLY file\n ply_path = os.path.join(gs_output_dir, 'point_cloud', f'iteration_{iterations}', 'point_cloud.ply')\n if os.path.exists(ply_path):\n size_mb = os.path.getsize(ply_path) / (1024 * 1024)\n print(f\"\\nβ Training complete!\")\n print(f\" PLY file: {ply_path}\")\n print(f\" Size: {size_mb:.2f} MB\")\n else:\n print(f\"β οΈ Warning: PLY file not found at the expected location\")\n\n return gs_output_dir","metadata":{"id":"EiHoRSfzQ01b"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"def main_pipeline(image_dir, output_dir, square_size=1024):\n \"\"\"\n Complete pipeline: Images β Square Processing β COLMAP β Gaussian Splatting\n \"\"\"\n print(\"=\"*70)\n print(\"Gaussian Splatting Preparation Pipeline\")\n print(\"=\"*70)\n\n # Step 0: Standardize images to square format\n #processed_dir = os.path.join(output_dir, 'processed_images')\n #processed_image_dir = normalize_image_sizes_biplet(image_dir, processed_dir, size=square_size)\n \n processed_image_dir = os.path.join(output_dir, \"processed_images\")\n \n normalize_image_sizes_biplet(\n input_dir=image_dir,\n output_dir=processed_image_dir,\n size=square_size\n)\n\n # Setup paths\n feature_dir = os.path.join(output_dir, 'features')\n colmap_dir = os.path.join(output_dir, 'colmap')\n database_path = os.path.join(colmap_dir, 'database.db')\n sparse_dir = os.path.join(colmap_dir, 'sparse')\n\n os.makedirs(output_dir, exist_ok=True)\n os.makedirs(colmap_dir, exist_ok=True)\n\n # Get image paths\n image_paths = sorted([\n os.path.join(processed_image_dir, f)\n for f in os.listdir(processed_image_dir)\n if f.lower().endswith(('.jpg', '.jpeg', '.png'))\n ])\n\n print(f\"\\nπΈ Found {len(image_paths)} images\")\n\n # Step 1: Generate image pairs\n pairs, features = get_image_pairs(image_paths)\n\n # Step 2: Feature matching with LightGlue\n match_pairs_lightglue(image_paths, pairs, features, feature_dir)\n\n # Step 3: Import data into COLMAP\n # (single_camera=True assumes uniform image dimensions)\n import_to_colmap(processed_image_dir, feature_dir, database_path, single_camera=True)\n\n # Step 4: Run COLMAP Sparse Reconstruction\n model_dir = run_colmap_mapper(database_path, processed_image_dir, sparse_dir)\n\n # Step 5: \n colmap_parent = convert_to_gs_format(model_dir, output_dir)\n \n # Step 6: Train Gaussian Splatting model\n gs_output = train_gaussian_splatting(\n colmap_dir=colmap_parent, \n image_dir=processed_image_dir, \n output_dir=output_dir,\n iterations=2000\n )\n\n print(\"\\n\" + \"=\"*70)\n print(\"β
Full Pipeline Successfully Completed!\")\n print(\"=\"*70)\n print(f\"\\nGaussian Splatting model saved at: {gs_output}\")\n\n return gs_output\n\n\n# Example usage\nif __name__ == \"__main__\":\n # Example: Tourist photos with varying resolutions/aspect ratios\n IMAGE_DIR = \"/kaggle/input/image-matching-challenge-2022/train/british_museum/images\"\n OUTPUT_DIR = \"/kaggle/working/output\"\n\n gs_output = main_pipeline(IMAGE_DIR, OUTPUT_DIR, square_size=700)","metadata":{"id":"5-_UvgTtRiC_"},"outputs":[],"execution_count":null},{"cell_type":"code","source":"print('Congratulation! Successsfully Completed!')","metadata":{"id":"8jhKKtTqwv7O"},"outputs":[],"execution_count":null},{"cell_type":"markdown","source":"### **3D Gaussian Splat Viewer**\n\nhttps://splat-three.vercel.app/?url=british_museum_photo.splat","metadata":{}},{"cell_type":"code","source":"","metadata":{"id":"VQsLeKY8Rl8Y"},"outputs":[],"execution_count":null}]}