# DA3 with custom trained DPT/DualDPT/SDT checkpoint
import os
import sys
from pathlib import Path
from typing import Dict, Optional, Union

import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from moge.test.baseline import MGEBaselineInterface


# DA3 Wrapper (same as training)
class DA3Wrapper(torch.nn.Module):
    """Adapt a multi-view DA3 model to a single-view [B, 3, H, W] interface.

    DA3 expects [B, N, 3, H, W] where N is the number of views; this wrapper
    inserts a singleton view dimension on the way in and removes it from the
    predicted depth on the way out.
    """

    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x):
        # x: [B, 3, H, W] -> [B, 1, 3, H, W] (single view)
        x = x.unsqueeze(1)
        output = self.model(x)
        # output.depth shape: [B, 1, H, W] -> [B, H, W]
        depth = output.depth.squeeze(1)
        return depth


class Baseline(MGEBaselineInterface):
    """MoGe benchmark baseline backed by a custom-trained Depth-Anything-3 model.

    Builds the model from the DA3 repository's config for the chosen decoder,
    loads a training checkpoint, and exposes `infer` returning an
    affine-invariant disparity map.
    """

    def __init__(self, repo_path: str, checkpoint: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str]):
        """
        Args:
            repo_path: Path to the Depth-Anything-3 repository checkout.
            checkpoint: Path to the trained checkpoint (.pt / .pth).
            decoder: One of 'dpt', 'dualdpt', 'sdt' — selects the DA3 config.
            num_tokens: Target ViT token budget for resizing, or None to use
                the default shortest-side-518 resize.
            device: Device to run inference on.

        Raises:
            FileNotFoundError: If the repository or checkpoint path is missing.
            ValueError: If `decoder` is not a known decoder type.
        """
        # Create from repo
        repo_path = os.path.abspath(repo_path)
        src_path = os.path.join(repo_path, 'src')
        training_path = os.path.join(repo_path, 'training')

        # Make the repo's `depth_anything_3` (and training utilities) importable.
        if src_path not in sys.path:
            sys.path.insert(0, src_path)
        if training_path not in sys.path:
            sys.path.insert(0, training_path)

        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the Depth-Anything-3 repository at {repo_path}.')

        device = torch.device(device)

        # Config paths — one YAML per decoder head variant.
        config_dir = os.path.join(repo_path, 'src', 'depth_anything_3', 'configs')
        if decoder == 'dpt':
            config_path = os.path.join(config_dir, 'da3dpt-large.yaml')
        elif decoder == 'dualdpt':
            config_path = os.path.join(config_dir, 'da3dualdpt-large.yaml')
        elif decoder == 'sdt':
            config_path = os.path.join(config_dir, 'da3sdt-large.yaml')
        else:
            raise ValueError(f"Unknown decoder: {decoder}")

        from depth_anything_3.cfg import load_config, create_object

        # Build model
        cfg = load_config(config_path)
        base_model = create_object(cfg)
        model = DA3Wrapper(base_model)

        # Load checkpoint
        if not os.path.exists(checkpoint):
            raise FileNotFoundError(f'Cannot find checkpoint at {checkpoint}')
        # NOTE(review): checkpoint may contain non-tensor objects, so
        # weights_only=True is not assumed safe here — confirm before changing.
        ckpt = torch.load(checkpoint, map_location='cpu')
        state_dict = ckpt['model'] if 'model' in ckpt else ckpt

        # Strip a leading DDP 'module.' prefix only — a bare `str.replace`
        # would also corrupt keys containing 'module.' elsewhere in the name.
        state_dict = {
            (k[len('module.'):] if k.startswith('module.') else k): v
            for k, v in state_dict.items()
        }
        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        print(f"Loaded checkpoint from {checkpoint}")
        if missing:
            print(f"Missing keys: {len(missing)}")
        if unexpected:
            print(f"Unexpected keys: {len(unexpected)}")

        model.to(device).eval()

        self.model = model
        self.num_tokens = num_tokens
        self.device = device

    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='/home/ywan0794/Depth-Anything-3', help='Path to the Depth-Anything-3 repository.')
    @click.option('--checkpoint', type=click.Path(), required=True, help='Path to trained checkpoint.')
    @click.option('--decoder', type=click.Choice(['dpt', 'dualdpt', 'sdt']), default='dpt', help='Decoder type.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, checkpoint: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str] = 'cuda'):
        """CLI entry point used by the MoGe test harness to construct the baseline."""
        return Baseline(repo_path, checkpoint, decoder, num_tokens, device)

    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """Predict affine-invariant disparity for an image.

        Args:
            image: [3, H, W] or [B, 3, H, W] tensor in [0, 1] range
                (assumed — normalized with ImageNet statistics below).
            intrinsics: Unused; accepted for interface compatibility.

        Returns:
            Dict with 'disparity_affine_invariant' at the original resolution,
            batch dimension matching the input.
        """
        original_height, original_width = image.shape[-2:]
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        if self.num_tokens is None:
            # Default: shortest side ~518 px, both sides snapped to the
            # ViT patch size of 14.
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            # Choose a rows x cols patch grid with rows * cols ~= num_tokens
            # and cols / rows ~= W / H. With aspect_ratio = W / H this means
            # cols ~ sqrt(n * aspect) and rows ~ sqrt(n / aspect).
            # (The previous version had rows/cols swapped, transposing the
            # token grid for non-square images.)
            aspect_ratio = original_width / original_height
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14

        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        # ImageNet normalization, matching DA3 training preprocessing.
        image = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = image.to(self.device)

        # DA3 model forward - outputs normalized disparity (NOT depth!)
        # torch.autocast replaces the deprecated torch.cuda.amp.autocast and
        # respects the configured device instead of hardcoding CUDA.
        with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
            disparity = self.model(image)

        # Upsample back to the input resolution.
        disparity = F.interpolate(disparity[:, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[:, 0]

        if omit_batch_dim:
            disparity = disparity.squeeze(0)

        return {
            'disparity_affine_invariant': disparity
        }