# DA3 with custom trained DPT/DualDPT/SDT checkpoint
import os
import sys
from typing import Dict, Optional, Union
from pathlib import Path
import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF
from moge.test.baseline import MGEBaselineInterface
# DA3 Wrapper (same as training)
class DA3Wrapper(torch.nn.Module):
    def __init__(self, model):
        super().__init__()
        self.model = model

    def forward(self, x):
        # x: [B, 3, H, W]
        # DA3 expects [B, N, 3, H, W] where N is the number of views
        x = x.unsqueeze(1)  # [B, 1, 3, H, W]
        output = self.model(x)
        # output.depth shape: [B, 1, H, W]
        depth = output.depth.squeeze(1)  # [B, H, W]
        return depth
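
# Example (illustrative, not from the source): for a DA3 model `m` whose
# forward output exposes `.depth` with shape [B, N, H, W], the wrapper
# collapses the single-view axis:
#   depth = DA3Wrapper(m)(torch.rand(1, 3, 518, 518))  # -> [1, 518, 518]
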
class Baseline(MGEBaselineInterface):

    def __init__(self, repo_path: str, checkpoint: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str]):
        # Locate the Depth-Anything-3 repository
        repo_path = os.path.abspath(repo_path)
        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the Depth-Anything-3 repository at {repo_path}.')
        src_path = os.path.join(repo_path, 'src')
        training_path = os.path.join(repo_path, 'training')
        # Make depth_anything_3 (and the training utilities) importable
        if src_path not in sys.path:
            sys.path.insert(0, src_path)
        if training_path not in sys.path:
            sys.path.insert(0, training_path)
        device = torch.device(device)

        # Config paths
        config_dir = os.path.join(repo_path, 'src', 'depth_anything_3', 'configs')
        if decoder == 'dpt':
            config_path = os.path.join(config_dir, 'da3dpt-large.yaml')
        elif decoder == 'dualdpt':
            config_path = os.path.join(config_dir, 'da3dualdpt-large.yaml')
        elif decoder == 'sdt':
            config_path = os.path.join(config_dir, 'da3sdt-large.yaml')
        else:
            raise ValueError(f"Unknown decoder: {decoder}")

        # Imported here so that the sys.path entries above take effect first
        from depth_anything_3.cfg import load_config, create_object

        # Build model
        cfg = load_config(config_path)
        base_model = create_object(cfg)
        model = DA3Wrapper(base_model)

        # Load checkpoint; handle both {'model': state_dict} (as saved by the
        # training loop) and a raw state dict
        if not os.path.exists(checkpoint):
            raise FileNotFoundError(f'Cannot find checkpoint at {checkpoint}')
        ckpt = torch.load(checkpoint, map_location='cpu')
        state_dict = ckpt['model'] if 'model' in ckpt else ckpt
        # Remove DDP's 'module.' prefix if present
        state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        print(f"Loaded checkpoint from {checkpoint}")
        if missing:
            print(f"Missing keys: {len(missing)}")
        if unexpected:
            print(f"Unexpected keys: {len(unexpected)}")
        model.to(device).eval()

        self.model = model
        self.num_tokens = num_tokens
        self.device = device
    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='/home/ywan0794/Depth-Anything-3', help='Path to the Depth-Anything-3 repository.')
    @click.option('--checkpoint', type=click.Path(), required=True, help='Path to the trained checkpoint.')
    @click.option('--decoder', type=click.Choice(['dpt', 'dualdpt', 'sdt']), default='dpt', help='Decoder type.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, checkpoint: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str] = 'cuda'):
        return Baseline(repo_path, checkpoint, decoder, num_tokens, device)
    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        original_height, original_width = image.shape[-2:]
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        # Choose an inference resolution whose sides are multiples of the
        # ViT patch size (14 px)
        if self.num_tokens is None:
            # Default: scale the shorter side to 518 px
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            # Solve rows * cols ≈ num_tokens subject to cols / rows ≈ W / H,
            # giving cols = sqrt(N * aspect) and rows = sqrt(N / aspect)
            aspect_ratio = original_width / original_height
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14
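            # Worked example (illustrative numbers, not from the source):
            # num_tokens=1369 on a 4:3 image gives
            # cols = round(sqrt(1369 * 4/3)) = 43 and rows = round(sqrt(1369 * 3/4)) = 32,
            # i.e. a 602x448 input whose aspect ratio stays close to 4:3.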
        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        # ImageNet mean/std normalization, matching training-time preprocessing
        image = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = image.to(self.device)

        # DA3 model forward - outputs normalized disparity (NOT depth!)
        with torch.autocast(device_type=self.device.type, dtype=torch.bfloat16):
            disparity = self.model(image)
        # Resample the prediction back to the original resolution
        disparity = F.interpolate(disparity[:, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[:, 0]
        if omit_batch_dim:
            disparity = disparity.squeeze(0)

        return {
            'disparity_affine_invariant': disparity
        }
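
if __name__ == '__main__':
    # Minimal smoke test (a sketch, not part of the evaluation harness);
    # the paths below are placeholders and must point at a real
    # Depth-Anything-3 clone and a trained checkpoint.
    baseline = Baseline(
        repo_path='/path/to/Depth-Anything-3',
        checkpoint='/path/to/checkpoint.pt',
        decoder='dpt',
        num_tokens=None,
        device='cuda',
    )
    dummy = torch.rand(3, 480, 640)  # random RGB image in [0, 1], shape [3, H, W]
    out = baseline.infer(dummy)
    print(out['disparity_affine_invariant'].shape)  # expected: torch.Size([480, 640])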