# VGGT with custom trained DPT/SDT checkpoint (LoRA)
import os
import sys
from pathlib import Path
from typing import Dict, Optional, Union

import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from moge.test.baseline import MGEBaselineInterface

class Baseline(MGEBaselineInterface):

    def __init__(self, repo_path: str, checkpoint: str, decoder: str, lora_rank: int, lora_alpha: int, num_tokens: Optional[int], device: Union[torch.device, str]):
        # Make the VGGT repository and its training utilities importable
        repo_path = os.path.abspath(repo_path)
        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the VGGT repository at {repo_path}.')
        training_path = os.path.join(repo_path, 'training')
        if training_path not in sys.path:
            sys.path.insert(0, training_path)
        if repo_path not in sys.path:
            sys.path.insert(0, repo_path)

        device = torch.device(device)

        # Build the model based on the decoder type
        if decoder == 'dpt':
            from vggt.models.vggt import VGGT
            model = VGGT(
                enable_camera=True,
                enable_depth=True,
                enable_point=False,
                enable_track=False,
            )
        elif decoder == 'sdt':
            from vggt.models.vggt_sdt import VGGT_SDT
            model = VGGT_SDT(
                enable_camera=True,
                enable_depth=True,
                enable_point=False,
                enable_track=False,
            )
        else:
            raise ValueError(f"Unknown decoder: {decoder}")

        # Apply LoRA adapters so the low-rank weights in the checkpoint have matching modules to load into
        from lora import apply_lora
        model = apply_lora(model, rank=lora_rank, alpha=lora_alpha)
        print(f"Applied LoRA (rank={lora_rank}, alpha={lora_alpha})")

        # Load the trained checkpoint
        if not os.path.exists(checkpoint):
            raise FileNotFoundError(f'Cannot find checkpoint at {checkpoint}')
        ckpt = torch.load(checkpoint, map_location='cpu')
        state_dict = ckpt['model'] if 'model' in ckpt else ckpt
        # Strip the 'module.' prefix left over from DistributedDataParallel training, if present
        state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}
        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        print(f"Loaded checkpoint from {checkpoint}")
        if missing:
            print(f"Missing keys: {len(missing)}")
        if unexpected:
            print(f"Unexpected keys: {len(unexpected)}")

        model.to(device).eval()
        self.model = model
        self.num_tokens = num_tokens
        self.device = device
    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='/home/ywan0794/vggt', help='Path to the VGGT repository.')
    @click.option('--checkpoint', type=click.Path(), required=True, help='Path to the trained checkpoint.')
    @click.option('--decoder', type=click.Choice(['dpt', 'sdt']), default='dpt', help='Decoder type.')
    @click.option('--lora_rank', type=int, default=8, help='LoRA rank.')
    @click.option('--lora_alpha', type=int, default=16, help='LoRA alpha.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image. Defaults to resizing the shorter side to 518 px.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, checkpoint: str, decoder: str, lora_rank: int, lora_alpha: int, num_tokens: Optional[int], device: Union[torch.device, str] = 'cuda'):
        return Baseline(repo_path, checkpoint, decoder, lora_rank, lora_alpha, num_tokens, device)
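
    # Since `load` is wrapped by click, the evaluation driver is expected to pass CLI-style
    # arguments. A minimal sketch of a standalone call (paths and values below are
    # placeholders, not defaults of this file); `standalone_mode=False` makes click return
    # the Baseline instance instead of calling sys.exit():
    #
    #   baseline = Baseline.load.main(
    #       ['--checkpoint', '/path/to/ckpt.pt', '--decoder', 'sdt', '--num_tokens', '1200'],
    #       standalone_mode=False,
    #   )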
    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        original_height, original_width = image.shape[-2:]
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        if self.num_tokens is None:
            # Default: resize the shorter side to 518 px, snapping both sides to multiples of 14 (the ViT patch size)
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            # Choose a patch grid with roughly num_tokens patches that preserves the aspect ratio:
            # rows * cols ≈ num_tokens and cols / rows ≈ width / height
            aspect_ratio = original_width / original_height
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14
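            # Worked example (illustrative values): a 640x480 input with num_tokens=1200 gives
            # aspect_ratio = 4/3, rows = round(sqrt(1200 * 3/4)) = 30, cols = round(sqrt(1200 * 4/3)) = 40,
            # i.e. a 560x420 resize carrying 30 * 40 = 1200 patch tokens.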
        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        # VGGT expects the [0, 1] range, not ImageNet normalization; clamp away bicubic overshoot
        image = image.clamp(0, 1).to(self.device)

        # VGGT expects a sequence of images [B, S, 3, H, W]; duplicate the single frame into a 2-frame sequence
        rgb_seq = image.unsqueeze(1).repeat(1, 2, 1, 1, 1)

        # Forward pass
        with torch.autocast(device_type='cuda', dtype=torch.bfloat16):
            output = self.model(images=rgb_seq)

        # Extract depth from the prediction; output["depth"] has shape [B, S, H, W, 1]
        depth = output["depth"][0, 0, :, :, 0]

        # Convert depth to disparity and resize back to the original resolution
        disparity = 1.0 / (depth + 1e-6)
        disparity = F.interpolate(disparity[None, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[0, 0]

        # Indexing [0, 0] above already dropped the batch dimension, so `omit_batch_dim` needs no further handling
        return {
            'disparity_affine_invariant': disparity,
        }
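

# The 'disparity_affine_invariant' output is only defined up to a per-image scale and shift,
# so an evaluator must align it to the ground truth before computing metrics. A minimal
# sketch of that alignment via closed-form least squares; this helper is illustrative and
# not part of the MoGe evaluation API:
def align_affine_invariant_disparity(pred: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor) -> torch.Tensor:
    """Solve min over (s, t) of || s * pred + t - gt ||^2 on valid pixels, return s * pred + t."""
    p = pred[mask].flatten()
    g = gt[mask].flatten()
    # Least-squares with design matrix [p, 1]
    A = torch.stack([p, torch.ones_like(p)], dim=-1)
    scale, shift = torch.linalg.lstsq(A, g.unsqueeze(-1)).solution.flatten()
    return scale * pred + shift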