# Reference: https://github.com/ByteDance-Seed/Depth-Anything-3
import os
import sys
from typing import *
from pathlib import Path

import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from moge.test.baseline import MGEBaselineInterface


class Baseline(MGEBaselineInterface):

    def __init__(self, repo_path: str, model_name: str, num_tokens: Optional[int], device: Union[torch.device, str]):
        # Locate the Depth-Anything-3 repository and make its package importable
        repo_path = os.path.abspath(repo_path)
        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the Depth-Anything-3 repository at {repo_path}. Please clone the repository and provide the path to it using the --repo option.')
        src_path = os.path.join(repo_path, 'src')
        if src_path not in sys.path:
            sys.path.insert(0, src_path)
        from depth_anything_3.api import DepthAnything3

        device = torch.device(device)

        # Instantiate the pretrained model and move it to the target device
        model = DepthAnything3.from_pretrained(f"ByteDance-Seed/{model_name}")
        model.to(device).eval()

        self.model = model
        self.num_tokens = num_tokens
        self.device = device

    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='../Depth-Anything-3', help='Path to the Depth-Anything-3 repository.')
    @click.option('--model_name', type=click.Choice(['da3-base', 'da3-large', 'da3-giant']), default='da3-large', help='Model name.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, model_name: str, num_tokens: Optional[int], device: Union[torch.device, str] = 'cuda'):
        return Baseline(repo_path, model_name, num_tokens, device)

    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        original_height, original_width = image.shape[-2:]
        assert intrinsics is None, "Depth-Anything-3 does not support camera intrinsics input in this baseline"

        # Accept both (3, H, W) and (B, 3, H, W) inputs
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        # Choose the network input resolution: both sides must be multiples of the
        # ViT patch size (14). Without a token budget, resize the shorter side to 518;
        # otherwise pick a resolution whose (H / 14) x (W / 14) token grid approximately
        # matches num_tokens while preserving the aspect ratio.
        if self.num_tokens is None:
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            aspect_ratio = original_width / original_height
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14

        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        image = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

        # DA3 expects [B, N, 3, H, W], where N is the number of views
        image = image.unsqueeze(1)  # [B, 1, 3, H, W]

        # Forward pass
        output = self.model(image)

        # Extract the depth prediction. Output shape: [B, N, H, W]
        depth = output['depth'][:, 0]  # [B, H, W]

        # Convert depth to disparity (inverse depth) and resize back to the original resolution
        disparity = 1.0 / (depth + 1e-6)
        disparity = F.interpolate(disparity[:, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[:, 0]

        if omit_batch_dim:
            disparity = disparity.squeeze(0)

        return {
            'disparity_affine_invariant': disparity
        }
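

if __name__ == '__main__':
    # Minimal smoke-test sketch, not part of the MoGe evaluation harness.
    # The repository path, model name, image size, and device below are
    # assumptions; adjust them to your local setup before running.
    baseline = Baseline(repo_path='../Depth-Anything-3', model_name='da3-large', num_tokens=None, device='cuda')
    dummy_image = torch.rand(3, 480, 640, device='cuda')  # RGB image with values in [0, 1]
    prediction = baseline.infer(dummy_image)
    print(prediction['disparity_affine_invariant'].shape)  # expected: torch.Size([480, 640])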