import os
import sys
from typing import Dict, Optional, Union
from pathlib import Path

import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from moge.test.baseline import MGEBaselineInterface


class Baseline(MGEBaselineInterface):
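    """MoGe benchmark baseline for a Depth-Anything-V2 model loaded from a local checkpoint."""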

    def __init__(self, repo_path: str, checkpoint: str, encoder: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str]):
        # Validate the Depth-Anything-V2 repository path before making its
        # modules (and its training/ subdirectory) importable.
        repo_path = os.path.abspath(repo_path)
        training_path = os.path.join(repo_path, 'training')

        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the Depth-Anything-V2 repository at {repo_path}.')
        if repo_path not in sys.path:
            sys.path.insert(0, repo_path)
        if training_path not in sys.path:
            sys.path.insert(0, training_path)

        device = torch.device(device)
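
        # Encoder configurations mirroring the official Depth-Anything-V2 release.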
        model_configs = {
            'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
            'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
            'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
            'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]},
        }
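
        # Select the decoder head: 'dpt' is the standard DPT head from the official
        # Depth-Anything-V2 release, while 'sdt' loads a DepthAnythingV2SDT variant
        # that is assumed to be provided by the repository at `repo_path` rather
        # than by the upstream release.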
        if decoder == 'dpt':
            from depth_anything_v2.dpt import DepthAnythingV2
            model = DepthAnythingV2(**model_configs[encoder])
        elif decoder == 'sdt':
            from depth_anything_v2.sdt import DepthAnythingV2SDT
            model = DepthAnythingV2SDT(
                encoder=encoder,
                features=model_configs[encoder]['features'],
                out_channels=model_configs[encoder]['out_channels'],
                use_clstoken=True,
                upsampler='dysample',
            )
        else:
            raise ValueError(f'Unknown decoder: {decoder}')
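
        # Load the trained checkpoint from disk into the model.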
        if not os.path.exists(checkpoint):
            raise FileNotFoundError(f'Cannot find checkpoint at {checkpoint}')

        # Training checkpoints may wrap the weights under a 'model' key.
        ckpt = torch.load(checkpoint, map_location='cpu')
        if 'model' in ckpt:
            state_dict = ckpt['model']
        else:
            state_dict = ckpt

        # Strip the 'module.' prefix left by DistributedDataParallel, if any.
        state_dict = {k.replace('module.', ''): v for k, v in state_dict.items()}

        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        print(f'Loaded checkpoint from {checkpoint}')
        if missing:
            print(f'Missing keys: {len(missing)}')
        if unexpected:
            print(f'Unexpected keys: {len(unexpected)}')

        model.to(device).eval()
        self.model = model
        self.num_tokens = num_tokens
        self.device = device
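
    # `load` is exposed as a click command so that the benchmark harness can
    # forward baseline-specific CLI flags when constructing this baseline.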
    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='/home/ywan0794/Depth-Anything-V2', help='Path to the Depth-Anything-V2 repository.')
    @click.option('--checkpoint', type=click.Path(), required=True, help='Path to the trained checkpoint.')
    @click.option('--encoder', type=click.Choice(['vits', 'vitb', 'vitl']), default='vitb', help='Encoder architecture.')
    @click.option('--decoder', type=click.Choice(['dpt', 'sdt']), default='dpt', help='Decoder type.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image. If omitted, uses the standard 518-pixel short-side resizing.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, checkpoint: str, encoder: str, decoder: str, num_tokens: Optional[int], device: str = 'cuda'):
        return Baseline(repo_path, checkpoint, encoder, decoder, num_tokens, device)

    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """Predict affine-invariant disparity for an RGB image.

        Args:
            image: float tensor of shape (C, H, W) or (B, C, H, W), expected to be RGB in [0, 1].
            intrinsics: unused; accepted for interface compatibility.

        Returns:
            Dict with key 'disparity_affine_invariant' of shape (H, W) or (B, H, W).
        """
        original_height, original_width = image.shape[-2:]

        # Accept both single images and batches.
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        if self.num_tokens is None:
            # Standard Depth-Anything-V2 sizing: scale the short side to 518 pixels
            # and round both sides to multiples of the ViT patch size (14).
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            # Fixed token budget: choose a patch grid with rows * cols ≈ num_tokens
            # and cols / rows ≈ W / H, i.e. cols = sqrt(num_tokens * aspect_ratio)
            # and rows = sqrt(num_tokens / aspect_ratio).
            aspect_ratio = original_width / original_height
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14
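
        # Resize to the model's working resolution and apply ImageNet normalization.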
        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        image = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = image.to(self.device)

        disparity = self.model(image)

        # Resize the prediction back to the original resolution.
        disparity = F.interpolate(disparity[:, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[:, 0]

        if omit_batch_dim:
            disparity = disparity.squeeze(0)

        # The raw prediction is relative disparity, i.e. defined up to an affine transform.
        return {
            'disparity_affine_invariant': disparity
        }
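

# Minimal standalone usage sketch (hypothetical paths; in the MoGe benchmark this
# class is normally constructed through the `Baseline.load` click command):
#
#   baseline = Baseline(
#       repo_path='/path/to/Depth-Anything-V2',
#       checkpoint='/path/to/checkpoint.pth',
#       encoder='vitb',
#       decoder='dpt',
#       num_tokens=None,
#       device='cuda',
#   )
#   image = torch.rand(3, 480, 640)  # RGB in [0, 1], shape (C, H, W)
#   disparity = baseline.infer(image)['disparity_affine_invariant']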