# Depth-Anything-V2 (DAv2) baseline with a custom-trained DPT or SDT decoder
# checkpoint, wrapped as an MGEBaselineInterface for evaluation.
import os
import sys
from typing import Dict, Optional, Union
from pathlib import Path

import click
import torch
import torch.nn.functional as F
import torchvision.transforms as T
import torchvision.transforms.functional as TF

from moge.test.baseline import MGEBaselineInterface


class Baseline(MGEBaselineInterface):
    def __init__(self, repo_path: str, checkpoint: str, encoder: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str]):
        # Import the model code from a local clone of the repository
        repo_path = os.path.abspath(repo_path)
        if not Path(repo_path).exists():
            raise FileNotFoundError(f'Cannot find the Depth-Anything-V2 repository at {repo_path}.')
        training_path = os.path.join(repo_path, 'training')
        # Add both the repo root (for depth_anything_v2) and training (for sdt) to the import path
        if repo_path not in sys.path:
            sys.path.insert(0, repo_path)
        if training_path not in sys.path:
            sys.path.insert(0, training_path)

        device = torch.device(device)

        # Model configurations (same as training)
        model_configs = {
            'vits': {'encoder': 'vits', 'features': 64, 'out_channels': [48, 96, 192, 384]},
            'vitb': {'encoder': 'vitb', 'features': 128, 'out_channels': [96, 192, 384, 768]},
            'vitl': {'encoder': 'vitl', 'features': 256, 'out_channels': [256, 512, 1024, 1024]},
            'vitg': {'encoder': 'vitg', 'features': 384, 'out_channels': [1536, 1536, 1536, 1536]}
        }

        # Build model based on decoder type
        if decoder == 'dpt':
            from depth_anything_v2.dpt import DepthAnythingV2
            model = DepthAnythingV2(**model_configs[encoder])
        elif decoder == 'sdt':
            from depth_anything_v2.sdt import DepthAnythingV2SDT
            model = DepthAnythingV2SDT(
                encoder=encoder,
                features=model_configs[encoder]['features'],
                out_channels=model_configs[encoder]['out_channels'],
                use_clstoken=True,
                upsampler='dysample'
            )
        else:
            raise ValueError(f"Unknown decoder: {decoder}")

        # Load checkpoint
        if not os.path.exists(checkpoint):
            raise FileNotFoundError(f'Cannot find checkpoint at {checkpoint}')

        ckpt = torch.load(checkpoint, map_location='cpu')
        if 'model' in ckpt:
            state_dict = ckpt['model']
        else:
            state_dict = ckpt

        # Strip the 'module.' prefix added by DistributedDataParallel, if present
        state_dict = {k.removeprefix('module.'): v for k, v in state_dict.items()}

        missing, unexpected = model.load_state_dict(state_dict, strict=False)
        print(f"Loaded checkpoint from {checkpoint}")
        if missing:
            print(f"Missing keys: {len(missing)}")
        if unexpected:
            print(f"Unexpected keys: {len(unexpected)}")

        model.to(device).eval()
        self.model = model
        self.num_tokens = num_tokens
        self.device = device

    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='/home/ywan0794/Depth-Anything-V2', help='Path to the Depth-Anything-V2 repository.')
    @click.option('--checkpoint', type=click.Path(), required=True, help='Path to trained checkpoint.')
    @click.option('--encoder', type=click.Choice(['vits', 'vitb', 'vitl']), default='vitb', help='Encoder architecture.')
    @click.option('--decoder', type=click.Choice(['dpt', 'sdt']), default='dpt', help='Decoder type.')
    @click.option('--num_tokens', type=int, default=None, help='Number of tokens to use for the input image.')
    @click.option('--device', type=str, default='cuda', help='Device to use for inference.')
    @staticmethod
    def load(repo_path: str, checkpoint: str, encoder: str, decoder: str, num_tokens: Optional[int], device: Union[torch.device, str] = 'cuda'):
        return Baseline(repo_path, checkpoint, encoder, decoder, num_tokens, device)

    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        original_height, original_width = image.shape[-2:]

        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        if self.num_tokens is None:
            # Default: scale the short side to 518 px and round each side to a
            # multiple of the ViT patch size (14 px)
            resize_factor = 518 / min(original_height, original_width)
            expected_width = round(original_width * resize_factor / 14) * 14
            expected_height = round(original_height * resize_factor / 14) * 14
        else:
            # Choose a patch grid of roughly num_tokens patches (rows * cols) that
            # preserves the aspect ratio: columns scale with width, rows with height
            aspect_ratio = original_width / original_height
            tokens_cols = round((self.num_tokens * aspect_ratio) ** 0.5)
            tokens_rows = round((self.num_tokens / aspect_ratio) ** 0.5)
            expected_width = tokens_cols * 14
            expected_height = tokens_rows * 14
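            # Worked example (hypothetical numbers): a 640x480 input with
            # num_tokens=1200 gives aspect_ratio=4/3, tokens_cols=40 and
            # tokens_rows=30 (40 * 30 = 1200 patches), i.e. a 560x420 network input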

        # Resize to the network input size and apply ImageNet normalization
        image = TF.resize(image, (expected_height, expected_width), interpolation=T.InterpolationMode.BICUBIC, antialias=True)
        image = TF.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        image = image.to(self.device)

        # Predict relative disparity and resample it back to the original resolution
        disparity = self.model(image)
        disparity = F.interpolate(disparity[:, None], size=(original_height, original_width), mode='bilinear', align_corners=False, antialias=False)[:, 0]

        if omit_batch_dim:
            disparity = disparity.squeeze(0)

        return {
            'disparity_affine_invariant': disparity
        }
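

if __name__ == '__main__':
    # Minimal smoke-test sketch (illustrative only; not part of the evaluation
    # harness). The repository and checkpoint paths are hypothetical placeholders.
    baseline = Baseline(
        repo_path='/path/to/Depth-Anything-V2',  # hypothetical local clone
        checkpoint='/path/to/checkpoint.pth',    # hypothetical trained weights
        encoder='vitb',
        decoder='dpt',
        num_tokens=None,
        device='cuda' if torch.cuda.is_available() else 'cpu',
    )
    image = torch.rand(3, 480, 640)  # random RGB in [0, 1], shape (C, H, W)
    output = baseline.infer(image)
    print(output['disparity_affine_invariant'].shape)  # torch.Size([480, 640])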