import os
import sys
from typing import Optional
import math

import click
import torch
import torch.nn.functional as F

from moge.test.baseline import MGEBaselineInterface
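

# MoGe-style evaluation baseline for a two-stage depth model (as inferred
# from the code below): stage 1, an RAE, encodes RGB into a latent space and
# decodes predicted depth latents back to pixels; stage 2 is a generative
# model sampled with a transport ODE/SDE sampler, conditioned on the RGB
# latent Marigold-style.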
class Baseline(MGEBaselineInterface):

    def __init__(self, repo_path: str, config_path: str, checkpoint_path: str,
                 image_size: int, num_steps: int, use_fp16: bool, device: str = 'cuda:0'):
        super().__init__()

        # Make the repo's `src/` directory importable before loading its modules.
        repo_path = os.path.abspath(repo_path)
        src_path = os.path.join(repo_path, 'src')
        if src_path not in sys.path:
            sys.path.insert(0, src_path)

        # Deferred imports: `stage2` and `utils` live inside the external repo
        # and only resolve once `src/` is on sys.path.
        from omegaconf import OmegaConf
        from stage2.transport import create_transport, Sampler
        from utils.model_utils import instantiate_from_config

        # Load config
        full_cfg = OmegaConf.load(config_path)
        rae_config = full_cfg.get("stage_1", None)
        model_config = full_cfg.get("stage_2", None)
        transport_config = full_cfg.get("transport", {})
        sampler_config = full_cfg.get("sampler", {})
        misc_config = full_cfg.get("misc", {})
        transport_cfg = OmegaConf.to_container(transport_config, resolve=True) if transport_config else {}
        sampler_cfg = OmegaConf.to_container(sampler_config, resolve=True) if sampler_config else {}
        misc = OmegaConf.to_container(misc_config, resolve=True) if misc_config else {}

        latent_size = tuple(int(dim) for dim in misc.get("latent_size", (768, 32, 32)))
        # Rescale the time-distribution shift with the latent dimensionality,
        # relative to a reference size of `shift_base` elements.
        shift_dim = misc.get("time_dist_shift_dim", math.prod(latent_size))
        shift_base = misc.get("time_dist_shift_base", 4096)
        time_dist_shift = math.sqrt(shift_dim / shift_base)
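
        # Expected config layout (inferred from the keys read above; the
        # `target` / `params` fields are placeholders for whatever this
        # repo's `instantiate_from_config` consumes):
        #
        #   stage_1:   {target: ..., params: {...}}   # RAE
        #   stage_2:   {target: ..., params: {...}}   # stage-2 generative model
        #   transport: {params: {...}}
        #   sampler:   {mode: ODE, params: {...}}
        #   misc:      {latent_size: [768, 32, 32], time_dist_shift_base: 4096, ...}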

        # Load RAE (DepthRAE)
        rae = instantiate_from_config(rae_config).to(device)
        rae.eval()

        # Load Stage-2 model
        model = instantiate_from_config(model_config).to(device)

        # Load checkpoint, preferring EMA weights when available
        checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)
        if 'ema' in checkpoint:
            state_dict = checkpoint['ema']
        elif 'model' in checkpoint:
            state_dict = checkpoint['model']
        else:
            state_dict = checkpoint
        model.load_state_dict(state_dict)
        model.eval()

        # Create transport sampler; `time_dist_shift` is overridden with the
        # value derived above, so drop any copy baked into the config.
        transport_params = dict(transport_cfg.get("params", {}))
        transport_params.pop("time_dist_shift", None)
        transport = create_transport(**transport_params, time_dist_shift=time_dist_shift)
        transport_sampler = Sampler(transport)

        sampler_mode = sampler_cfg.get("mode", "ODE").upper()
        sampler_params = dict(sampler_cfg.get("params", {}))
        sampler_params['num_steps'] = num_steps
        if sampler_mode == "ODE":
            eval_sampler = transport_sampler.sample_ode(**sampler_params)
        else:
            eval_sampler = transport_sampler.sample_sde(**sampler_params)

        self.rae = rae
        self.model = model
        self.eval_sampler = eval_sampler
        self.latent_size = latent_size
        self.image_size = image_size
        self.device = torch.device(device)
        self.use_fp16 = use_fp16
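
    # Following the MoGe baseline convention, `load` is declared as a click
    # command; presumably the evaluation harness parses these options from its
    # own CLI and forwards them here rather than invoking the command directly.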
    @click.command()
    @click.option('--repo', 'repo_path', type=str, default='/home/ywan0794/RAE')
    @click.option('--rae_config', 'config_path', type=str, required=True)
    @click.option('--checkpoint', 'checkpoint_path', type=str, required=True)
    @click.option('--image_size', type=int, default=512)
    @click.option('--num_steps', type=int, default=2)
    @click.option('--fp16', 'use_fp16', is_flag=True)
    @click.option('--device', type=str, default='cuda:0')
    @staticmethod
    def load(repo_path, config_path, checkpoint_path, image_size, num_steps, use_fp16, device):
        return Baseline(repo_path, config_path, checkpoint_path, image_size, num_steps, use_fp16, device)

    def _predict_depth(self, image: torch.FloatTensor):
        original_height, original_width = image.shape[-2:]
        if image.ndim == 3:
            # Accept a single (3, H, W) image as well as a (B, 3, H, W) batch.
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False
        b = image.shape[0]

        # Resize to model input size
        image_resized = F.interpolate(
            image, size=(self.image_size, self.image_size),
            mode='bilinear', align_corners=False, antialias=True,
        )

        # Encode RGB
        z_rgb = self.rae.encode(image_resized)

        # Sample depth from noise
        z_noise = torch.randn(b, *self.latent_size, device=self.device)
        y = torch.zeros(b, dtype=torch.long, device=self.device)

        # Marigold-style conditioning: concat z_rgb with xt before passing to the model
        def model_fn(xt, t, y):
            x_input = torch.cat([xt, z_rgb], dim=1)
            return self.model(x_input, t, y)

        # Run diffusion sampling; the sampler returns the trajectory, so take the final state
        z_pred = self.eval_sampler(z_noise, model_fn, y=y)[-1]

        # Decode to depth (pass z_rgb for conditioning)
        depth_pred = self.rae.decode(z_pred.float(), z_rgb)
        depth_pred = depth_pred.mean(dim=1)  # (B, H, W)

        # Resize back to original size
        depth_pred = F.interpolate(
            depth_pred.unsqueeze(1), size=(original_height, original_width),
            mode='bilinear', align_corners=False,
        )[:, 0]
        if omit_batch_dim:
            depth_pred = depth_pred.squeeze(0)
        return depth_pred

    @torch.inference_mode()
    def infer(self, image: torch.FloatTensor, intrinsics: Optional[torch.FloatTensor] = None):
        depth_pred = self._predict_depth(image)
        return {
            'depth_affine_invariant': depth_pred,
        }

    @torch.inference_mode()
    def infer_for_evaluation(self, image: torch.FloatTensor, intrinsics: Optional[torch.FloatTensor] = None):
        # Optionally run under fp16 autocast for evaluation throughput.
        with torch.autocast(device_type='cuda', dtype=torch.float16, enabled=self.use_fp16):
            depth_pred = self._predict_depth(image)
        return {
            'depth_affine_invariant': depth_pred,
        }
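

# Usage sketch (illustrative; the paths below are hypothetical, and the MoGe
# evaluation harness would normally construct this via `Baseline.load`):
#
#   import torch
#   baseline = Baseline(
#       repo_path='/path/to/RAE',
#       config_path='/path/to/config.yaml',
#       checkpoint_path='/path/to/checkpoint.pt',
#       image_size=512,
#       num_steps=2,
#       use_fp16=True,
#   )
#   image = torch.rand(3, 480, 640, device=baseline.device)  # assumed RGB in [0, 1]
#   depth = baseline.infer(image)['depth_affine_invariant']  # (480, 640)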