import os
import sys
import math
from typing import Optional

import click
import torch
import torch.nn.functional as F

from moge.test.baseline import MGEBaselineInterface


class Baseline(MGEBaselineInterface):
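    """MoGe-style baseline wrapping a two-stage RAE latent diffusion depth
    model: stage 1 (the RAE) encodes RGB into a conditioning latent and
    decodes the sampled depth latent; stage 2 denoises a depth latent with a
    transport sampler, conditioned on the RGB latent by channel concatenation.
    """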

    def __init__(self, repo_path: str, config_path: str, checkpoint_path: str,
                 image_size: int, num_steps: int, use_fp16: bool, device: str = 'cuda:0'):
        super().__init__()
        repo_path = os.path.abspath(repo_path)
        src_path = os.path.join(repo_path, 'src')
        if src_path not in sys.path:
            sys.path.insert(0, src_path)

        # Deferred imports: these modules live under <repo>/src, which is only
        # on sys.path after the insertion above.
        from omegaconf import OmegaConf
        from stage2.transport import create_transport, Sampler
        from utils.model_utils import instantiate_from_config

        # Split the experiment config into stage-1 (RAE), stage-2 (diffusion
        # model), transport, sampler, and misc sections.
        full_cfg = OmegaConf.load(config_path)
        rae_config = full_cfg.get("stage_1", None)
        model_config = full_cfg.get("stage_2", None)
        transport_config = full_cfg.get("transport", {})
        sampler_config = full_cfg.get("sampler", {})
        misc_config = full_cfg.get("misc", {})

        # Convert OmegaConf nodes to plain dicts so they can be splatted into
        # ordinary Python constructors.
        transport_cfg = OmegaConf.to_container(transport_config, resolve=True) if transport_config else {}
        sampler_cfg = OmegaConf.to_container(sampler_config, resolve=True) if sampler_config else {}
        misc = OmegaConf.to_container(misc_config, resolve=True) if misc_config else {}

        # Latent geometry and the resolution-dependent timestep shift.
        latent_size = tuple(int(dim) for dim in misc.get("latent_size", (768, 32, 32)))
        shift_dim = misc.get("time_dist_shift_dim", math.prod(latent_size))
        shift_base = misc.get("time_dist_shift_base", 4096)
        time_dist_shift = math.sqrt(shift_dim / shift_base)
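        # E.g. with the default 768x32x32 latent the shift is
        # sqrt(786432 / 4096) = sqrt(192) ≈ 13.86, mirroring the
        # resolution-dependent timestep shift used in flow-matching models.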

        # Stage 1: the RAE, which encodes RGB and decodes depth latents.
        rae = instantiate_from_config(rae_config).to(device)
        rae.eval()

        # Stage 2: the latent diffusion model.
        model = instantiate_from_config(model_config).to(device)

        # Load weights, preferring the EMA copy when the checkpoint has one.
        checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=False)
        if 'ema' in checkpoint:
            state_dict = checkpoint['ema']
        elif 'model' in checkpoint:
            state_dict = checkpoint['model']
        else:
            state_dict = checkpoint
        model.load_state_dict(state_dict)
        model.eval()

        # Build the transport and its sampler, overriding any configured
        # time_dist_shift with the value derived above.
        transport_params = dict(transport_cfg.get("params", {}))
        transport_params.pop("time_dist_shift", None)
        transport = create_transport(**transport_params, time_dist_shift=time_dist_shift)
        transport_sampler = Sampler(transport)

        # ODE or SDE sampling, with the step count taken from the CLI.
        sampler_mode = sampler_cfg.get("mode", "ODE").upper()
        sampler_params = dict(sampler_cfg.get("params", {}))
        sampler_params['num_steps'] = num_steps

        if sampler_mode == "ODE":
            eval_sampler = transport_sampler.sample_ode(**sampler_params)
        else:
            eval_sampler = transport_sampler.sample_sde(**sampler_params)

        self.rae = rae
        self.model = model
        self.eval_sampler = eval_sampler
        self.latent_size = latent_size
        self.image_size = image_size
        self.device = torch.device(device)
        self.use_fp16 = use_fp16
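
    # `load` doubles as a click command: the benchmark driver can parse the
    # baseline-specific CLI flags below and receive a constructed Baseline.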
    @click.command()
    @click.option('--repo', 'repo_path', type=str, default='/home/ywan0794/RAE')
    @click.option('--rae_config', 'config_path', type=str, required=True)
    @click.option('--checkpoint', 'checkpoint_path', type=str, required=True)
    @click.option('--image_size', type=int, default=512)
    @click.option('--num_steps', type=int, default=2)
    @click.option('--fp16', 'use_fp16', is_flag=True)
    @click.option('--device', type=str, default='cuda:0')
    @staticmethod
    def load(repo_path, config_path, checkpoint_path, image_size, num_steps, use_fp16, device):
        return Baseline(repo_path, config_path, checkpoint_path, image_size, num_steps, use_fp16, device)

    def _predict_depth(self, image: torch.FloatTensor):
        original_height, original_width = image.shape[-2:]

        # Accept both (3, H, W) and (B, 3, H, W) inputs.
        if image.ndim == 3:
            image = image.unsqueeze(0)
            omit_batch_dim = True
        else:
            omit_batch_dim = False

        b = image.shape[0]

        # Resize to the model's square working resolution.
        image_resized = F.interpolate(
            image, size=(self.image_size, self.image_size),
            mode='bilinear', align_corners=False, antialias=True,
        )

        # Encode the RGB image into the conditioning latent.
        z_rgb = self.rae.encode(image_resized)

        # Start from Gaussian noise in latent space; y is a fixed dummy label.
        z_noise = torch.randn(b, *self.latent_size, device=self.device)
        y = torch.zeros(b, dtype=torch.long, device=self.device)

        # The model is conditioned by channel-concatenating the RGB latent
        # with the noisy depth latent at every sampler step.
        def model_fn(xt, t, y):
            x_input = torch.cat([xt, z_rgb], dim=1)
            return self.model(x_input, t, y)

        # Run the sampler and keep the final state of the trajectory.
        z_pred = self.eval_sampler(z_noise, model_fn, y=y)[-1]

        # Decode the depth latent (the decoder also sees the RGB latent) and
        # collapse the channel dimension to a single-channel depth map.
        depth_pred = self.rae.decode(z_pred.float(), z_rgb)
        depth_pred = depth_pred.mean(dim=1)

        # Resize back to the original resolution.
        depth_pred = F.interpolate(
            depth_pred.unsqueeze(1), size=(original_height, original_width),
            mode='bilinear', align_corners=False,
        )[:, 0]

        if omit_batch_dim:
            depth_pred = depth_pred.squeeze(0)

        return depth_pred
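
    # Both entry points report 'depth_affine_invariant': depth defined only up
    # to a per-image scale and shift, which the evaluation is expected to align
    # before computing metrics.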
    @torch.inference_mode()
    def infer(self, image: torch.FloatTensor, intrinsics: Optional[torch.FloatTensor] = None):
        depth_pred = self._predict_depth(image)
        return {
            'depth_affine_invariant': depth_pred,
        }

    @torch.inference_mode()
    def infer_for_evaluation(self, image: torch.FloatTensor, intrinsics: Optional[torch.FloatTensor] = None):
        # Optionally run under fp16 autocast for faster evaluation.
        with torch.autocast(self.device.type, enabled=self.use_fp16, dtype=torch.float16):
            depth_pred = self._predict_depth(image)
        return {
            'depth_affine_invariant': depth_pred,
        }
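
# Minimal usage sketch (paths are hypothetical placeholders; the expected image
# value range depends on the RAE's preprocessing and is assumed [0, 1] here):
#
#   baseline = Baseline(
#       repo_path='/path/to/RAE',
#       config_path='configs/depth.yaml',
#       checkpoint_path='checkpoints/model.pt',
#       image_size=512, num_steps=2, use_fp16=True, device='cuda:0',
#   )
#   image = torch.rand(3, 480, 640, device='cuda:0')
#   depth = baseline.infer(image)['depth_affine_invariant']  # shape (480, 640)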