# Reference: https://github.com/prs-eth/Marigold
# Strictly follows official `script/depth/run.py`:
# from marigold import MarigoldDepthPipeline
# pipe = MarigoldDepthPipeline.from_pretrained(checkpoint, torch_dtype=dtype)
# pipe_out = pipe(input_pil_image, denoise_steps, ensemble_size, processing_res,
# match_input_res, batch_size, resample_method, ...)
# depth_pred: np.ndarray = pipe_out.depth_np # normalized affine-invariant depth
#
# Marigold reports its outputs as affine-invariant depth (Marigold paper, CVPR 2024).
# Returns key `depth_affine_invariant`.
import os
import sys
from typing import *
from pathlib import Path
import click
import torch
import torch.nn.functional as F
import numpy as np
from PIL import Image
from moge.test.baseline import MGEBaselineInterface
class Baseline(MGEBaselineInterface):
    """Marigold monocular depth estimation baseline (prs-eth/Marigold).

    Strictly follows the official ``script/depth/run.py``: builds a
    ``MarigoldDepthPipeline`` from a local clone of the Marigold repository
    and runs it on single images. Marigold predicts affine-invariant depth
    (linear-monotone with true depth, scale- and shift-free; Marigold paper,
    CVPR 2024), so :meth:`infer` returns the key ``depth_affine_invariant``.
    """

    def __init__(self, repo_path: str, checkpoint: str, denoise_steps: Optional[int],
                 ensemble_size: int, processing_res: Optional[int], half_precision: bool,
                 device: Union[torch.device, str]):
        """Load the Marigold pipeline from a local clone of the official repo.

        Args:
            repo_path: Path to a clone of https://github.com/prs-eth/Marigold.
            checkpoint: HuggingFace checkpoint name or local model directory.
            denoise_steps: Diffusion denoising steps; ``None`` uses the
                checkpoint default.
            ensemble_size: Number of ensembled predictions (run.py default: 1).
            processing_res: Internal processing resolution; ``None`` uses the
                checkpoint default.
            half_precision: Load the ``fp16`` weight variant and run in float16.
            device: Torch device used for inference.

        Raises:
            FileNotFoundError: If ``repo_path`` does not exist.
        """
        repo_path = os.path.abspath(repo_path)
        if not Path(repo_path).exists():
            raise FileNotFoundError(
                f"Cannot find Marigold repo at {repo_path}. Clone https://github.com/prs-eth/Marigold."
            )
        # Make `from marigold import MarigoldDepthPipeline` resolvable; the
        # Marigold package lives at the root of the cloned repository.
        if repo_path not in sys.path:
            sys.path.insert(0, repo_path)
        from marigold import MarigoldDepthPipeline

        device = torch.device(device)
        dtype = torch.float16 if half_precision else torch.float32
        variant = "fp16" if half_precision else None
        pipe = MarigoldDepthPipeline.from_pretrained(checkpoint, variant=variant, torch_dtype=dtype)
        try:
            # Optional speed-up; run.py does the same and silently falls
            # through when xformers is not installed.
            pipe.enable_xformers_memory_efficient_attention()
        except ImportError:
            pass
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=True)

        self.pipe = pipe
        self.device = device
        self.denoise_steps = denoise_steps
        self.ensemble_size = ensemble_size
        self.processing_res = processing_res

    @click.command()
    @click.option('--repo', 'repo_path', type=click.Path(), default='../Marigold',
                  help='Path to the prs-eth/Marigold repository.')
    @click.option('--checkpoint', type=str, default='prs-eth/marigold-depth-v1-1',
                  help='HuggingFace ckpt name or local dir (run.py default).')
    @click.option('--denoise_steps', type=int, default=None,
                  help='Diffusion denoising steps. None -> default in ckpt.')
    @click.option('--ensemble_size', type=int, default=1,
                  help='Ensemble size. run.py default = 1.')
    @click.option('--processing_res', type=int, default=None,
                  help='Processing resolution. None -> default in ckpt.')
    @click.option('--fp16', 'half_precision', is_flag=True, help='Run in half precision.')
    @click.option('--device', type=str, default='cuda')
    @staticmethod
    def load(repo_path: str, checkpoint: str, denoise_steps: Optional[int],
             ensemble_size: int, processing_res: Optional[int], half_precision: bool,
             device: str = 'cuda'):
        """CLI entry point: construct a :class:`Baseline` from click options."""
        return Baseline(repo_path, checkpoint, denoise_steps, ensemble_size,
                        processing_res, half_precision, device)

    @torch.inference_mode()
    def infer(self, image: torch.Tensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
        """Predict affine-invariant depth for one image.

        Args:
            image: Float tensor in [0, 1], shaped (3, H, W) or (1, 3, H, W).
                Only batch size 1 is supported.
            intrinsics: Accepted for interface compatibility but IGNORED —
                Marigold does not consume camera intrinsics.

        Returns:
            Dict with key ``'depth_affine_invariant'``: an (H, W) float tensor
            on ``self.device`` (or (1, H, W) when the input was batched).
        """
        # NOTE: the previous `assert intrinsics is None or True` was a
        # tautology and never checked anything; intrinsics are simply ignored.
        omit_batch = image.ndim == 3
        if omit_batch:
            image = image.unsqueeze(0)
        assert image.shape[0] == 1, "Marigold baseline only supports batch size 1"
        _, _, H, W = image.shape

        # MoGe pipeline supplies image as a float tensor in [0, 1]; the
        # Marigold pipe takes a PIL.Image (run.py uses PIL).
        arr = (image[0].cpu().permute(1, 2, 0).clamp(0, 1).numpy() * 255).astype(np.uint8)
        pil = Image.fromarray(arr)

        kwargs: Dict[str, Any] = dict(
            ensemble_size=self.ensemble_size,
            match_input_res=True,
            batch_size=0,  # 0 = let the pipeline pick (run.py default)
            resample_method='bilinear',
            show_progress_bar=False,
        )
        if self.denoise_steps is not None:
            kwargs['denoising_steps'] = self.denoise_steps  # pipeline kwarg is "denoising_steps"
        if self.processing_res is not None:
            kwargs['processing_res'] = self.processing_res
        out = self.pipe(pil, **kwargs)

        # MarigoldDepthOutput.depth_np: HxW np.float32 in [0, 1]. Marigold
        # paper: affine-invariant depth (scale + shift free).
        depth_np = out.depth_np
        depth = torch.from_numpy(np.ascontiguousarray(depth_np)).to(self.device).float()
        # Resize back if the pipeline yielded a different size (it shouldn't
        # with match_input_res=True).
        if depth.shape[-2:] != (H, W):
            depth = F.interpolate(depth[None, None], size=(H, W), mode='bilinear', align_corners=False)[0, 0]

        # Emit only the physical key Marigold actually predicts; MoGe
        # compute_metrics reports the `depth_affine_invariant` metric.
        result = {'depth_affine_invariant': depth}
        if not omit_batch:
            # Input carried a batch dimension, so restore it on the output.
            result['depth_affine_invariant'] = result['depth_affine_invariant'].unsqueeze(0)
        return result
|