| import cv2 |
| import matplotlib |
| import numpy as np |
| import torch |
| import torchvision.transforms as transforms |
| import trimesh |
| from PIL import Image |
|
|
|
|
def get_depth_model(device, model_name="metric3d_vit_giant2", repo_dir="osdsynth/external/Metric3D"):
    """Load a pretrained Metric3D depth model from a local torch.hub checkout.

    Args:
        device: torch device (or device string) the model is moved to.
        model_name: hub entry point to load; defaults to the ViT-giant2
            variant used elsewhere in this module.
        repo_dir: path to the local Metric3D repository checkout.

    Returns:
        The pretrained model moved to ``device``.
    """
    # source='local' makes torch.hub read hubconf.py from repo_dir instead
    # of fetching from GitHub.
    depth_model = torch.hub.load(repo_dir, model_name, pretrain=True, source='local')
    return depth_model.to(device)
|
|
|
|
def inference_depth(rgb_origin, intrinsic, depth_model):
    """Run Metric3D metric-depth inference on a single RGB image.

    Args:
        rgb_origin: (H, W, 3) uint8 image as a numpy array.
        intrinsic: 3x3 camera intrinsic matrix (numpy array).
        depth_model: model returned by ``get_depth_model``; must expose an
            ``inference({"input": tensor})`` method.

    Returns:
        (H, W) float32 numpy array of metric depth, clamped to [0, 300].
    """
    # Unpack fx, fy, cx, cy from the 3x3 intrinsic matrix.
    intrinsic = [intrinsic[0, 0], intrinsic[1, 1], intrinsic[0, 2], intrinsic[1, 2]]

    # Canonical input resolution expected by the ViT backbone.
    input_size = (616, 1064)

    # Resize so the image fits inside input_size while preserving aspect ratio.
    h, w = rgb_origin.shape[:2]
    scale = min(input_size[0] / h, input_size[1] / w)
    rgb = cv2.resize(rgb_origin, (int(w * scale), int(h * scale)), interpolation=cv2.INTER_LINEAR)
    # Keep the intrinsics consistent with the resized image.
    intrinsic = [intrinsic[0] * scale, intrinsic[1] * scale, intrinsic[2] * scale, intrinsic[3] * scale]

    # Pad to exactly input_size. The pad value equals the normalization mean
    # below, so padded pixels become zero after normalization.
    padding = [123.675, 116.28, 103.53]
    h, w = rgb.shape[:2]
    pad_h = input_size[0] - h
    pad_w = input_size[1] - w
    pad_h_half = pad_h // 2
    pad_w_half = pad_w // 2
    rgb = cv2.copyMakeBorder(
        rgb, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=padding
    )
    # [top, bottom, left, right] padding, used to crop the prediction back.
    pad_info = [pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half]

    # BUGFIX: derive the device from the model instead of hard-coding
    # .cuda(), so the device passed to get_depth_model (e.g. CPU) is honored.
    device = next(depth_model.parameters()).device

    # Normalize with ImageNet mean/std (CHW layout), then add a batch dim.
    mean = torch.tensor([123.675, 116.28, 103.53]).float()[:, None, None]
    std = torch.tensor([58.395, 57.12, 57.375]).float()[:, None, None]
    rgb = torch.from_numpy(rgb.transpose((2, 0, 1))).float()
    rgb = torch.div((rgb - mean), std)
    rgb = rgb[None, :, :, :].to(device)

    with torch.no_grad():
        pred_depth, confidence, output_dict = depth_model.inference({"input": rgb})

    # Crop the padding off the prediction.
    pred_depth = pred_depth.squeeze()
    pred_depth = pred_depth[
        pad_info[0] : pred_depth.shape[0] - pad_info[1], pad_info[2] : pred_depth.shape[1] - pad_info[3]
    ]

    # Upsample back to the original image resolution.
    pred_depth = torch.nn.functional.interpolate(
        pred_depth[None, None, :, :], rgb_origin.shape[:2], mode="bilinear"
    ).squeeze()

    # Metric3D predicts depth for a canonical camera with focal length 1000;
    # rescale to the real camera using the (resized) fx.
    canonical_to_real_scale = intrinsic[0] / 1000.0
    pred_depth = pred_depth * canonical_to_real_scale
    pred_depth = torch.clamp(pred_depth, 0, 300)
    return pred_depth.detach().cpu().numpy()
|
|
|
|
def depth_to_mesh(points, depth, image_rgb):
    """Turn per-pixel 3D points into a vertex-colored triangle mesh.

    Triangles straddling a depth discontinuity (per ``depth_edges_mask``)
    are dropped.

    Args:
        points: (H, W, 3) array of 3D points, one per pixel.
        depth: (H, W) depth map used to locate discontinuities.
        image_rgb: (H, W, 3) image supplying per-vertex colors.

    Returns:
        A ``trimesh.Trimesh`` with one vertex per pixel.
    """
    keep = ~depth_edges_mask(depth)
    height = image_rgb.shape[0]
    width = image_rgb.shape[1]
    faces = create_triangles(height, width, mask=keep)
    return trimesh.Trimesh(
        vertices=points.reshape(-1, 3),
        faces=faces,
        vertex_colors=image_rgb.reshape(-1, 3),
    )
|
|
|
|
def depth_edges_mask(depth):
    """Return a boolean mask marking depth-discontinuity (edge) pixels.

    Args:
        depth: 2D numpy array of shape (H, W) with dtype float32.
    Returns:
        mask: 2D numpy array of shape (H, W) with dtype bool; True where
        the depth-gradient magnitude exceeds 0.05.
    """
    # Central-difference gradients along each image axis.
    grad_axis0, grad_axis1 = np.gradient(depth)
    # Euclidean magnitude of the gradient field.
    magnitude = np.sqrt(np.square(grad_axis0) + np.square(grad_axis1))
    return magnitude > 0.05
|
|
|
|
def create_triangles(h, w, mask=None):
    """
    Reference: https://github.com/google-research/google-research/blob/e96197de06613f1b027d20328e06d69829fa5a89/infinite_nature/render_utils.py#L68
    Creates mesh triangle indices from a given pixel grid size.
    This function is not and need not be differentiable as triangle indices are
    fixed.
    Args:
        h: (int) denoting the height of the image.
        w: (int) denoting the width of the image.
        mask: optional (h, w) boolean array; triangles touching any False
            vertex are discarded.
    Returns:
        triangles: 2D numpy array of indices (int) with shape (2(W-1)(H-1) x 3)
    """
    # Flat vertex index of each cell's top-left corner, plus its neighbors.
    cols, rows = np.meshgrid(np.arange(w - 1), np.arange(h - 1))
    top_left = rows * w + cols
    top_right = top_left + 1
    bottom_left = top_left + w
    bottom_right = top_left + w + 1
    # Two triangles per grid cell, wound consistently.
    tris = np.stack([top_left, bottom_left, top_right, bottom_right, top_right, bottom_left])
    tris = tris.transpose(1, 2, 0).reshape((w - 1) * (h - 1) * 2, 3)
    if mask is not None:
        flat_mask = mask.reshape(-1)
        keep = flat_mask[tris].all(1)
        tris = tris[keep]
    return tris
|
|
|
|
def get_intrinsics(H, W, fov):
    """Intrinsics for a pinhole camera with a centered principal point.

    The focal length is derived from ``fov`` (in degrees) measured over the
    image height.
    """
    # f = (H/2) / tan(fov/2): half-height over the tangent of half the FOV.
    half_fov = np.radians(fov) / 2
    f = H / 2 / np.tan(half_fov)
    principal_x = 0.5 * W
    principal_y = 0.5 * H
    return np.array([
        [f, 0, principal_x],
        [0, f, principal_y],
        [0, 0, 1],
    ])
|
|
|
|
def depth_to_points(depth, R=None, t=None, fov=None, intrinsic=None):
    """Back-project a depth map into per-pixel 3D points.

    Args:
        depth: (1, H, W) numpy array of depth values.
        R: optional 3x3 rotation applied to the camera-frame points
            (defaults to identity).
        t: optional length-3 translation (defaults to zeros).
        fov: field of view in degrees; used to build intrinsics when
            ``intrinsic`` is not supplied.
        intrinsic: optional 3x3 camera intrinsic matrix.

    Returns:
        (H, W, 3) numpy array of 3D points; the x and y axes are negated
        at the end (axis-convention flip).
    """
    K = get_intrinsics(depth.shape[1], depth.shape[2], fov) if intrinsic is None else intrinsic
    Kinv = np.linalg.inv(K)
    R = np.eye(3) if R is None else R
    t = np.zeros(3) if t is None else t

    # Identity transform retained for parity with the reference pipeline.
    M = np.eye(3)

    height, width = depth.shape[1:3]

    # Homogeneous pixel coordinates, shaped (1, H, W, 3) then (1, H, W, 3, 1).
    xs = np.arange(width)
    ys = np.arange(height)
    grid = np.stack(np.meshgrid(xs, ys), -1)
    grid = np.concatenate((grid, np.ones_like(grid)[:, :, [0]]), -1)
    grid = grid.astype(np.float32)[None]

    # Depth broadcast against the 3x3 matrices: (1, H, W, 1, 1).
    D = depth[:, :, :, None, None]
    # Unproject pixels into the camera frame.
    cam_pts = D * Kinv[None, None, None, ...] @ grid[:, :, :, :, None]
    cam_pts = M[None, None, None, ...] @ cam_pts
    # Apply the rigid transform (R, t).
    world_pts = R[None, None, None, ...] @ cam_pts + t[None, None, None, :, None]

    # Negate x and y to match the expected output axis convention.
    G = np.eye(3)
    G[0, 0] = -1.0
    G[1, 1] = -1.0
    return world_pts[:, :, :, :3, 0][0] @ G.T
|
|
| |
|
|
| |
|
|
|
|
def colorize_depth(
    value,
    vmin=None,
    vmax=None,
    cmap="inferno_r",
    invalid_val=-99,
    invalid_mask=None,
    background_color=(128, 128, 128, 255),
    gamma_corrected=False,
    value_transform=None,
):
    """Converts a depth map to a color image.

    Args:
        value (torch.Tensor, numpy.ndarry): Input depth map. Shape: (H, W) or (1, H, W) or (1, 1, H, W). All singular dimensions are squeezed
        vmin (float, optional): vmin-valued entries are mapped to start color of cmap. If None, the 2nd percentile of valid values is used. Defaults to None.
        vmax (float, optional): vmax-valued entries are mapped to end color of cmap. If None, the 85th percentile of valid values is used. Defaults to None.
        cmap (str, optional): matplotlib colormap to use. Defaults to 'inferno_r'.
        invalid_val (int, optional): Specifies value of invalid pixels that should be colored as 'background_color'. Defaults to -99.
        invalid_mask (numpy.ndarray, optional): Boolean mask for invalid regions. Defaults to None.
        background_color (tuple[int], optional): 4-tuple RGB color to give to invalid pixels. Defaults to (128, 128, 128, 255).
        gamma_corrected (bool, optional): Apply gamma correction to colored image. Defaults to False.
        value_transform (Callable, optional): Apply transform function to valid pixels before coloring. Defaults to None.

    Returns:
        numpy.ndarray, dtype - uint8: Colored depth map. Shape: (H, W, 4)
    """
    if isinstance(value, torch.Tensor):
        value = value.detach().cpu().numpy()

    value = value.squeeze()
    if invalid_mask is None:
        invalid_mask = value == invalid_val
    mask = np.logical_not(invalid_mask)

    # FIX: guard against an all-invalid input; np.percentile would raise on
    # an empty array.
    if mask.any():
        vmin = np.percentile(value[mask], 2) if vmin is None else vmin
        vmax = np.percentile(value[mask], 85) if vmax is None else vmax
    else:
        vmin = 0.0 if vmin is None else vmin
        vmax = 0.0 if vmax is None else vmax
    if vmin != vmax:
        # Normalize valid values into [0, 1] for the colormap.
        value = (value - vmin) / (vmax - vmin)
    else:
        # Degenerate range: map everything to the start of the colormap.
        value = value * 0.0

    # NaN pixels are rendered by matplotlib as the colormap's "bad" color;
    # they get overwritten with background_color below anyway.
    value[invalid_mask] = np.nan
    # FIX: matplotlib.cm.get_cmap was deprecated in 3.7 and removed in 3.9;
    # use the colormaps registry, falling back for older matplotlib versions.
    try:
        cmapper = matplotlib.colormaps[cmap]
    except AttributeError:
        cmapper = matplotlib.cm.get_cmap(cmap)
    if value_transform:
        value = value_transform(value)
    # bytes=True yields uint8 RGBA with shape (H, W, 4).
    value = cmapper(value, bytes=True)

    img = value[...]
    img[invalid_mask] = background_color

    if gamma_corrected:
        # Apply display gamma (2.2) in normalized space.
        img = img / 255
        img = np.power(img, 2.2)
        img = img * 255
        img = img.astype(np.uint8)
    return img
|
|