zeyuren2002 committed
Commit b69ec42 · verified · 1 Parent(s): a99114a

Add files using upload-large-folder tool

moge/scripts/app.py ADDED
@@ -0,0 +1,301 @@
+ import os
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
+ import sys
+ from pathlib import Path
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ import time
+ import uuid
+ import tempfile
+ import itertools
+ from typing import *
+ import atexit
+ from concurrent.futures import ThreadPoolExecutor
+ import shutil
+
+ import click
+
+
+ @click.command(help='Web demo')
+ @click.option('--share', is_flag=True, help='Whether to run the app in shared mode.')
+ @click.option('--pretrained', 'pretrained_model_name_or_path', default=None, help='The name or path of the pre-trained model.')
+ @click.option('--version', 'model_version', default='v2', help='The version of the model.')
+ @click.option('--fp16', 'use_fp16', is_flag=True, help='Whether to use fp16 inference.')
+ def main(share: bool, pretrained_model_name_or_path: str, model_version: str, use_fp16: bool):
+     print("Import modules...")
+     # Lazy import
+     import cv2
+     import torch
+     import numpy as np
+     import trimesh
+     import trimesh.visual
+     from PIL import Image
+     import gradio as gr
+     try:
+         import spaces  # This is for deployment at huggingface.co/spaces
+         HUGGINFACE_SPACES_INSTALLED = True
+     except ImportError:
+         HUGGINFACE_SPACES_INSTALLED = False
+
+     import utils3d
+     from moge.utils.io import write_normal
+     from moge.utils.vis import colorize_depth, colorize_normal
+     from moge.model import import_model_class_by_version
+     from moge.utils.geometry_numpy import depth_occlusion_edge_numpy
+     from moge.utils.tools import timeit
+
+     print("Load model...")
+     if pretrained_model_name_or_path is None:
+         DEFAULT_PRETRAINED_MODEL_FOR_EACH_VERSION = {
+             "v1": "Ruicheng/moge-vitl",
+             "v2": "Ruicheng/moge-2-vitl-normal",
+         }
+         pretrained_model_name_or_path = DEFAULT_PRETRAINED_MODEL_FOR_EACH_VERSION[model_version]
+     model = import_model_class_by_version(model_version).from_pretrained(pretrained_model_name_or_path).cuda().eval()
+     if use_fp16:
+         model.half()
+     thread_pool_executor = ThreadPoolExecutor(max_workers=1)
+
+     def delete_later(path: Union[str, os.PathLike], delay: int = 300):
+         def _delete():
+             try:
+                 os.remove(path)
+             except FileNotFoundError:
+                 pass
+         def _wait_and_delete():
+             time.sleep(delay)
+             _delete()
+         thread_pool_executor.submit(_wait_and_delete)
+         atexit.register(_delete)
+
+     # Inference on GPU.
+     @(spaces.GPU if HUGGINFACE_SPACES_INSTALLED else lambda x: x)
+     def run_with_gpu(image: np.ndarray, resolution_level: int, apply_mask: bool) -> Dict[str, np.ndarray]:
+         image_tensor = torch.tensor(image, dtype=torch.float32 if not use_fp16 else torch.float16, device=torch.device('cuda')).permute(2, 0, 1) / 255
+         output = model.infer(image_tensor, apply_mask=apply_mask, resolution_level=resolution_level, use_fp16=use_fp16)
+         output = {k: v.cpu().numpy() for k, v in output.items()}
+         return output
+
+     # Full inference pipeline
+     def run(image: np.ndarray, max_size: int = 800, resolution_level: str = 'High', apply_mask: bool = True, remove_edge: bool = True, request: gr.Request = None):
+         larger_size = max(image.shape[:2])
+         if larger_size > max_size:
+             scale = max_size / larger_size
+             image = cv2.resize(image, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_AREA)
+
+         height, width = image.shape[:2]
+
+         resolution_level_int = {'Low': 0, 'Medium': 5, 'High': 9, 'Ultra': 30}.get(resolution_level, 9)
+         output = run_with_gpu(image, resolution_level_int, apply_mask)
+
+         points, depth, mask, normal = output['points'], output['depth'], output['mask'], output.get('normal', None)
+
+         if remove_edge:
+             mask_cleaned = mask & ~utils3d.np.depth_map_edge(depth, rtol=0.04)
+         else:
+             mask_cleaned = mask
+
+         results = {
+             **output,
+             'mask_cleaned': mask_cleaned,
+             'image': image
+         }
+
+         # depth & normal visualization
+         depth_vis = colorize_depth(depth)
+         if normal is not None:
+             normal_vis = colorize_normal(normal)
+         else:
+             normal_vis = gr.update(label="Normal map (not available for this model)")
+
+         # mesh & point cloud
+         if normal is None:
+             faces, vertices, vertex_colors, vertex_uvs = utils3d.np.build_mesh_from_map(
+                 points,
+                 image.astype(np.float32) / 255,
+                 utils3d.np.uv_map(height, width),
+                 mask=mask_cleaned,
+                 tri=True
+             )
+             vertex_normals = None
+         else:
+             faces, vertices, vertex_colors, vertex_uvs, vertex_normals = utils3d.np.build_mesh_from_map(
+                 points,
+                 image.astype(np.float32) / 255,
+                 utils3d.np.uv_map(height, width),
+                 normal,
+                 mask=mask_cleaned,
+                 tri=True
+             )
+         vertices = vertices * np.array([1, -1, -1], dtype=np.float32)
+         vertex_uvs = vertex_uvs * np.array([1, -1], dtype=np.float32) + np.array([0, 1], dtype=np.float32)
+         if vertex_normals is not None:
+             vertex_normals = vertex_normals * np.array([1, -1, -1], dtype=np.float32)
+
+         tempdir = Path(tempfile.gettempdir(), 'moge')
+         tempdir.mkdir(exist_ok=True)
+         output_path = Path(tempdir, request.session_hash)
+         shutil.rmtree(output_path, ignore_errors=True)
+         output_path.mkdir(exist_ok=True, parents=True)
+         trimesh.Trimesh(
+             vertices=vertices,
+             faces=faces,
+             visual=trimesh.visual.texture.TextureVisuals(
+                 uv=vertex_uvs,
+                 material=trimesh.visual.material.PBRMaterial(
+                     baseColorTexture=Image.fromarray(image),
+                     metallicFactor=0.5,
+                     roughnessFactor=1.0
+                 )
+             ),
+             vertex_normals=vertex_normals,
+             process=False
+         ).export(output_path / 'mesh.glb')
+         pointcloud = trimesh.PointCloud(
+             vertices=vertices,
+             colors=vertex_colors,
+         )
+         pointcloud.vertex_normals = vertex_normals
+         pointcloud.export(output_path / 'pointcloud.ply', vertex_normal=True)
+         trimesh.PointCloud(
+             vertices=vertices,
+             colors=vertex_colors,
+         ).export(output_path / 'pointcloud.glb', include_normals=True)
+         cv2.imwrite(str(output_path / 'mask.png'), mask.astype(np.uint8) * 255)
+         cv2.imwrite(str(output_path / 'depth.exr'), depth.astype(np.float32), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+         cv2.imwrite(str(output_path / 'points.exr'), cv2.cvtColor(points.astype(np.float32), cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+         if normal is not None:
+             cv2.imwrite(str(output_path / 'normal.exr'), cv2.cvtColor(normal.astype(np.float32) * np.array([1, -1, -1], dtype=np.float32), cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_HALF])
+
+         files = ['mesh.glb', 'pointcloud.ply', 'depth.exr', 'points.exr', 'mask.png']
+         if normal is not None:
+             files.append('normal.exr')
+
+         for f in files:
+             delete_later(output_path / f)
+
+         # FOV
+         intrinsics = results['intrinsics']
+         fov_x, fov_y = utils3d.np.intrinsics_to_fov(intrinsics)
+         fov_x, fov_y = np.rad2deg([fov_x, fov_y])
+
+         # messages
+         viewer_message = '**Note:** Inference has been completed. It may take a few seconds to download the 3D model.'
+         if resolution_level != 'Ultra':
+             depth_message = '**Note:** Want a sharper depth map? Try increasing the `maximum image size` and setting the `inference resolution level` to `Ultra` in the settings.'
+         else:
+             depth_message = ""
+
+         return (
+             results,
+             depth_vis,
+             normal_vis,
+             output_path / 'pointcloud.glb',
+             [(output_path / f).as_posix() for f in files if (output_path / f).exists()],
+             f'- **Horizontal FOV: {fov_x:.1f}°**. \n - **Vertical FOV: {fov_y:.1f}°**',
+             viewer_message,
+             depth_message
+         )
+
+     def reset_measure(results: Dict[str, np.ndarray]):
+         return [results['image'], [], ""]
+
+
+     def measure(results: Dict[str, np.ndarray], measure_points: List[Tuple[int, int]], event: gr.SelectData):
+         point2d = event.index[0], event.index[1]
+         measure_points.append(point2d)
+
+         image = results['image'].copy()
+         for p in measure_points:
+             image = cv2.circle(image, p, radius=5, color=(255, 0, 0), thickness=2)
+
+         depth_text = ""
+         for i, p in enumerate(measure_points):
+             d = results['depth'][p[1], p[0]]
+             depth_text += f"- **P{i + 1} depth: {d:.2f}m.**\n"
+
+         if len(measure_points) == 2:
+             point1, point2 = measure_points
+             image = cv2.line(image, point1, point2, color=(255, 0, 0), thickness=2)
+             distance = np.linalg.norm(results['points'][point1[1], point1[0]] - results['points'][point2[1], point2[0]])
+             measure_points = []
+
+             distance_text = f"- **Distance: {distance:.2f}m**"
+
+             text = depth_text + distance_text
+             return [image, measure_points, text]
+         else:
+             return [image, measure_points, depth_text]
+
+     print("Create Gradio app...")
+     with gr.Blocks(theme=gr.themes.Soft()) as demo:
+         gr.Markdown(
+             '''
+             <div align="center">
+             <h1> Turn a 2D image into 3D with MoGe <a title="Github" href="https://github.com/microsoft/MoGe" target="_blank" rel="noopener noreferrer" style="display: inline-block;"> <img src="https://img.shields.io/github/stars/microsoft/MoGe?label=GitHub%20%E2%98%85&logo=github&color=C8C" alt="badge-github-stars"> </a> </h1>
+             </div>
+             ''')
+         results = gr.State(value=None)
+         measure_points = gr.State(value=[])
+
+         with gr.Row():
+             with gr.Column():
+                 input_image = gr.Image(type="numpy", image_mode="RGB", label="Input Image")
+                 with gr.Accordion(label="Settings", open=False):
+                     max_size_input = gr.Number(value=800, label="Maximum Image Size", precision=0, minimum=256, maximum=2048)
+                     resolution_level = gr.Dropdown(['Low', 'Medium', 'High', 'Ultra'], label="Inference Resolution Level", value='High')
+                     apply_mask = gr.Checkbox(value=True, label="Apply mask")
+                     remove_edges = gr.Checkbox(value=True, label="Remove edges")
+                 submit_btn = gr.Button("Submit", variant='primary')
+
+             with gr.Column():
+                 with gr.Tabs():
+                     with gr.Tab("3D View"):
+                         viewer_message = gr.Markdown("")
+                         model_3d = gr.Model3D(display_mode="solid", label="3D Point Map", clear_color=[1.0, 1.0, 1.0, 1.0], height="60vh")
+                         fov = gr.Markdown()
+                     with gr.Tab("Depth"):
+                         depth_message = gr.Markdown("")
+                         depth_map = gr.Image(type="numpy", label="Colorized Depth Map", format='png', interactive=False)
+                     with gr.Tab("Normal", interactive=hasattr(model, 'normal_head')):
+                         normal_map = gr.Image(type="numpy", label="Normal Map", format='png', interactive=False)
+                     with gr.Tab("Measure", interactive=hasattr(model, 'scale_head')):
+                         gr.Markdown("### Click on the image to measure the distance between two points. \n"
+                                     "**Note:** Metric scale is most reliable for typical indoor or street scenes, and may degrade for contents unfamiliar to the model (e.g., stylized or close-up images).")
+                         measure_image = gr.Image(type="numpy", show_label=False, format='webp', interactive=False, sources=[])
+                         measure_text = gr.Markdown("")
+                     with gr.Tab("Download"):
+                         files = gr.File(type='filepath', label="Output Files")
+
+         if Path('example_images').exists():
+             example_image_paths = sorted(list(itertools.chain(*[Path('example_images').glob(f'*.{ext}') for ext in ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']])))
+             examples = gr.Examples(
+                 examples=example_image_paths,
+                 inputs=input_image,
+                 label="Examples"
+             )
+
+         submit_btn.click(
+             fn=lambda: [None, None, None, None, None, "", "", ""],
+             outputs=[results, depth_map, normal_map, model_3d, files, fov, viewer_message, depth_message]
+         ).then(
+             fn=run,
+             inputs=[input_image, max_size_input, resolution_level, apply_mask, remove_edges],
+             outputs=[results, depth_map, normal_map, model_3d, files, fov, viewer_message, depth_message]
+         ).then(
+             fn=reset_measure,
+             inputs=[results],
+             outputs=[measure_image, measure_points, measure_text]
+         )
+
+         measure_image.select(
+             fn=measure,
+             inputs=[results, measure_points],
+             outputs=[measure_image, measure_points, measure_text]
+         )
+
+     demo.launch(share=share)
+
+
+ if __name__ == '__main__':
+     main()
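
The decorator line `@(spaces.GPU if HUGGINFACE_SPACES_INSTALLED else lambda x: x)` above is a conditional-decorator idiom: when the optional `spaces` package is absent, the expression evaluates to an identity function and the decorated function is left unchanged. A minimal self-contained sketch of the same pattern (hypothetical names, standard library only):

    # Fall back to a no-op decorator when the optional dependency is missing,
    # so the function behaves identically outside Hugging Face Spaces.
    try:
        import spaces                      # present only on huggingface.co/spaces
        gpu_decorator = spaces.GPU
    except ImportError:
        gpu_decorator = lambda fn: fn      # identity decorator: returns fn unchanged

    @gpu_decorator
    def run_on_gpu(x):
        return x                           # placeholder body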
moge/scripts/cli.py ADDED
@@ -0,0 +1,27 @@
+ import os
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
+ from pathlib import Path
+ import sys
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+
+ import click
+
+
+ @click.group(help='MoGe command line interface.')
+ def cli():
+     pass
+
+ def main():
+     from moge.scripts import app, infer, infer_baseline, infer_panorama, eval_baseline, vis_data
+     cli.add_command(app.main, name='app')
+     cli.add_command(infer.main, name='infer')
+     cli.add_command(infer_baseline.main, name='infer_baseline')
+     cli.add_command(infer_panorama.main, name='infer_panorama')
+     cli.add_command(eval_baseline.main, name='eval_baseline')
+     cli.add_command(vis_data.main, name='vis_data')
+     cli()
+
+
+ if __name__ == '__main__':
+     main()
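
The group above is empty until `main()` registers the subcommands and then dispatches by name. A minimal self-contained sketch of the same click group/command pattern (hypothetical names, not part of this commit):

    import click

    @click.group(help='Example CLI group.')
    def example_cli():
        pass

    @click.command(help='Say hello.')
    def hello():
        click.echo('hello')

    example_cli.add_command(hello, name='hello')

    if __name__ == '__main__':
        example_cli()   # `python example.py hello` prints "hello"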
moge/scripts/eval_baseline.py ADDED
@@ -0,0 +1,165 @@
+ import os
+ import sys
+ from pathlib import Path
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ import json
+ from typing import *
+ import importlib
+ import importlib.util
+
+ import click
+
+
+ @click.command(context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, help='Evaluation script.')
+ @click.option('--baseline', 'baseline_code_path', type=click.Path(), required=True, help='Path to the baseline model python code.')
+ @click.option('--config', 'config_path', type=click.Path(), default='configs/eval/all_benchmarks.json', help='Path to the evaluation configurations. '
+               'Defaults to "configs/eval/all_benchmarks.json".')
+ @click.option('--output', '-o', 'output_path', type=click.Path(), required=True, help='Path to the output json file.')
+ @click.option('--oracle', 'oracle_mode', is_flag=True, help='Use oracle mode for evaluation, i.e., use the GT intrinsics input.')
+ @click.option('--dump_pred', is_flag=True, help='Dump prediction results.')
+ @click.option('--dump_gt', is_flag=True, help='Dump ground truth.')
+ @click.pass_context
+ def main(ctx: click.Context, baseline_code_path: str, config_path: str, oracle_mode: bool, output_path: Union[str, Path], dump_pred: bool, dump_gt: bool):
+     # Lazy import
+     import cv2
+     import numpy as np
+     from tqdm import tqdm
+     import torch
+     import torch.nn.functional as F
+     import utils3d
+
+     from moge.test.baseline import MGEBaselineInterface
+     from moge.test.dataloader import EvalDataLoaderPipeline
+     from moge.test.metrics import compute_metrics
+     from moge.utils.geometry_torch import intrinsics_to_fov
+     from moge.utils.vis import colorize_depth, colorize_normal
+     from moge.utils.tools import key_average, flatten_nested_dict, timeit, import_file_as_module
+
+     # Load the baseline model
+     module = import_file_as_module(baseline_code_path, Path(baseline_code_path).stem)
+     baseline_cls: Type[MGEBaselineInterface] = getattr(module, 'Baseline')
+     baseline: MGEBaselineInterface = baseline_cls.load.main(ctx.args, standalone_mode=False)
+
+     # Load the evaluation configurations
+     with open(config_path, 'r') as f:
+         config = json.load(f)
+
+     Path(output_path).parent.mkdir(parents=True, exist_ok=True)
+     all_metrics = {}
+     # Iterate over the dataset
+     for benchmark_name, benchmark_config in tqdm(list(config.items()), desc='Benchmarks'):
+         filenames, metrics_list = [], []
+         with (
+             EvalDataLoaderPipeline(**benchmark_config) as eval_data_pipe,
+             tqdm(total=len(eval_data_pipe), desc=benchmark_name, leave=False) as pbar
+         ):
+             # Iterate over the samples in the dataset
+             for i in range(len(eval_data_pipe)):
+                 sample = eval_data_pipe.get()
+                 sample = {k: v.to(baseline.device) if isinstance(v, torch.Tensor) else v for k, v in sample.items()}
+                 image = sample['image']
+                 gt_intrinsics = sample['intrinsics']
+
+                 # Inference
+                 torch.cuda.synchronize()
+                 with torch.inference_mode(), timeit('_inference_timer', verbose=False) as timer:
+                     if oracle_mode:
+                         pred = baseline.infer_for_evaluation(image, gt_intrinsics)
+                     else:
+                         pred = baseline.infer_for_evaluation(image)
+                     torch.cuda.synchronize()
+
+                 # Compute metrics
+                 metrics, misc = compute_metrics(pred, sample, vis=dump_pred or dump_gt)
+                 metrics['inference_time'] = timer.time
+                 metrics_list.append(metrics)
+
+                 # Dump results
+                 dump_path = Path(output_path.replace(".json", "_dump"), benchmark_name, sample['filename'].replace('.zip', ''))
+                 if dump_pred:
+                     dump_path.joinpath('pred').mkdir(parents=True, exist_ok=True)
+                     cv2.imwrite(str(dump_path / 'pred' / 'image.jpg'), cv2.cvtColor((image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
+
+                     with Path(dump_path, 'pred', 'metrics.json').open('w') as f:
+                         json.dump(metrics, f, indent=4)
+
+                     if 'pred_points' in misc:
+                         points = misc['pred_points'].cpu().numpy()
+                         cv2.imwrite(str(dump_path / 'pred' / 'points.exr'), cv2.cvtColor(points.astype(np.float32), cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+
+                     if 'pred_depth' in misc:
+                         depth = misc['pred_depth'].cpu().numpy()
+                         if 'mask' in pred:
+                             mask = pred['mask'].cpu().numpy()
+                             depth = np.where(mask, depth, np.inf)
+                         cv2.imwrite(str(dump_path / 'pred' / 'depth.png'), cv2.cvtColor(colorize_depth(depth), cv2.COLOR_RGB2BGR))
+
+                     if 'mask' in pred:
+                         mask = pred['mask'].cpu().numpy()
+                         cv2.imwrite(str(dump_path / 'pred' / 'mask.png'), (mask * 255).astype(np.uint8))
+
+                     if 'normal' in pred:
+                         normal = pred['normal'].cpu().numpy()
+                         cv2.imwrite(str(dump_path / 'pred' / 'normal.png'), cv2.cvtColor(colorize_normal(normal), cv2.COLOR_RGB2BGR))
+
+                     if 'intrinsics' in pred:
+                         intrinsics = pred['intrinsics']
+                         fov_x, fov_y = intrinsics_to_fov(intrinsics)
+                         with open(dump_path / 'pred' / 'fov.json', 'w') as f:
+                             json.dump({
+                                 'fov_x': np.rad2deg(fov_x.item()),
+                                 'fov_y': np.rad2deg(fov_y.item()),
+                                 'intrinsics': intrinsics.cpu().numpy().tolist(),
+                             }, f)
+
+                 if dump_gt:
+                     dump_path.joinpath('gt').mkdir(parents=True, exist_ok=True)
+                     cv2.imwrite(str(dump_path / 'gt' / 'image.jpg'), cv2.cvtColor((image.cpu().numpy().transpose(1, 2, 0) * 255).astype(np.uint8), cv2.COLOR_RGB2BGR))
+
+                     if 'points' in sample:
+                         points = sample['points']
+                         cv2.imwrite(str(dump_path / 'gt' / 'points.exr'), cv2.cvtColor(points.cpu().numpy().astype(np.float32), cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+
+                     if 'depth' in sample:
+                         depth = sample['depth']
+                         mask = sample['depth_mask']
+                         cv2.imwrite(str(dump_path / 'gt' / 'depth.png'), cv2.cvtColor(colorize_depth(depth.cpu().numpy(), mask=mask.cpu().numpy()), cv2.COLOR_RGB2BGR))
+
+                     if 'normal' in sample:
+                         normal = sample['normal']
+                         cv2.imwrite(str(dump_path / 'gt' / 'normal.png'), cv2.cvtColor(colorize_normal(normal.cpu().numpy()), cv2.COLOR_RGB2BGR))
+
+                     if 'depth_mask' in sample:
+                         mask = sample['depth_mask']
+                         cv2.imwrite(str(dump_path / 'gt' / 'mask.png'), (mask.cpu().numpy() * 255).astype(np.uint8))
+
+                     if 'intrinsics' in sample:
+                         intrinsics = sample['intrinsics']
+                         fov_x, fov_y = intrinsics_to_fov(intrinsics)
+                         with open(dump_path / 'gt' / 'info.json', 'w') as f:
+                             json.dump({
+                                 'fov_x': np.rad2deg(fov_x.item()),
+                                 'fov_y': np.rad2deg(fov_y.item()),
+                                 'intrinsics': intrinsics.cpu().numpy().tolist(),
+                             }, f)
+
+                 # Save intermediate results
+                 if i % 100 == 0 or i == len(eval_data_pipe) - 1:
+                     Path(output_path).write_text(
+                         json.dumps({
+                             **all_metrics,
+                             benchmark_name: key_average(metrics_list)
+                         }, indent=4)
+                     )
+                 pbar.update(1)
+
+         all_metrics[benchmark_name] = key_average(metrics_list)
+
+     # Save final results
+     all_metrics['mean'] = key_average(list(all_metrics.values()))
+     Path(output_path).write_text(json.dumps(all_metrics, indent=4))
+
+
+ if __name__ == '__main__':
+     main()
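
Note how extra command-line options reach the baseline: `allow_extra_args`/`ignore_unknown_options` make click collect unrecognized options into `ctx.args`, which are forwarded to the baseline's own `load` command, and `standalone_mode=False` makes click return the callback's result instead of exiting the process. A minimal sketch of that mechanism (hypothetical names):

    import click

    @click.command()
    @click.option('--weights', default='default.ckpt')
    def load(weights):
        return {'weights': weights}   # stands in for a constructed baseline object

    # click returns the callback's return value instead of calling sys.exit()
    baseline = load.main(['--weights', 'model.ckpt'], standalone_mode=False)
    print(baseline)                   # {'weights': 'model.ckpt'}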
moge/scripts/infer.py ADDED
@@ -0,0 +1,170 @@
+ import os
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
+ from pathlib import Path
+ import sys
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ from typing import *
+ import itertools
+ import json
+ import warnings
+
+ import click
+
+
+ @click.command(help='Inference script')
+ @click.option('--input', '-i', 'input_path', type=click.Path(exists=True), help='Input image or folder path. "jpg" and "png" are supported.')
+ @click.option('--fov_x', 'fov_x_', type=float, default=None, help='If camera parameters are known, set the horizontal field of view in degrees. Otherwise, MoGe will estimate it.')
+ @click.option('--output', '-o', 'output_path', default='./output', type=click.Path(), help='Output folder path')
+ @click.option('--pretrained', 'pretrained_model_name_or_path', type=str, default=None, help='Pretrained model name or path. If not provided, the corresponding default model will be chosen.')
+ @click.option('--version', 'model_version', type=click.Choice(['v1', 'v2']), default='v2', help='Model version. Defaults to "v2"')
+ @click.option('--device', 'device_name', type=str, default='cuda', help='Device name (e.g. "cuda", "cuda:0", "cpu"). Defaults to "cuda"')
+ @click.option('--fp16', 'use_fp16', is_flag=True, help='Use fp16 precision for much faster inference.')
+ @click.option('--resize', 'resize_to', type=int, default=None, help='Resize the image(s) & output maps to a specific size. Defaults to None (no resizing).')
+ @click.option('--resolution_level', type=int, default=9, help='An integer [0-9] for the resolution level for inference. \
+     Higher value means more tokens and finer details will be captured, but inference can be slower. \
+     Defaults to 9. Note that it is irrelevant to the output size, which is always the same as the input size. \
+     `resolution_level` actually controls `num_tokens`. See `num_tokens` for more details.')
+ @click.option('--num_tokens', type=int, default=None, help='Number of tokens used for inference. An integer in the (suggested) range of `[1200, 2500]`. \
+     `resolution_level` will be ignored if `num_tokens` is provided. Default: None')
+ @click.option('--threshold', type=float, default=0.04, help='Threshold for removing edges. Defaults to 0.04. Smaller value removes more edges. "inf" means no thresholding.')
+ @click.option('--maps', 'save_maps_', is_flag=True, help='Whether to save the output maps (image, point map, depth map, normal map, mask) and fov.')
+ @click.option('--glb', 'save_glb_', is_flag=True, help='Whether to save the output as a .glb file. The color will be saved as a texture.')
+ @click.option('--ply', 'save_ply_', is_flag=True, help='Whether to save the output as a .ply file. The color will be saved as vertex colors.')
+ @click.option('--show', 'show', is_flag=True, help='Whether to show the output in a window. Note that this requires pyglet<2 installed as required by trimesh.')
+ def main(
+     input_path: str,
+     fov_x_: float,
+     output_path: str,
+     pretrained_model_name_or_path: str,
+     model_version: str,
+     device_name: str,
+     use_fp16: bool,
+     resize_to: int,
+     resolution_level: int,
+     num_tokens: int,
+     threshold: float,
+     save_maps_: bool,
+     save_glb_: bool,
+     save_ply_: bool,
+     show: bool,
+ ):
+     import cv2
+     import numpy as np
+     import torch
+     from PIL import Image
+     from tqdm import tqdm
+     import click
+
+     from moge.model import import_model_class_by_version
+     from moge.utils.io import save_glb, save_ply
+     from moge.utils.vis import colorize_depth, colorize_normal
+     from moge.utils.geometry_numpy import depth_occlusion_edge_numpy
+     import utils3d
+
+     device = torch.device(device_name)
+
+     include_suffices = ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']
+     if Path(input_path).is_dir():
+         image_paths = sorted(itertools.chain(*(Path(input_path).rglob(f'*.{suffix}') for suffix in include_suffices)))
+     else:
+         image_paths = [Path(input_path)]
+
+     if len(image_paths) == 0:
+         raise FileNotFoundError(f'No image files found in {input_path}')
+
+     if pretrained_model_name_or_path is None:
+         DEFAULT_PRETRAINED_MODEL_FOR_EACH_VERSION = {
+             "v1": "Ruicheng/moge-vitl",
+             "v2": "Ruicheng/moge-2-vitl-normal",
+         }
+         pretrained_model_name_or_path = DEFAULT_PRETRAINED_MODEL_FOR_EACH_VERSION[model_version]
+     model = import_model_class_by_version(model_version).from_pretrained(pretrained_model_name_or_path).to(device).eval()
+     if use_fp16:
+         model.half()
+
+     if not any([save_maps_, save_glb_, save_ply_]):
+         warnings.warn('No output format specified. Defaults to saving all. Please use "--maps", "--glb", or "--ply" to specify the output.')
+         save_maps_ = save_glb_ = save_ply_ = True
+
+     for image_path in (pbar := tqdm(image_paths, desc='Inference', disable=len(image_paths) <= 1)):
+         if not image_path.exists():
+             raise FileNotFoundError(f'File {image_path} does not exist.')
+         image = cv2.cvtColor(cv2.imread(str(image_path)), cv2.COLOR_BGR2RGB)
+         height, width = image.shape[:2]
+         if resize_to is not None:
+             height, width = min(resize_to, int(resize_to * height / width)), min(resize_to, int(resize_to * width / height))
+             image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
+         image_tensor = torch.tensor(image / 255, dtype=torch.float32, device=device).permute(2, 0, 1)
+
+         # Inference
+         output = model.infer(image_tensor, fov_x=fov_x_, resolution_level=resolution_level, num_tokens=num_tokens, use_fp16=use_fp16)
+         points, depth, mask, intrinsics = output['points'].cpu().numpy(), output['depth'].cpu().numpy(), output['mask'].cpu().numpy(), output['intrinsics'].cpu().numpy()
+         normal = output['normal'].cpu().numpy() if 'normal' in output else None
+
+         save_path = Path(output_path, image_path.relative_to(input_path).parent, image_path.stem)
+         save_path.mkdir(exist_ok=True, parents=True)
+
+         # Save images / maps
+         if save_maps_:
+             cv2.imwrite(str(save_path / 'image.jpg'), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
+             cv2.imwrite(str(save_path / 'depth_vis.png'), cv2.cvtColor(colorize_depth(depth), cv2.COLOR_RGB2BGR))
+             cv2.imwrite(str(save_path / 'depth.exr'), depth, [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+             cv2.imwrite(str(save_path / 'mask.png'), (mask * 255).astype(np.uint8))
+             cv2.imwrite(str(save_path / 'points.exr'), cv2.cvtColor(points, cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+             if normal is not None:
+                 cv2.imwrite(str(save_path / 'normal.png'), cv2.cvtColor(colorize_normal(normal), cv2.COLOR_RGB2BGR))
+             fov_x, fov_y = utils3d.np.intrinsics_to_fov(intrinsics)
+             with open(save_path / 'fov.json', 'w') as f:
+                 json.dump({
+                     'fov_x': round(float(np.rad2deg(fov_x)), 2),
+                     'fov_y': round(float(np.rad2deg(fov_y)), 2),
+                 }, f)
+
+         # Export mesh & visualization
+         if save_glb_ or save_ply_ or show:
+             mask_cleaned = mask & ~utils3d.np.depth_map_edge(depth, rtol=threshold)
+             if normal is None:
+                 faces, vertices, vertex_colors, vertex_uvs = utils3d.np.build_mesh_from_map(
+                     points,
+                     image.astype(np.float32) / 255,
+                     utils3d.np.uv_map(height, width),
+                     mask=mask_cleaned,
+                     tri=True
+                 )
+                 vertex_normals = None
+             else:
+                 faces, vertices, vertex_colors, vertex_uvs, vertex_normals = utils3d.np.build_mesh_from_map(
+                     points,
+                     image.astype(np.float32) / 255,
+                     utils3d.np.uv_map(height, width),
+                     normal,
+                     mask=mask_cleaned,
+                     tri=True
+                 )
+             # When exporting the model, follow the OpenGL coordinate conventions:
+             # - world coordinate system: x right, y up, z backward.
+             # - texture coordinate system: (0, 0) for left-bottom, (1, 1) for right-top.
+             vertices, vertex_uvs = vertices * [1, -1, -1], vertex_uvs * [1, -1] + [0, 1]
+             if normal is not None:
+                 vertex_normals = vertex_normals * [1, -1, -1]
+
+             if save_glb_:
+                 save_glb(save_path / 'mesh.glb', vertices, faces, vertex_uvs, image, vertex_normals)
+
+             if save_ply_:
+                 save_ply(save_path / 'pointcloud.ply', vertices, np.zeros((0, 3), dtype=np.int32), vertex_colors, vertex_normals)
+
+             if show:
+                 import trimesh
+                 trimesh.Trimesh(
+                     vertices=vertices,
+                     vertex_colors=vertex_colors,
+                     vertex_normals=vertex_normals,
+                     faces=faces,
+                     process=False
+                 ).show()
+
+
+ if __name__ == '__main__':
+     main()
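
The `--resize` handling above is an aspect-preserving resize that caps the longer side at `resize_to`. A quick numeric check of the arithmetic:

    # 1920x1080 input with resize_to=800: the long side becomes 800, the short side scales to 450.
    resize_to, height, width = 800, 1080, 1920
    new_height = min(resize_to, int(resize_to * height / width))   # min(800, 450) -> 450
    new_width = min(resize_to, int(resize_to * width / height))    # min(800, 1422) -> 800
    print(new_height, new_width)                                   # 450 800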
moge/scripts/infer_baseline.py ADDED
@@ -0,0 +1,140 @@
+ import os
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
+ from pathlib import Path
+ import sys
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ import json
+ from pathlib import Path
+ from typing import *
+ import itertools
+ import warnings
+
+ import click
+
+
+ @click.command(context_settings={"allow_extra_args": True, "ignore_unknown_options": True}, help='Inference script for wrapped baseline methods')
+ @click.option('--baseline', 'baseline_code_path', required=True, type=click.Path(), help='Path to the baseline model python code.')
+ @click.option('--input', '-i', 'input_path', type=str, required=True, help='Input image or folder')
+ @click.option('--output', '-o', 'output_path', type=str, default='./output', help='Output folder')
+ @click.option('--size', 'image_size', type=int, default=None, help='Resize input image')
+ @click.option('--skip', is_flag=True, help='Skip existing output')
+ @click.option('--maps', 'save_maps_', is_flag=True, help='Save output point / depth maps')
+ @click.option('--ply', 'save_ply_', is_flag=True, help='Save mesh in PLY format')
+ @click.option('--glb', 'save_glb_', is_flag=True, help='Save mesh in GLB format')
+ @click.option('--threshold', type=float, default=0.03, help='Depth edge detection threshold for saving mesh')
+ @click.pass_context
+ def main(ctx: click.Context, baseline_code_path: str, input_path: str, output_path: str, image_size: int, skip: bool, save_maps_: bool, save_ply_: bool, save_glb_: bool, threshold: float):
+     # Lazy import
+     import cv2
+     import numpy as np
+     from tqdm import tqdm
+     import torch
+     import utils3d
+
+     from moge.utils.io import save_ply, save_glb
+     from moge.utils.geometry_numpy import intrinsics_to_fov_numpy
+     from moge.utils.vis import colorize_depth, colorize_depth_affine, colorize_disparity
+     from moge.utils.tools import key_average, flatten_nested_dict, timeit, import_file_as_module
+     from moge.test.baseline import MGEBaselineInterface
+
+     # Load the baseline model
+     module = import_file_as_module(baseline_code_path, Path(baseline_code_path).stem)
+     baseline_cls: Type[MGEBaselineInterface] = getattr(module, 'Baseline')
+     baseline: MGEBaselineInterface = baseline_cls.load.main(ctx.args, standalone_mode=False)
+
+     # Input images list
+     include_suffices = ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']
+     if Path(input_path).is_dir():
+         image_paths = sorted(itertools.chain(*(Path(input_path).rglob(f'*.{suffix}') for suffix in include_suffices)))
+     else:
+         image_paths = [Path(input_path)]
+
+     if not any([save_maps_, save_glb_, save_ply_]):
+         warnings.warn('No output format specified. Defaults to saving maps only. Please use "--maps", "--glb", or "--ply" to specify the output.')
+         save_maps_ = True
+
+     for image_path in (pbar := tqdm(image_paths, desc='Inference', disable=len(image_paths) <= 1)):
+         # Load one image at a time
+         image_np = cv2.cvtColor(cv2.imread(str(image_path)), cv2.COLOR_BGR2RGB)
+         height, width = image_np.shape[:2]
+         if image_size is not None and max(image_np.shape[:2]) > image_size:
+             height, width = min(image_size, int(image_size * height / width)), min(image_size, int(image_size * width / height))
+             image_np = cv2.resize(image_np, (width, height), interpolation=cv2.INTER_AREA)
+         image = torch.from_numpy(image_np.astype(np.float32) / 255.0).permute(2, 0, 1).to(baseline.device)
+
+         # Inference
+         torch.cuda.synchronize()
+         with torch.inference_mode(), (timer := timeit('Inference', verbose=False, average=True)):
+             output = baseline.infer(image)
+             torch.cuda.synchronize()
+
+         inference_time = timer.average_time
+         pbar.set_postfix({'average inference time': f'{inference_time:.3f}s'})
+
+         # Save the output
+         save_path = Path(output_path, image_path.relative_to(input_path).parent, image_path.stem)
+         if skip and save_path.exists():
+             continue
+         save_path.mkdir(parents=True, exist_ok=True)
+
+         if save_maps_:
+             cv2.imwrite(str(save_path / 'image.jpg'), cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR))
+
+             if 'mask' in output:
+                 mask = output['mask'].cpu().numpy()
+                 cv2.imwrite(str(save_path / 'mask.png'), (mask * 255).astype(np.uint8))
+
+             for k in ['points_metric', 'points_scale_invariant', 'points_affine_invariant']:
+                 if k in output:
+                     points = output[k].cpu().numpy()
+                     cv2.imwrite(str(save_path / f'{k}.exr'), cv2.cvtColor(points, cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+
+             for k in ['depth_metric', 'depth_scale_invariant', 'depth_affine_invariant', 'disparity_affine_invariant']:
+                 if k in output:
+                     depth = output[k].cpu().numpy()
+                     cv2.imwrite(str(save_path / f'{k}.exr'), depth, [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+                     if k in ['depth_metric', 'depth_scale_invariant']:
+                         depth_vis = colorize_depth(depth)
+                     elif k == 'depth_affine_invariant':
+                         depth_vis = colorize_depth_affine(depth)
+                     elif k == 'disparity_affine_invariant':
+                         depth_vis = colorize_disparity(depth)
+                     cv2.imwrite(str(save_path / f'{k}_vis.png'), cv2.cvtColor(depth_vis, cv2.COLOR_RGB2BGR))
+
+             if 'intrinsics' in output:
+                 intrinsics = output['intrinsics'].cpu().numpy()
+                 fov_x, fov_y = intrinsics_to_fov_numpy(intrinsics)
+                 with open(save_path / 'fov.json', 'w') as f:
+                     json.dump({
+                         'fov_x': float(np.rad2deg(fov_x)),
+                         'fov_y': float(np.rad2deg(fov_y)),
+                         'intrinsics': intrinsics.tolist()
+                     }, f, indent=4)
+
+         # Export mesh & visualization
+         if save_ply_ or save_glb_:
+             assert any(k in output for k in ['points_metric', 'points_scale_invariant', 'points_affine_invariant']), 'No point map found in output'
+             points = next(output[k] for k in ['points_metric', 'points_scale_invariant', 'points_affine_invariant'] if k in output).cpu().numpy()
+             mask = output['mask'].cpu().numpy() if 'mask' in output else np.ones_like(points[..., 0], dtype=bool)
+             normals, normals_mask = utils3d.np.point_map_to_normal_map(points, mask=mask)
+             faces, vertices, vertex_colors, vertex_uvs = utils3d.np.build_mesh_from_map(
+                 points,
+                 image_np.astype(np.float32) / 255,
+                 utils3d.np.uv_map(height, width),
+                 mask=mask & ~(utils3d.np.depth_map_edge(depth, rtol=threshold, mask=mask) & utils3d.np.normal_map_edge(normals, tol=5, mask=normals_mask)),
+                 tri=True
+             )
+             # When exporting the model, follow the OpenGL coordinate conventions:
+             # - world coordinate system: x right, y up, z backward.
+             # - texture coordinate system: (0, 0) for left-bottom, (1, 1) for right-top.
+             vertices, vertex_uvs = vertices * [1, -1, -1], vertex_uvs * [1, -1] + [0, 1]
+
+             if save_glb_:
+                 save_glb(save_path / 'mesh.glb', vertices, faces, vertex_uvs, image_np)
+
+             if save_ply_:
+                 save_ply(save_path / 'mesh.ply', vertices, faces, vertex_colors)
+
+ if __name__ == '__main__':
+     main()
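
The mesh-export branch above picks whichever point-map flavor the baseline provides, in a fixed priority order, via `next(...)` over a generator. A minimal sketch of that idiom (toy dict standing in for the model output):

    output = {'points_scale_invariant': 'points-tensor'}           # hypothetical output
    key_priority = ['points_metric', 'points_scale_invariant', 'points_affine_invariant']
    points = next(output[k] for k in key_priority if k in output)
    print(points)                                                  # 'points-tensor'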
moge/scripts/infer_panorama.py ADDED
@@ -0,0 +1,162 @@
+ import os
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
+ from pathlib import Path
+ import sys
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ from typing import *
+ import itertools
+ import json
+ import warnings
+
+ import click
+
+
+ @click.command(help='Inference script for panorama images')
+ @click.option('--input', '-i', 'input_path', type=click.Path(exists=True), required=True, help='Input image or folder path. "jpg" and "png" are supported.')
+ @click.option('--output', '-o', 'output_path', type=click.Path(), default='./output', help='Output folder path')
+ @click.option('--pretrained', 'pretrained_model_name_or_path', type=str, default='Ruicheng/moge-vitl', help='Pretrained model name or path. Defaults to "Ruicheng/moge-vitl"')
+ @click.option('--device', 'device_name', type=str, default='cuda', help='Device name (e.g. "cuda", "cuda:0", "cpu"). Defaults to "cuda"')
+ @click.option('--resize', 'resize_to', type=int, default=None, help='Resize the image(s) & output maps to a specific size. Defaults to None (no resizing).')
+ @click.option('--resolution_level', type=int, default=9, help='An integer [0-9] for the resolution level of inference. The higher, the better but slower. Defaults to 9. Note that it is irrelevant to the output resolution.')
+ @click.option('--threshold', type=float, default=0.03, help='Threshold for removing edges. Defaults to 0.03. Smaller value removes more edges. "inf" means no thresholding.')
+ @click.option('--batch_size', type=int, default=4, help='Batch size for inference. Defaults to 4.')
+ @click.option('--splitted', 'save_splitted', is_flag=True, help='Whether to save the splitted images. Defaults to False.')
+ @click.option('--maps', 'save_maps_', is_flag=True, help='Whether to save the output maps and fov (image, depth, mask, points, fov).')
+ @click.option('--glb', 'save_glb_', is_flag=True, help='Whether to save the output as a .glb file. The color will be saved as a texture.')
+ @click.option('--ply', 'save_ply_', is_flag=True, help='Whether to save the output as a .ply file. The color will be saved as vertex colors.')
+ @click.option('--show', 'show', is_flag=True, help='Whether to show the output in a window. Note that this requires pyglet<2 installed as required by trimesh.')
+ def main(
+     input_path: str,
+     output_path: str,
+     pretrained_model_name_or_path: str,
+     device_name: str,
+     resize_to: int,
+     resolution_level: int,
+     threshold: float,
+     batch_size: int,
+     save_splitted: bool,
+     save_maps_: bool,
+     save_glb_: bool,
+     save_ply_: bool,
+     show: bool,
+ ):
+     # Lazy import
+     import cv2
+     import numpy as np
+     from numpy import ndarray
+     import torch
+     from PIL import Image
+     from tqdm import tqdm, trange
+     import trimesh
+     import trimesh.visual
+     from scipy.sparse import csr_array, hstack, vstack
+     from scipy.ndimage import convolve
+     from scipy.sparse.linalg import lsmr
+
+     import utils3d
+     from moge.model.v1 import MoGeModel
+     from moge.utils.io import save_glb, save_ply
+     from moge.utils.vis import colorize_depth
+     from moge.utils.panorama import spherical_uv_to_directions, get_panorama_cameras, split_panorama_image, merge_panorama_depth
+
+
+     device = torch.device(device_name)
+
+     include_suffices = ['jpg', 'png', 'jpeg', 'JPG', 'PNG', 'JPEG']
+     if Path(input_path).is_dir():
+         image_paths = sorted(itertools.chain(*(Path(input_path).rglob(f'*.{suffix}') for suffix in include_suffices)))
+     else:
+         image_paths = [Path(input_path)]
+
+     if len(image_paths) == 0:
+         raise FileNotFoundError(f'No image files found in {input_path}')
+
+     # Write outputs
+     if not any([save_maps_, save_glb_, save_ply_]):
+         warnings.warn('No output format specified. Defaults to saving all. Please use "--maps", "--glb", or "--ply" to specify the output.')
+         save_maps_ = save_glb_ = save_ply_ = True
+
+     model = MoGeModel.from_pretrained(pretrained_model_name_or_path).to(device).eval()
+
+     for image_path in (pbar := tqdm(image_paths, desc='Total images', disable=len(image_paths) <= 1)):
+         image = cv2.cvtColor(cv2.imread(str(image_path)), cv2.COLOR_BGR2RGB)
+         height, width = image.shape[:2]
+         if resize_to is not None:
+             height, width = min(resize_to, int(resize_to * height / width)), min(resize_to, int(resize_to * width / height))
+             image = cv2.resize(image, (width, height), interpolation=cv2.INTER_AREA)
+
+         splitted_extrinsics, splitted_intriniscs = get_panorama_cameras()
+         splitted_resolution = 512
+         splitted_images = split_panorama_image(image, splitted_extrinsics, splitted_intriniscs, splitted_resolution)
+
+         # Infer each view
+         print('Inferring...') if pbar.disable else pbar.set_postfix_str('Inferring')
+
+         splitted_distance_maps, splitted_masks = [], []
+         for i in trange(0, len(splitted_images), batch_size, desc='Inferring splitted views', disable=len(splitted_images) <= batch_size, leave=False):
+             image_tensor = torch.tensor(np.stack(splitted_images[i:i + batch_size]) / 255, dtype=torch.float32, device=device).permute(0, 3, 1, 2)
+             fov_x, fov_y = np.rad2deg(utils3d.np.intrinsics_to_fov(np.array(splitted_intriniscs[i:i + batch_size])))
+             fov_x = torch.tensor(fov_x, dtype=torch.float32, device=device)
+             output = model.infer(image_tensor, fov_x=fov_x, apply_mask=False)
+             distance_map, mask = output['points'].norm(dim=-1).cpu().numpy(), output['mask'].cpu().numpy()
+             splitted_distance_maps.extend(list(distance_map))
+             splitted_masks.extend(list(mask))
+
+         # Save splitted views
+         if save_splitted:
+             splitted_save_path = Path(output_path, image_path.stem, 'splitted')
+             splitted_save_path.mkdir(exist_ok=True, parents=True)
+             for i in range(len(splitted_images)):
+                 cv2.imwrite(str(splitted_save_path / f'{i:02d}.jpg'), cv2.cvtColor(splitted_images[i], cv2.COLOR_RGB2BGR))
+                 cv2.imwrite(str(splitted_save_path / f'{i:02d}_distance_vis.png'), cv2.cvtColor(colorize_depth(splitted_distance_maps[i], splitted_masks[i]), cv2.COLOR_RGB2BGR))
+
+         # Merge
+         print('Merging...') if pbar.disable else pbar.set_postfix_str('Merging')
+
+         merging_width, merging_height = min(1920, width), min(960, height)
+         panorama_depth, panorama_mask = merge_panorama_depth(merging_width, merging_height, splitted_distance_maps, splitted_masks, splitted_extrinsics, splitted_intriniscs)
+         panorama_depth = panorama_depth.astype(np.float32)
+         panorama_depth = cv2.resize(panorama_depth, (width, height), interpolation=cv2.INTER_LINEAR)
+         panorama_mask = cv2.resize(panorama_mask.astype(np.uint8), (width, height), interpolation=cv2.INTER_NEAREST) > 0
+         points = panorama_depth[:, :, None] * spherical_uv_to_directions(utils3d.np.uv_map(height, width))
+
+         # Write outputs
+         print('Writing outputs...') if pbar.disable else pbar.set_postfix_str('Writing outputs')
+         save_path = Path(output_path, image_path.relative_to(input_path).parent, image_path.stem)
+         save_path.mkdir(exist_ok=True, parents=True)
+         if save_maps_:
+             cv2.imwrite(str(save_path / 'image.jpg'), cv2.cvtColor(image, cv2.COLOR_RGB2BGR))
+             cv2.imwrite(str(save_path / 'depth_vis.png'), cv2.cvtColor(colorize_depth(panorama_depth, mask=panorama_mask), cv2.COLOR_RGB2BGR))
+             cv2.imwrite(str(save_path / 'depth.exr'), panorama_depth, [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+             cv2.imwrite(str(save_path / 'points.exr'), points, [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
+             cv2.imwrite(str(save_path / 'mask.png'), (panorama_mask * 255).astype(np.uint8))
+
+         # Export mesh & visualization
+         if save_glb_ or save_ply_ or show:
+             normals, normals_mask = utils3d.np.point_map_to_normal_map(points, panorama_mask)
+             faces, vertices, vertex_colors, vertex_uvs = utils3d.np.build_mesh_from_map(
+                 points,
+                 image.astype(np.float32) / 255,
+                 utils3d.np.uv_map(height, width),
+                 mask=panorama_mask & ~(utils3d.np.depth_map_edge(panorama_depth, rtol=threshold) & utils3d.np.normal_map_edge(normals, tol=5, mask=normals_mask)),
+                 tri=True
+             )
+
+             if save_glb_:
+                 save_glb(save_path / 'mesh.glb', vertices, faces, vertex_uvs, image)
+
+             if save_ply_:
+                 save_ply(save_path / 'mesh.ply', vertices, faces, vertex_colors)
+
+             if show:
+                 trimesh.Trimesh(
+                     vertices=vertices,
+                     vertex_colors=vertex_colors,
+                     faces=faces,
+                     process=False
+                 ).show()
+
+
+ if __name__ == '__main__':
+     main()
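
The line `points = panorama_depth[:, :, None] * spherical_uv_to_directions(...)` above back-projects the panorama: each pixel's radial distance is multiplied by its unit viewing direction to give a 3D point. A self-contained numpy sketch with toy directions (not the real `spherical_uv_to_directions`):

    import numpy as np

    h, w = 2, 2
    distance = np.full((h, w), 2.0)               # toy per-pixel distance map
    directions = np.zeros((h, w, 3))
    directions[..., 2] = 1.0                      # unit rays along +z
    points = distance[:, :, None] * directions    # (h, w, 3) point map
    print(points[0, 0])                           # [0. 0. 2.]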
moge/scripts/train.py ADDED
@@ -0,0 +1,461 @@
+ import os
+ from pathlib import Path
+ import sys
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
+     sys.path.insert(0, _package_root)
+ import json
+ import time
+ import random
+ from typing import *
+ import itertools
+ from contextlib import nullcontext
+ from concurrent.futures import ThreadPoolExecutor
+ import io
+
+ import numpy as np
+ import cv2
+ from PIL import Image
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ import torch.version
+ import accelerate
+ from accelerate import Accelerator, DistributedDataParallelKwargs
+ from accelerate.utils import set_seed
+ import utils3d
+ import click
+ from tqdm import tqdm, trange
+ import mlflow
+ torch.backends.cudnn.benchmark = False  # Varying input size, make sure cudnn benchmark is disabled
+
+ from moge.train.dataloader import TrainDataLoaderPipeline
+ from moge.train.losses import (
+     affine_invariant_global_loss,
+     affine_invariant_local_loss,
+     edge_loss,
+     normal_loss,
+     mask_l2_loss,
+     mask_bce_loss,
+     metric_scale_loss,
+     normal_map_loss,
+     monitoring,
+ )
+ from moge.train.utils import build_optimizer, build_lr_scheduler
+ from moge.utils.geometry_torch import intrinsics_to_fov
+ from moge.utils.vis import colorize_depth, colorize_normal
+ from moge.utils.tools import key_average, recursive_replace, CallbackOnException, flatten_nested_dict
+ from moge.test.metrics import compute_metrics
+
+
+ @click.command()
+ @click.option('--config', 'config_path', type=str, default='configs/debug.json')
+ @click.option('--workspace', type=str, default='workspace/debug', help='Path to the workspace')
+ @click.option('--checkpoint', 'checkpoint_path', type=str, default=None, help='Path to the checkpoint to load. "latest" to load the latest checkpoint in the workspace; an integer to load by step number')
+ @click.option('--batch_size_forward', type=int, default=8, help='Batch size for each forward pass on each device')
+ @click.option('--gradient_accumulation_steps', type=int, default=1, help='Number of steps to accumulate gradients')
+ @click.option('--enable_gradient_checkpointing', type=bool, default=True, help='Use gradient checkpointing in backbone')
+ @click.option('--enable_mixed_precision', type=bool, default=False, help='Use mixed precision training. Backbone is converted to FP16')
+ @click.option('--enable_ema', type=bool, default=True, help='Maintain an exponential moving average of the model weights')
+ @click.option('--num_iterations', type=int, default=1000000, help='Number of iterations to train the model')
+ @click.option('--save_every', type=int, default=10000, help='Save checkpoint every n iterations')
+ @click.option('--log_every', type=int, default=1000, help='Log metrics every n iterations')
+ @click.option('--vis_every', type=int, default=0, help='Visualize every n iterations')
+ @click.option('--num_vis_images', type=int, default=32, help='Number of images to visualize, must be a multiple of the divided batch size')
+ @click.option('--enable_mlflow', type=bool, default=True, help='Log metrics to MLFlow')
+ @click.option('--seed', type=int, default=0, help='Random seed')
+ def main(
+     config_path: str,
+     workspace: str,
+     checkpoint_path: str,
+     batch_size_forward: int,
+     gradient_accumulation_steps: int,
+     enable_gradient_checkpointing: bool,
+     enable_mixed_precision: bool,
+     enable_ema: bool,
+     num_iterations: int,
+     save_every: int,
+     log_every: int,
+     vis_every: int,
+     num_vis_images: int,
+     enable_mlflow: bool,
+     seed: Optional[int],
+ ):
+     # Load config
+     with open(config_path, 'r') as f:
+         config = json.load(f)
+
+     accelerator = Accelerator(
+         gradient_accumulation_steps=gradient_accumulation_steps,
+         mixed_precision='fp16' if enable_mixed_precision else None,
+         kwargs_handlers=[
+             DistributedDataParallelKwargs(find_unused_parameters=True)
+         ]
+     )
+     device = accelerator.device
+     batch_size_total = batch_size_forward * gradient_accumulation_steps * accelerator.num_processes
+
+     # Log config
+     if accelerator.is_main_process:
+         if enable_mlflow:
+             try:
+                 mlflow.log_params({
+                     **click.get_current_context().params,
+                     'batch_size_total': batch_size_total,
+                 })
+             except Exception:
+                 print('Failed to log config to MLFlow')
+         Path(workspace).mkdir(parents=True, exist_ok=True)
+         with Path(workspace).joinpath('config.json').open('w') as f:
+             json.dump(config, f, indent=4)
+
+     # Set seed
+     if seed is not None:
+         set_seed(seed, device_specific=True)
+
+     # Initialize model
+     print('Initialize model')
+     with accelerator.local_main_process_first():
+         from moge.model import import_model_class_by_version
+         MoGeModel = import_model_class_by_version(config['model_version'])
+         model = MoGeModel(**config['model'])
+     count_total_parameters = sum(p.numel() for p in model.parameters())
+     print(f'Total parameters: {count_total_parameters}')
+
+     # Set up EMA model
+     if enable_ema and accelerator.is_main_process:
+         ema_avg_fn = lambda averaged_model_parameter, model_parameter, num_averaged: 0.999 * averaged_model_parameter + 0.001 * model_parameter
+         ema_model = torch.optim.swa_utils.AveragedModel(model, device=accelerator.device, avg_fn=ema_avg_fn)
+
+     # Set gradient checkpointing
+     if enable_gradient_checkpointing:
+         model.enable_gradient_checkpointing()
+         import warnings
+         warnings.filterwarnings("ignore", category=FutureWarning, module="torch.utils.checkpoint")
+
+     # Initialize optimizer & lr scheduler
+     optimizer = build_optimizer(model, config['optimizer'])
+     lr_scheduler = build_lr_scheduler(optimizer, config['lr_scheduler'])
+
+     count_grouped_parameters = [sum(p.numel() for p in param_group['params'] if p.requires_grad) for param_group in optimizer.param_groups]
+     for i, count in enumerate(count_grouped_parameters):
+         print(f'- Group {i}: {count} parameters')
+
+     # Attempt to load checkpoint
+     checkpoint: Dict[str, Any]
+     with accelerator.local_main_process_first():
+         if checkpoint_path is None:
+             # - No checkpoint
+             checkpoint = None
+         elif checkpoint_path.endswith('.pt'):
+             # - Load specific checkpoint file
+             print(f'Load checkpoint: {checkpoint_path}')
+             checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=True)
+         elif checkpoint_path == "latest":
+             # - Load latest checkpoint
+             checkpoint_path = Path(workspace, 'checkpoint', 'latest.pt')
+             if checkpoint_path.exists():
+                 print(f'Load checkpoint: {checkpoint_path}')
+                 checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=True)
+                 i_step = checkpoint['step']
+                 if 'model' not in checkpoint and (checkpoint_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}.pt')).exists():
+                     print(f'Load model checkpoint: {checkpoint_model_path}')
+                     checkpoint['model'] = torch.load(checkpoint_model_path, map_location='cpu', weights_only=True)['model']
+                 if 'optimizer' not in checkpoint and (checkpoint_optimizer_path := Path(workspace, 'checkpoint', f'{i_step:08d}_optimizer.pt')).exists():
+                     print(f'Load optimizer checkpoint: {checkpoint_optimizer_path}')
+                     checkpoint.update(torch.load(checkpoint_optimizer_path, map_location='cpu', weights_only=True))
+                 if enable_ema and accelerator.is_main_process:
+                     if 'ema_model' not in checkpoint and (checkpoint_ema_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}_ema.pt')).exists():
+                         print(f'Load EMA model checkpoint: {checkpoint_ema_model_path}')
+                         checkpoint['ema_model'] = torch.load(checkpoint_ema_model_path, map_location='cpu', weights_only=True)['model']
+             else:
+                 print('No latest checkpoint found. Start from scratch.')
+                 checkpoint = None
+         else:
+             # - Load by step number
+             i_step = int(checkpoint_path)
+             checkpoint = {'step': i_step}
+             if (checkpoint_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}.pt')).exists():
+                 print(f'Load model checkpoint: {checkpoint_model_path}')
+                 checkpoint['model'] = torch.load(checkpoint_model_path, map_location='cpu', weights_only=True)['model']
+             if (checkpoint_optimizer_path := Path(workspace, 'checkpoint', f'{i_step:08d}_optimizer.pt')).exists():
+                 print(f'Load optimizer checkpoint: {checkpoint_optimizer_path}')
+                 checkpoint.update(torch.load(checkpoint_optimizer_path, map_location='cpu', weights_only=True))
+             if enable_ema and accelerator.is_main_process:
+                 if (checkpoint_ema_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}_ema.pt')).exists():
+                     print(f'Load EMA model checkpoint: {checkpoint_ema_model_path}')
+                     checkpoint['ema_model'] = torch.load(checkpoint_ema_model_path, map_location='cpu', weights_only=True)['model']
+
+     if checkpoint is None:
+         # Initialize model weights
+         print('Initialize model weights')
+         with accelerator.local_main_process_first():
+             model.init_weights()
+         initial_step = 0
+     else:
+         model.load_state_dict(checkpoint['model'], strict=False)
+         if 'step' in checkpoint:
+             initial_step = checkpoint['step'] + 1
+         else:
+             initial_step = 0
+         if 'optimizer' in checkpoint:
+             optimizer.load_state_dict(checkpoint['optimizer'])
+         if enable_ema and accelerator.is_main_process and 'ema_model' in checkpoint:
+             ema_model.module.load_state_dict(checkpoint['ema_model'], strict=False)
+         if 'lr_scheduler' in checkpoint:
+             lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
+
+     del checkpoint
+
+     model, optimizer = accelerator.prepare(model, optimizer)
+     if torch.version.hip and isinstance(model, torch.nn.parallel.DistributedDataParallel):
+         # Hacking potential gradient synchronization issue in ROCm backend
+         from moge.model.utils import sync_ddp_hook
+         model.register_comm_hook(None, sync_ddp_hook)
+
+     # Initialize training data pipeline
+     with accelerator.local_main_process_first():
+         train_data_pipe = TrainDataLoaderPipeline(config['data'], batch_size_forward)
+
+     def _write_bytes_retry_loop(save_path: Path, data: bytes):
+         while True:
+             try:
+                 save_path.write_bytes(data)
+                 break
+             except Exception as e:
+                 print('Error while saving checkpoint, retrying in 1 minute: ', e)
+                 time.sleep(60)
+
+     # Ready to train
+     records = []
+     model.train()
+     with (
+         train_data_pipe,
+         tqdm(initial=initial_step, total=num_iterations, desc='Training', disable=not accelerator.is_main_process) as pbar,
+         ThreadPoolExecutor(max_workers=1) as save_checkpoint_executor,
+     ):
+         # Get some batches for visualization
+         if accelerator.is_main_process:
+             batches_for_vis: List[Dict[str, torch.Tensor]] = []
+             num_vis_images = num_vis_images // batch_size_forward * batch_size_forward
+             for _ in range(num_vis_images // batch_size_forward):
+                 batch = train_data_pipe.get()
+                 batches_for_vis.append(batch)
+
+         # Visualize GT
+         if vis_every > 0 and accelerator.is_main_process and initial_step == 0:
+             save_dir = Path(workspace).joinpath('vis/gt')
+             for i_batch, batch in enumerate(tqdm(batches_for_vis, desc='Visualize GT', leave=False)):
+                 image, gt_depth, gt_normal, gt_intrinsics, info = batch['image'], batch['depth'], batch['normal'], batch['intrinsics'], batch['info']
+                 gt_points = utils3d.pt.depth_map_to_point_map(gt_depth, intrinsics=gt_intrinsics)
+                 for i_instance in range(batch['image'].shape[0]):
+ @click.option('--save_every', type=int, default=10000, help='Save checkpoint every n iterations')
61
+ @click.option('--log_every', type=int, default=1000, help='Log metrics every n iterations')
62
+ @click.option('--vis_every', type=int, default=0, help='Visualize every n iterations')
63
+ @click.option('--num_vis_images', type=int, default=32, help='Number of images to visualize, must be a multiple of divided batch size')
64
+ @click.option('--enable_mlflow', type=bool, default=True, help='Log metrics to MLFlow')
65
+ @click.option('--seed', type=int, default=0, help='Random seed')
66
+ def main(
67
+ config_path: str,
68
+ workspace: str,
69
+ checkpoint_path: str,
70
+ batch_size_forward: int,
71
+ gradient_accumulation_steps: int,
72
+ enable_gradient_checkpointing: bool,
73
+ enable_mixed_precision: bool,
74
+ enable_ema: bool,
75
+ num_iterations: int,
76
+ save_every: int,
77
+ log_every: int,
78
+ vis_every: int,
79
+ num_vis_images: int,
80
+ enable_mlflow: bool,
81
+ seed: Optional[int],
82
+ ):
83
+ # Load config
84
+ with open(config_path, 'r') as f:
85
+ config = json.load(f)
86
+
87
+ accelerator = Accelerator(
88
+ gradient_accumulation_steps=gradient_accumulation_steps,
89
+ mixed_precision='fp16' if enable_mixed_precision else None,
90
+ kwargs_handlers=[
91
+ DistributedDataParallelKwargs(find_unused_parameters=True)
92
+ ]
93
+ )
94
+ device = accelerator.device
95
+ batch_size_total = batch_size_forward * gradient_accumulation_steps * accelerator.num_processes
96
+
97
+ # Log config
98
+ if accelerator.is_main_process:
99
+ if enable_mlflow:
100
+ try:
101
+ mlflow.log_params({
102
+ **click.get_current_context().params,
103
+ 'batch_size_total': batch_size_total,
104
+ })
105
+ except:
106
+ print('Failed to log config to MLFlow')
107
+ Path(workspace).mkdir(parents=True, exist_ok=True)
108
+ with Path(workspace).joinpath('config.json').open('w') as f:
109
+ json.dump(config, f, indent=4)
110
+
111
+ # Set seed
112
+ if seed is not None:
113
+ set_seed(seed, device_specific=True)
114
+
115
+ # Initialize model
116
+ print('Initialize model')
117
+ with accelerator.local_main_process_first():
118
+ from moge.model import import_model_class_by_version
119
+ MoGeModel = import_model_class_by_version(config['model_version'])
120
+ model = MoGeModel(**config['model'])
121
+ count_total_parameters = sum(p.numel() for p in model.parameters())
122
+ print(f'Total parameters: {count_total_parameters}')
123
+
124
+ # Set up EMA model
125
+ if enable_ema and accelerator.is_main_process:
126
+ ema_avg_fn = lambda averaged_model_parameter, model_parameter, num_averaged: 0.999 * averaged_model_parameter + 0.001 * model_parameter
127
+ ema_model = torch.optim.swa_utils.AveragedModel(model, device=accelerator.device, avg_fn=ema_avg_fn)
128
+
129
+ # Set gradient checkpointing
130
+ if enable_gradient_checkpointing:
131
+ model.enable_gradient_checkpointing()
132
+ import warnings
133
+ warnings.filterwarnings("ignore", category=FutureWarning, module="torch.utils.checkpoint")
134
+
135
+ # Initalize optimizer & lr scheduler
136
+ optimizer = build_optimizer(model, config['optimizer'])
137
+ lr_scheduler = build_lr_scheduler(optimizer, config['lr_scheduler'])
138
+
139
+ count_grouped_parameters = [sum(p.numel() for p in param_group['params'] if p.requires_grad) for param_group in optimizer.param_groups]
140
+ for i, count in enumerate(count_grouped_parameters):
141
+ print(f'- Group {i}: {count} parameters')
142
+
143
+ # Attempt to load checkpoint
144
+ checkpoint: Dict[str, Any]
145
+ with accelerator.local_main_process_first():
146
+ if checkpoint_path is None:
147
+ # - No checkpoint
148
+ checkpoint = None
149
+ elif checkpoint_path.endswith('.pt'):
150
+ # - Load specific checkpoint file
151
+ print(f'Load checkpoint: {checkpoint_path}')
152
+ checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=True)
153
+ elif checkpoint_path == "latest":
154
+ # - Load latest checkpoint
155
+ checkpoint_path = Path(workspace, 'checkpoint', 'latest.pt')
156
+ if checkpoint_path.exists():
157
+ print(f'Load checkpoint: {checkpoint_path}')
158
+ checkpoint = torch.load(checkpoint_path, map_location='cpu', weights_only=True)
159
+ i_step = checkpoint['step']
160
+ if 'model' not in checkpoint and (checkpoint_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}.pt')).exists():
161
+ print(f'Load model checkpoint: {checkpoint_model_path}')
162
+ checkpoint['model'] = torch.load(checkpoint_model_path, map_location='cpu', weights_only=True)['model']
163
+ if 'optimizer' not in checkpoint and (checkpoint_optimizer_path := Path(workspace, 'checkpoint', f'{i_step:08d}_optimizer.pt')).exists():
164
+ print(f'Load optimizer checkpoint: {checkpoint_optimizer_path}')
165
+ checkpoint.update(torch.load(checkpoint_optimizer_path, map_location='cpu', weights_only=True))
166
+ if enable_ema and accelerator.is_main_process:
167
+ if 'ema_model' not in checkpoint and (checkpoint_ema_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}_ema.pt')).exists():
168
+ print(f'Load EMA model checkpoint: {checkpoint_ema_model_path}')
169
+ checkpoint['ema_model'] = torch.load(checkpoint_ema_model_path, map_location='cpu', weights_only=True)['model']
170
+ else:
171
+ print(f'No latest checkpoint found. Start from scratch.')
172
+ checkpoint = None
173
+ else:
174
+ # - Load by step number
175
+ i_step = int(checkpoint_path)
176
+ checkpoint = {'step': i_step}
177
+ if (checkpoint_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}.pt')).exists():
178
+ print(f'Load model checkpoint: {checkpoint_model_path}')
179
+ checkpoint['model'] = torch.load(checkpoint_model_path, map_location='cpu', weights_only=True)['model']
180
+ if (checkpoint_optimizer_path := Path(workspace, 'checkpoint', f'{i_step:08d}_optimizer.pt')).exists():
181
+ print(f'Load optimizer checkpoint: {checkpoint_optimizer_path}')
182
+ checkpoint.update(torch.load(checkpoint_optimizer_path, map_location='cpu', weights_only=True))
183
+ if enable_ema and accelerator.is_main_process:
184
+ if (checkpoint_ema_model_path := Path(workspace, 'checkpoint', f'{i_step:08d}_ema.pt')).exists():
185
+ print(f'Load EMA model checkpoint: {checkpoint_ema_model_path}')
186
+ checkpoint['ema_model'] = torch.load(checkpoint_ema_model_path, map_location='cpu', weights_only=True)['model']
187
+
188
+ if checkpoint is None:
189
+ # Initialize model weights
190
+ print('Initialize model weights')
191
+ with accelerator.local_main_process_first():
192
+ model.init_weights()
193
+ initial_step = 0
194
+ else:
195
+ model.load_state_dict(checkpoint['model'], strict=False)
196
+ if 'step' in checkpoint:
197
+ initial_step = checkpoint['step'] + 1
198
+ else:
199
+ initial_step = 0
200
+ if 'optimizer' in checkpoint:
201
+ optimizer.load_state_dict(checkpoint['optimizer'])
202
+ if enable_ema and accelerator.is_main_process and 'ema_model' in checkpoint:
203
+ ema_model.module.load_state_dict(checkpoint['ema_model'], strict=False)
204
+ if 'lr_scheduler' in checkpoint:
205
+ lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
206
+
207
+ del checkpoint
208
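+ # NOTE: together with the saving code further below, the checkpoint directory ends up holding
+ # `latest.pt` (a small {'model_config', 'step'} record), plus per-step `{step:08d}.pt` (model weights),
+ # `{step:08d}_optimizer.pt` (optimizer & lr scheduler state) and `{step:08d}_ema.pt` (EMA weights).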
+
209
+ model, optimizer = accelerator.prepare(model, optimizer)
210
+ if torch.version.hip and isinstance(model, torch.nn.parallel.DistributedDataParallel):
211
+ # Work around a potential gradient synchronization issue in the ROCm backend
212
+ from moge.model.utils import sync_ddp_hook
213
+ model.register_comm_hook(None, sync_ddp_hook)
214
+
215
+ # Initialize training data pipeline
216
+ with accelerator.local_main_process_first():
217
+ train_data_pipe = TrainDataLoaderPipeline(config['data'], batch_size_forward)
218
+
219
+ def _write_bytes_retry_loop(save_path: Path, data: bytes):
220
+ while True:
221
+ try:
222
+ save_path.write_bytes(data)
223
+ break
224
+ except Exception as e:
225
+ print('Error while saving checkpoint, retrying in 1 minute: ', e)
226
+ time.sleep(60)
227
+
228
+ # Ready to train
229
+ records = []
230
+ model.train()
231
+ with (
232
+ train_data_pipe,
233
+ tqdm(initial=initial_step, total=num_iterations, desc='Training', disable=not accelerator.is_main_process) as pbar,
234
+ ThreadPoolExecutor(max_workers=1) as save_checkpoint_executor,
235
+ ):
236
+ # Get some batches for visualization
237
+ if accelerator.is_main_process:
238
+ batches_for_vis: List[Dict[str, torch.Tensor]] = []
239
+ num_vis_images = num_vis_images // batch_size_forward * batch_size_forward
240
+ for _ in range(num_vis_images // batch_size_forward):
241
+ batch = train_data_pipe.get()
242
+ batches_for_vis.append(batch)
243
+
244
+ # Visualize GT
245
+ if vis_every > 0 and accelerator.is_main_process and initial_step == 0:
246
+ save_dir = Path(workspace).joinpath('vis/gt')
247
+ for i_batch, batch in enumerate(tqdm(batches_for_vis, desc='Visualize GT', leave=False)):
248
+ image, gt_depth, gt_normal, gt_intrinsics, info = batch['image'], batch['depth'], batch['normal'], batch['intrinsics'], batch['info']
249
+ gt_points = utils3d.pt.depth_map_to_point_map(gt_depth, intrinsics=gt_intrinsics)
250
+ for i_instance in range(batch['image'].shape[0]):
251
+ idx = i_batch * batch_size_forward + i_instance
252
+ image_i = (image[i_instance].numpy().transpose(1, 2, 0) * 255).astype(np.uint8)
253
+ gt_depth_i = gt_depth[i_instance].numpy()
254
+ gt_points_i = gt_points[i_instance].numpy()
255
+ gt_normal_i = gt_normal[i_instance].numpy()
256
+ save_dir.joinpath(f'{idx:04d}').mkdir(parents=True, exist_ok=True)
257
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/image.jpg')), cv2.cvtColor(image_i, cv2.COLOR_RGB2BGR))
258
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/points.exr')), cv2.cvtColor(gt_points_i, cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
259
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/depth_vis.png')), cv2.cvtColor(colorize_depth(gt_depth_i), cv2.COLOR_RGB2BGR))
260
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/normal.png')), cv2.cvtColor(colorize_normal(gt_normal_i), cv2.COLOR_RGB2BGR))
261
+ with save_dir.joinpath(f'{idx:04d}/info.json').open('w') as f:
262
+ json.dump(info[i_instance], f)
263
+
264
+ # Reset seed to avoid training on the same data when resuming training
265
+ if seed is not None:
266
+ set_seed(seed + initial_step, device_specific=True)
267
+
268
+ # Training loop
269
+ for i_step in range(initial_step, num_iterations):
270
+
271
+ i_accumulate, weight_accumulate = 0, 0
272
+ while i_accumulate < gradient_accumulation_steps:
273
+ # Load batch
274
+ batch = train_data_pipe.get()
275
+ image, gt_depth, gt_normal, gt_mask_fin, gt_mask_inf, gt_intrinsics, label_type, is_metric = batch['image'], batch['depth'], batch['normal'], batch['depth_mask_fin'], batch['depth_mask_inf'], batch['intrinsics'], batch['label_type'], batch['is_metric']
276
+ image, gt_depth, gt_normal, gt_mask_fin, gt_mask_inf, gt_intrinsics = image.to(device), gt_depth.to(device), gt_normal.to(device), gt_mask_fin.to(device), gt_mask_inf.to(device), gt_intrinsics.to(device)
277
+ current_batch_size = image.shape[0]
278
+ if all(label == 'invalid' for label in label_type):
279
+ continue # NOTE: Skip all-invalid batches to avoid messing up the optimizer.
280
+
281
+ gt_points = utils3d.pt.depth_map_to_point_map(gt_depth, intrinsics=gt_intrinsics)
282
+ gt_focal = 1 / (1 / gt_intrinsics[..., 0, 0] ** 2 + 1 / gt_intrinsics[..., 1, 1] ** 2) ** 0.5
283
+
284
+ with accelerator.accumulate(model):
285
+ # Forward
286
+ if i_step <= config.get('low_resolution_training_steps', 0):
287
+ num_tokens = config['model']['num_tokens_range'][0]
288
+ else:
289
+ num_tokens = accelerate.utils.broadcast_object_list([random.randint(*config['model']['num_tokens_range'])])[0]
290
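+ # NOTE: broadcasting the randomly sampled token count keeps all ranks at the same resolution for this step.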
+ with torch.autocast(device_type=accelerator.device.type, dtype=torch.float16, enabled=enable_mixed_precision):
291
+ output = model(image, num_tokens=num_tokens)
292
+ pred_points, pred_mask, pred_normal, pred_metric_scale = (output.get(k, None) for k in ['points', 'mask', 'normal', 'metric_scale'])
293
+
294
+ # Compute loss (per instance)
295
+ loss_list, weight_list = [], []
296
+ for i in range(current_batch_size):
297
+ gt_metric_scale = None
298
+ loss_dict, weight_dict, misc_dict = {}, {}, {}
299
+ misc_dict['monitoring'] = monitoring(pred_points[i])
300
+ for k, v in config['loss'][label_type[i]].items():
301
+ weight_dict[k] = v['weight']
302
+ if v['function'] == 'affine_invariant_global_loss':
303
+ loss_dict[k], misc_dict[k], gt_metric_scale = affine_invariant_global_loss(pred_points[i], gt_points[i], **v['params'])
304
+ elif v['function'] == 'affine_invariant_local_loss':
305
+ loss_dict[k], misc_dict[k] = affine_invariant_local_loss(pred_points[i], gt_points[i], gt_focal[i], gt_metric_scale, **v['params'])
306
+ elif v['function'] == 'normal_loss':
307
+ loss_dict[k], misc_dict[k] = normal_loss(pred_points[i], gt_points[i])
308
+ elif v['function'] == 'edge_loss':
309
+ loss_dict[k], misc_dict[k] = edge_loss(pred_points[i], gt_points[i])
310
+ elif v['function'] == 'normal_map_loss':
311
+ loss_dict[k], misc_dict[k] = normal_map_loss(pred_normal[i], gt_normal[i])
312
+ elif v['function'] == 'mask_bce_loss':
313
+ loss_dict[k], misc_dict[k] = mask_bce_loss(pred_mask[i], gt_mask_fin[i], gt_mask_inf[i])
314
+ elif v['function'] == 'mask_l2_loss':
315
+ loss_dict[k], misc_dict[k] = mask_l2_loss(pred_mask[i], gt_mask_fin[i], gt_mask_inf[i])
316
+ elif v['function'] == 'metric_scale_loss':
317
+ if is_metric[i] and pred_metric_scale is not None:
318
+ loss_dict[k], misc_dict[k] = metric_scale_loss(pred_metric_scale[i], gt_metric_scale)
319
+ else:
320
+ raise ValueError(f'Undefined loss function: {v["function"]}')
321
+ weight_dict = {'.'.join(k): v for k, v in flatten_nested_dict(weight_dict).items()}
322
+ loss_dict = {'.'.join(k): v for k, v in flatten_nested_dict(loss_dict).items()}
323
+ loss_ = sum([weight_dict[k] * loss_dict[k] for k in loss_dict], start=torch.tensor(0.0, device=device))
324
+ loss_list.append(loss_)
325
+
326
+ if torch.isnan(loss_).item():
327
+ pbar.write(f'NaN loss in process {accelerator.process_index}')
328
+ pbar.write(str(loss_dict))
329
+
330
+ misc_dict = {'.'.join(k): v for k, v in flatten_nested_dict(misc_dict).items()}
331
+ records.append({
332
+ **{k: v.item() for k, v in loss_dict.items()},
333
+ **misc_dict,
334
+ })
335
+
336
+ loss = sum(loss_list) / len(loss_list)
337
+
338
+ # Backward & update
339
+ accelerator.backward(loss)
340
+ if accelerator.sync_gradients:
341
+ if not enable_mixed_precision and any(torch.isnan(p.grad).any() for p in model.parameters() if p.grad is not None):
342
+ if accelerator.is_main_process:
343
+ pbar.write('NaN gradients, skipping update')
344
+ optimizer.zero_grad()
345
+ continue
346
+ accelerator.clip_grad_norm_(model.parameters(), 1.0)
347
+
348
+ optimizer.step()
349
+ optimizer.zero_grad()
350
+
351
+ i_accumulate += 1
352
+
353
+ lr_scheduler.step()
354
+
355
+ # EMA update
356
+ if enable_ema and accelerator.is_main_process and accelerator.sync_gradients:
357
+ ema_model.update_parameters(model)
358
+
359
+ # Log metrics
360
+ if i_step == initial_step or i_step % log_every == 0:
361
+ records = [key_average(records)]
362
+ records = accelerator.gather_for_metrics(records, use_gather_object=True)
363
+ if accelerator.is_main_process:
364
+ records = key_average(records)
365
+ if enable_mlflow:
366
+ try:
367
+ mlflow.log_metrics(records, step=i_step)
368
+ except Exception as e:
369
+ print(f'Error while logging metrics to mlflow: {e}')
370
+ records = []
371
+
372
+ # Save model weight checkpoint
373
+ if accelerator.is_main_process and (i_step % save_every == 0):
374
+ # NOTE: Writing checkpoint is done in a separate thread to avoid blocking the main process
375
+ pbar.write(f'Save checkpoint: {i_step:08d}')
376
+ Path(workspace, 'checkpoint').mkdir(parents=True, exist_ok=True)
377
+
378
+ # Model checkpoint
379
+ with io.BytesIO() as f:
380
+ torch.save({
381
+ 'model_config': config['model'],
382
+ 'model': accelerator.unwrap_model(model).state_dict(),
383
+ }, f)
384
+ checkpoint_bytes = f.getvalue()
385
+ save_checkpoint_executor.submit(
386
+ _write_bytes_retry_loop, Path(workspace, 'checkpoint', f'{i_step:08d}.pt'), checkpoint_bytes
387
+ )
388
+
389
+ # Optimizer checkpoint
390
+ with io.BytesIO() as f:
391
+ torch.save({
392
+ 'model_config': config['model'],
393
+ 'step': i_step,
394
+ 'optimizer': optimizer.state_dict(),
395
+ 'lr_scheduler': lr_scheduler.state_dict(),
396
+ }, f)
397
+ checkpoint_bytes = f.getvalue()
398
+ save_checkpoint_executor.submit(
399
+ _write_bytes_retry_loop, Path(workspace, 'checkpoint', f'{i_step:08d}_optimizer.pt'), checkpoint_bytes
400
+ )
401
+
402
+ # EMA model checkpoint
403
+ if enable_ema:
404
+ with io.BytesIO() as f:
405
+ torch.save({
406
+ 'model_config': config['model'],
407
+ 'model': ema_model.module.state_dict(),
408
+ }, f)
409
+ checkpoint_bytes = f.getvalue()
410
+ save_checkpoint_executor.submit(
411
+ _write_bytes_retry_loop, Path(workspace, 'checkpoint', f'{i_step:08d}_ema.pt'), checkpoint_bytes
412
+ )
413
+
414
+ # Latest checkpoint
415
+ with io.BytesIO() as f:
416
+ torch.save({
417
+ 'model_config': config['model'],
418
+ 'step': i_step,
419
+ }, f)
420
+ checkpoint_bytes = f.getvalue()
421
+ save_checkpoint_executor.submit(
422
+ _write_bytes_retry_loop, Path(workspace, 'checkpoint', 'latest.pt'), checkpoint_bytes
423
+ )
424
+
425
+ # Visualize
426
+ if vis_every > 0 and accelerator.is_main_process and (i_step == initial_step or i_step % vis_every == 0):
427
+ unwrapped_model = accelerator.unwrap_model(model)
428
+ save_dir = Path(workspace).joinpath(f'vis/step_{i_step:08d}')
429
+ save_dir.mkdir(parents=True, exist_ok=True)
430
+ with torch.inference_mode():
431
+ for i_batch, batch in enumerate(tqdm(batches_for_vis, desc=f'Visualize: {i_step:08d}', leave=False)):
432
+ image, gt_depth, gt_intrinsics = batch['image'], batch['depth'], batch['intrinsics']
433
+ image, gt_depth, gt_intrinsics = image.to(device), gt_depth.to(device), gt_intrinsics.to(device)
434
+
435
+ output = unwrapped_model.infer(image)
436
+ pred_points = output['points'].cpu().numpy() if 'points' in output else None
437
+ pred_depth = output['depth'].cpu().numpy() if 'depth' in output else None
438
+ pred_mask = output['mask'].cpu().numpy() if 'mask' in output else None
439
+ pred_normal = output['normal'].cpu().numpy() if 'normal' in output else None
440
+ pred_uncertainty = output['uncertainty'].cpu().numpy() if 'uncertainty' in output else None
441
+ image = (image.cpu().numpy().transpose(0, 2, 3, 1) * 255).astype(np.uint8)
442
+
443
+ for i_instance in range(image.shape[0]):
444
+ idx = i_batch * batch_size_forward + i_instance
445
+ save_dir.joinpath(f'{idx:04d}').mkdir(parents=True, exist_ok=True)
446
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/image.jpg')), cv2.cvtColor(image[i_instance], cv2.COLOR_RGB2BGR))
447
+ if pred_points is not None:
448
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/points.exr')), cv2.cvtColor(pred_points[i_instance], cv2.COLOR_RGB2BGR), [cv2.IMWRITE_EXR_TYPE, cv2.IMWRITE_EXR_TYPE_FLOAT])
449
+ if pred_mask is not None:
450
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/mask.png')), pred_mask[i_instance] * 255)
451
+ if pred_depth is not None:
452
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/depth_vis.png')), cv2.cvtColor(colorize_depth(pred_depth[i_instance], pred_mask[i_instance] if pred_mask is not None else None), cv2.COLOR_RGB2BGR))
453
+ if pred_normal is not None:
454
+ cv2.imwrite(str(save_dir.joinpath(f'{idx:04d}/normal_vis.png')), cv2.cvtColor(colorize_normal(pred_normal[i_instance], pred_mask[i_instance] if pred_mask is not None else None), cv2.COLOR_RGB2BGR))
455
+
456
+ pbar.set_postfix({'loss': loss.item()}, refresh=False)
457
+ pbar.update(1)
458
+
459
+
460
+ if __name__ == '__main__':
461
+ main()
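Judging from the loading logic above, resuming is controlled by the checkpoint argument: a concrete `.pt` file path, the literal string `latest` (which follows `workspace/checkpoint/latest.pt` and pulls in the matching model/optimizer/EMA files), or a bare step number.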
moge/scripts/vis_data.py ADDED
@@ -0,0 +1,84 @@
1
+ import os
2
+ os.environ['OPENCV_IO_ENABLE_OPENEXR'] = '1'
3
+ import sys
4
+ from pathlib import Path
5
+ if (_package_root := str(Path(__file__).absolute().parents[2])) not in sys.path:
6
+ sys.path.insert(0, _package_root)
7
+
8
+ import click
9
+
10
+
11
+ @click.command()
12
+ @click.argument('folder_or_path', type=click.Path(exists=True))
13
+ @click.option('--output', '-o', 'output_folder', type=click.Path(), help='Path to output folder')
14
+ @click.option('--max_depth', '-m', type=float, default=float('inf'), help='max depth')
15
+ @click.option('--fov', type=float, default=None, help='field of view in degrees')
16
+ @click.option('--show', 'show', is_flag=True, help='show point cloud')
17
+ @click.option('--depth', 'depth_filename', type=str, default='depth.png', help='depth image file name')
18
+ @click.option('--ply', 'save_ply', is_flag=True, help='save point cloud as PLY file')
19
+ @click.option('--depth_vis', 'save_depth_vis', is_flag=True, help='save depth image')
20
+ @click.option('--inf', 'inf_mask', is_flag=True, help='use infinity mask')
21
+ @click.option('--version', 'version', type=str, default='v3', help='version of rgbd data')
22
+ def main(
23
+ folder_or_path: str,
24
+ output_folder: str,
25
+ max_depth: float,
26
+ fov: float,
27
+ depth_filename: str,
28
+ show: bool,
29
+ save_ply: bool,
30
+ save_depth_vis: bool,
31
+ inf_mask: bool,
32
+ version: str
33
+ ):
34
+ # Lazy import
35
+ import cv2
36
+ import numpy as np
37
+ import utils3d
38
+ from tqdm import tqdm
39
+ import trimesh
40
+
41
+ from moge.utils.io import read_image, read_depth, read_json
42
+ from moge.utils.vis import colorize_depth, colorize_normal
43
+
44
+ filepaths = sorted(p.parent for p in Path(folder_or_path).rglob('meta.json'))
45
+
46
+ for filepath in tqdm(filepaths):
47
+ image = read_image(Path(filepath, 'image.jpg'))
48
+ depth = read_depth(Path(filepath, depth_filename))
49
+ meta = read_json(Path(filepath,'meta.json'))
50
+ depth_mask = np.isfinite(depth)
51
+ depth_mask_inf = (depth == np.inf)
52
+ intrinsics = np.array(meta['intrinsics'])
53
+
54
+ extrinsics = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], dtype=float) # OpenGL's identity camera
55
+ verts = utils3d.np.unproject_cv(utils3d.np.uv_map(image.shape[:2]), depth, extrinsics=extrinsics, intrinsics=intrinsics)
56
+
57
+ depth_mask_ply = depth_mask & (depth < depth[depth_mask].min() * max_depth)
58
+ point_cloud = trimesh.PointCloud(verts[depth_mask_ply], image[depth_mask_ply] / 255)
59
+
60
+ if show:
61
+ point_cloud.show()
62
+
63
+ if output_folder is None:
64
+ output_path = filepath
65
+ else:
66
+ output_path = Path(output_folder, filepath.name)
67
+ output_path.mkdir(exist_ok=True, parents=True)
68
+
69
+ if inf_mask:
70
+ depth = np.where(depth_mask_inf, np.inf, depth)
71
+ depth_mask = depth_mask | depth_mask_inf
72
+
73
+ if save_depth_vis:
74
+ p = output_path.joinpath('depth_vis.png')
75
+ cv2.imwrite(str(p), cv2.cvtColor(colorize_depth(depth, depth_mask), cv2.COLOR_RGB2BGR))
76
+ print(f"{p}")
77
+
78
+ if save_ply:
79
+ p = output_path.joinpath('pointcloud.ply')
80
+ point_cloud.export(p)
81
+ print(f"{p}")
82
+
83
+ if __name__ == '__main__':
84
+ main()
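A hypothetical invocation of this script, assuming an RGBD folder where each instance directory contains `image.jpg`, `depth.png` and `meta.json` (paths below are illustrative):

    python moge/scripts/vis_data.py data/my_rgbd --ply --depth_vis -o outputs/vis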
moge/test/__init__.py ADDED
File without changes
moge/test/baseline.py ADDED
@@ -0,0 +1,43 @@
1
+ from typing import *
2
+
3
+ import click
4
+ import torch
5
+
6
+
7
+ class MGEBaselineInterface:
8
+ """
9
+ Abstract class for model wrapper to uniformize the interface of loading and inference across different models.
10
+ """
11
+ device: torch.device
12
+
13
+ @click.command()
14
+ @staticmethod
15
+ def load(*args, **kwargs) -> "MGEBaselineInterface":
16
+ """
17
+ Customized static method to create an instance of the model wrapper from command line arguments. Decorated by `click.command()`
18
+ """
19
+ raise NotImplementedError(f"{type(self).__name__} has not implemented the load method.")
20
+
21
+ def infer(self, image: torch.FloatTensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
22
+ """
23
+ ### Parameters
24
+ `image`: [B, 3, H, W] or [3, H, W], RGB values in range [0, 1]
25
+ `intrinsics`: [B, 3, 3] or [3, 3], camera intrinsics. Optional.
26
+
27
+ ### Returns
28
+ A dictionary containing:
29
+ - `points_*`. point map output in OpenCV identity camera space.
30
+ Supported suffixes: `metric`, `scale_invariant`, `affine_invariant`.
31
+ - `depth_*`. depth map output
32
+ Supported suffixes: `metric` (in meters), `scale_invariant`, `affine_invariant`.
33
+ - `disparity_affine_invariant`. affine disparity map output
34
+ """
35
+ raise NotImplementedError(f"{type(self).__name__} has not implemented the infer method.")
36
+
37
+ def infer_for_evaluation(self, image: torch.FloatTensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
38
+ """
39
+ If the model has a special evaluation mode, override this method to provide the evaluation mode inference.
40
+
41
+ By default, this method simply calls `infer()`.
42
+ """
43
+ return self.infer(image, intrinsics)
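A minimal sketch of a concrete wrapper (the class and its constant-depth output are illustrative, not part of the repository; it assumes the evaluation driver invokes `load` with `standalone_mode=False`):

    class ConstantDepthBaseline(MGEBaselineInterface):
        def __init__(self, device: str = 'cuda'):
            self.device = torch.device(device)

        @staticmethod
        @click.command()
        @click.option('--device', default='cuda', help='Inference device.')
        def load(device: str):
            return ConstantDepthBaseline(device)

        def infer(self, image: torch.FloatTensor, intrinsics: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]:
            # A real wrapper would run its network here; this toy one predicts unit depth everywhere.
            depth = torch.ones(image.shape[-2:], device=image.device)
            return {'depth_affine_invariant': depth}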
moge/test/dataloader.py ADDED
@@ -0,0 +1,221 @@
1
+ import os
2
+ from typing import *
3
+ from pathlib import Path
4
+ import math
5
+
6
+ import numpy as np
7
+ import torch
8
+ from PIL import Image
9
+ import cv2
10
+ import utils3d
11
+ import pipeline
12
+
13
+ from ..utils.geometry_numpy import focal_to_fov_numpy, norm3d
14
+ from ..utils.io import *
15
+ from ..utils.tools import timeit
16
+
17
+
18
+ class EvalDataLoaderPipeline:
19
+
20
+ def __init__(
21
+ self,
22
+ path: str,
23
+ width: int,
24
+ height: int,
25
+ split: str = '.index.txt',
26
+ drop_max_depth: float = 1000.,
27
+ num_load_workers: int = 4,
28
+ num_process_workers: int = 8,
29
+ include_segmentation: bool = False,
30
+ include_normal: bool = False,
31
+ depth_to_normal: bool = False,
32
+ max_segments: int = 100,
33
+ min_seg_area: int = 1000,
34
+ depth_unit: str = None,
35
+ has_sharp_boundary: bool = False,
36
+ subset: int = None,
37
+ ):
38
+ filenames = Path(path).joinpath(split).read_text(encoding='utf-8').splitlines()
39
+ filenames = filenames[::subset]
40
+ self.width = width
41
+ self.height = height
42
+ self.drop_max_depth = drop_max_depth
43
+ self.path = Path(path)
44
+ self.filenames = filenames
45
+ self.include_segmentation = include_segmentation
46
+ self.include_normal = include_normal
47
+ self.max_segments = max_segments
48
+ self.min_seg_area = min_seg_area
49
+ self.depth_to_normal = depth_to_normal
50
+ self.depth_unit = depth_unit
51
+ self.has_sharp_boundary = has_sharp_boundary
52
+
53
+ self.rng = np.random.default_rng(seed=0)
54
+
55
+ self.pipeline = pipeline.Sequential([
56
+ self._generator,
57
+ pipeline.Parallel([self._load_instance] * num_load_workers),
58
+ pipeline.Parallel([self._process_instance] * num_process_workers),
59
+ pipeline.Buffer(4)
60
+ ])
61
+
62
+ def __len__(self):
63
+ return len(self.filenames)
64
+
65
+ def _generator(self):
66
+ for idx in range(len(self)):
67
+ yield idx
68
+
69
+ def _load_instance(self, idx):
70
+ if idx >= len(self.filenames):
71
+ return None
72
+
73
+ path = self.path.joinpath(self.filenames[idx])
74
+
75
+ instance = {
76
+ 'filename': self.filenames[idx],
77
+ 'width': self.width,
78
+ 'height': self.height,
79
+ }
80
+ instance['image'] = read_image(Path(path, 'image.jpg'))
81
+
82
+ depth = read_depth(Path(path, 'depth.png')) # ignore depth unit from depth file, use config instead
83
+ instance.update({
84
+ 'depth': np.nan_to_num(depth, nan=1, posinf=1, neginf=1),
85
+ 'depth_mask': np.isfinite(depth),
86
+ 'depth_mask_inf': np.isinf(depth),
87
+ })
88
+
89
+ if self.include_segmentation:
90
+ segmentation_mask, segmentation_labels = read_segmentation(Path(path,'segmentation.png'))
91
+ instance.update({
92
+ 'segmentation_mask': segmentation_mask,
93
+ 'segmentation_labels': segmentation_labels,
94
+ })
95
+
96
+ meta = read_json(Path(path, 'meta.json'))
97
+ instance['intrinsics'] = np.array(meta['intrinsics'], dtype=np.float32)
98
+
99
+ return instance
100
+
101
+ def _process_instance(self, instance: dict):
102
+ if instance is None:
103
+ return None
104
+
105
+ image, depth, depth_mask, intrinsics = instance['image'], instance['depth'], instance['depth_mask'], instance['intrinsics']
106
+ segmentation_mask, segmentation_labels = instance.get('segmentation_mask', None), instance.get('segmentation_labels', None)
107
+
108
+ raw_height, raw_width = image.shape[:2]
109
+ raw_horizontal, raw_vertical = abs(1.0 / intrinsics[0, 0]), abs(1.0 / intrinsics[1, 1])
110
+ raw_pixel_w, raw_pixel_h = raw_horizontal / raw_width, raw_vertical / raw_height
111
+ tgt_width, tgt_height = instance['width'], instance['height']
112
+ tgt_aspect = tgt_width / tgt_height
113
+
114
+ # set expected target view field
115
+ tgt_horizontal = min(raw_horizontal, raw_vertical * tgt_aspect)
116
+ tgt_vertical = tgt_horizontal / tgt_aspect
117
+
118
+ # set target view direction
119
+ cu, cv = 0.5, 0.5
120
+ direction = utils3d.np.unproject_cv(np.array([[cu, cv]], dtype=np.float32), np.array([1.0], dtype=np.float32), intrinsics=intrinsics)[0]
121
+ R = utils3d.np.rotation_matrix_from_vectors(direction, np.array([0, 0, 1], dtype=np.float32))
122
+
123
+ # restrict target view field within the raw view
124
+ corners = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=np.float32)
125
+ corners = np.concatenate([corners, np.ones((4, 1), dtype=np.float32)], axis=1) @ (np.linalg.inv(intrinsics).T @ R.T) # corners in viewport's camera plane
126
+ corners = corners[:, :2] / corners[:, 2:3]
127
+
128
+ warp_horizontal, warp_vertical = abs(1.0 / intrinsics[0, 0]), abs(1.0 / intrinsics[1, 1])
129
+ for i in range(4):
130
+ intersection, _ = utils3d.np.ray_intersection(
131
+ np.array([0., 0.]), np.array([[tgt_aspect, 1.0], [tgt_aspect, -1.0]]),
132
+ corners[i - 1], corners[i] - corners[i - 1],
133
+ )
134
+ warp_horizontal, warp_vertical = min(warp_horizontal, 2 * np.abs(intersection[:, 0]).min()), min(warp_vertical, 2 * np.abs(intersection[:, 1]).min())
135
+ tgt_horizontal, tgt_vertical = min(tgt_horizontal, warp_horizontal), min(tgt_vertical, warp_vertical)
136
+
137
+ # get target view intrinsics
138
+ fx, fy = 1.0 / tgt_horizontal, 1.0 / tgt_vertical
139
+ tgt_intrinsics = utils3d.np.intrinsics_from_focal_center(fx, fy, 0.5, 0.5).astype(np.float32)
140
+
141
+ # do homogeneous transformation with the rotation and intrinsics
142
+ # 4.1 The image and depth are first resized to approximately the target pixel size, using PIL's antialiased resampling
143
+ tgt_pixel_w, tgt_pixel_h = tgt_horizontal / tgt_width, tgt_vertical / tgt_height # (should be exactly the same for x and y axes)
144
+ rescaled_w, rescaled_h = int(raw_width * raw_pixel_w / tgt_pixel_w), int(raw_height * raw_pixel_h / tgt_pixel_h)
145
+ image = np.array(Image.fromarray(image).resize((rescaled_w, rescaled_h), Image.Resampling.LANCZOS))
146
+
147
+ depth, depth_mask = utils3d.np.masked_nearest_resize(depth, mask=depth_mask, size=(rescaled_h, rescaled_w))
148
+ distance = norm3d(utils3d.np.depth_map_to_point_map(depth, intrinsics=intrinsics))
149
+ segmentation_mask = cv2.resize(segmentation_mask, (rescaled_w, rescaled_h), interpolation=cv2.INTER_NEAREST) if segmentation_mask is not None else None
150
+
151
+ # 4.2 calculate homography warping
152
+ transform = intrinsics @ np.linalg.inv(R) @ np.linalg.inv(tgt_intrinsics)
153
+ uv_tgt = utils3d.np.uv_map(tgt_height, tgt_width)
154
+ pts = np.concatenate([uv_tgt, np.ones((tgt_height, tgt_width, 1), dtype=np.float32)], axis=-1) @ transform.T
155
+ uv_remap = pts[:, :, :2] / (pts[:, :, 2:3] + 1e-12)
156
+ pixel_remap = utils3d.np.uv_to_pixel(uv_remap, (rescaled_h, rescaled_w)).astype(np.float32)
157
+
158
+ tgt_image = cv2.remap(image, pixel_remap[:, :, 0], pixel_remap[:, :, 1], cv2.INTER_LINEAR)
159
+ tgt_distance = cv2.remap(distance, pixel_remap[:, :, 0], pixel_remap[:, :, 1], cv2.INTER_NEAREST)
160
+ tgt_ray_length = utils3d.np.unproject_cv(uv_tgt, np.ones_like(uv_tgt[:, :, 0]), intrinsics=tgt_intrinsics)
161
+ tgt_ray_length = (tgt_ray_length[:, :, 0] ** 2 + tgt_ray_length[:, :, 1] ** 2 + tgt_ray_length[:, :, 2] ** 2) ** 0.5
162
+ tgt_depth = tgt_distance / (tgt_ray_length + 1e-12)
163
+ tgt_depth_mask = cv2.remap(depth_mask.astype(np.uint8), pixel_remap[:, :, 0], pixel_remap[:, :, 1], cv2.INTER_NEAREST) > 0
164
+ tgt_segmentation_mask = cv2.remap(segmentation_mask, pixel_remap[:, :, 0], pixel_remap[:, :, 1], cv2.INTER_NEAREST) if segmentation_mask is not None else None
165
+
166
+ # drop depth greater than drop_max_depth times the near-field (1st percentile) depth
167
+ max_depth = np.nanquantile(np.where(tgt_depth_mask, tgt_depth, np.nan), 0.01) * self.drop_max_depth
168
+ tgt_depth_mask &= tgt_depth <= max_depth
169
+ tgt_depth = np.nan_to_num(tgt_depth, nan=0.0)
170
+
171
+ if self.depth_unit is not None:
172
+ tgt_depth *= self.depth_unit
173
+
174
+ if not np.any(tgt_depth_mask):
175
+ # always make sure that mask is not empty, otherwise the loss calculation will crash
176
+ tgt_depth_mask = np.ones_like(tgt_depth_mask)
177
+ tgt_depth = np.ones_like(tgt_depth)
178
+ instance['label_type'] = 'invalid'
179
+
180
+ tgt_pts = utils3d.np.unproject_cv(uv_tgt, tgt_depth, intrinsics=tgt_intrinsics)
181
+
182
+ # Process segmentation labels
183
+ if self.include_segmentation and segmentation_mask is not None:
184
+ for k in ['undefined', 'unannotated', 'background', 'sky']:
185
+ if k in segmentation_labels:
186
+ del segmentation_labels[k]
187
+ seg_id2count = dict(zip(*np.unique(tgt_segmentation_mask, return_counts=True)))
188
+ sorted_labels = sorted(segmentation_labels.keys(), key=lambda x: seg_id2count.get(segmentation_labels[x], 0), reverse=True)
189
+ segmentation_labels = {k: segmentation_labels[k] for k in sorted_labels[:self.max_segments] if seg_id2count.get(segmentation_labels[k], 0) >= self.min_seg_area}
190
+
191
+ instance.update({
192
+ 'image': torch.from_numpy(tgt_image.astype(np.float32) / 255.0).permute(2, 0, 1),
193
+ 'depth': torch.from_numpy(tgt_depth).float(),
194
+ 'depth_mask': torch.from_numpy(tgt_depth_mask).bool(),
195
+ 'intrinsics': torch.from_numpy(tgt_intrinsics).float(),
196
+ 'points': torch.from_numpy(tgt_pts).float(),
197
+ 'segmentation_mask': torch.from_numpy(tgt_segmentation_mask).long() if tgt_segmentation_mask is not None else None,
198
+ 'segmentation_labels': segmentation_labels,
199
+ 'is_metric': self.depth_unit is not None,
200
+ 'has_sharp_boundary': self.has_sharp_boundary,
201
+ })
202
+
203
+ instance = {k: v for k, v in instance.items() if v is not None}
204
+
205
+ return instance
206
+
207
+ def start(self):
208
+ self.pipeline.start()
209
+
210
+ def stop(self):
211
+ self.pipeline.stop()
212
+
213
+ def __enter__(self):
214
+ self.start()
215
+ return self
216
+
217
+ def __exit__(self, exc_type, exc_value, traceback):
218
+ self.stop()
219
+
220
+ def get(self):
221
+ return self.pipeline.get()
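A usage sketch (dataset path and sizes are illustrative; the loader is a context manager that starts and stops its worker pipeline):

    with EvalDataLoaderPipeline('data/eval_benchmark', width=640, height=480) as loader:
        for _ in range(len(loader)):
            instance = loader.get()
            if instance is None:
                continue
            image, depth, mask = instance['image'], instance['depth'], instance['depth_mask']  # [3, H, W], [H, W], [H, W]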
moge/test/metrics.py ADDED
@@ -0,0 +1,342 @@
1
+ from typing import *
2
+ from numbers import Number
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+ import utils3d
8
+
9
+ from ..utils.geometry_torch import (
10
+ weighted_mean,
11
+ intrinsics_to_fov
12
+ )
13
+ from ..utils.alignment import (
14
+ align_points_scale_z_shift,
15
+ align_points_scale_xyz_shift,
16
+ align_points_xyz_shift,
17
+ align_affine_lstsq,
18
+ align_depth_scale,
19
+ align_depth_affine,
20
+ align_points_scale,
21
+ )
22
+ from ..utils.tools import key_average, timeit
23
+
24
+
25
+ def rel_depth(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-6):
26
+ rel = (torch.abs(pred - gt) / (gt + eps)).mean()
27
+ return rel.item()
28
+
29
+
30
+ def delta1_depth(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-6):
31
+ delta1 = (torch.maximum(gt / pred, pred / gt) < 1.25).float().mean()
32
+ return delta1.item()
33
+
34
+
35
+ def rel_point(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-6):
36
+ dist_gt = torch.norm(gt, dim=-1)
37
+ dist_err = torch.norm(pred - gt, dim=-1)
38
+ rel = (dist_err / (dist_gt + eps)).mean()
39
+ return rel.item()
40
+
41
+
42
+ def delta1_point(pred: torch.Tensor, gt: torch.Tensor, eps: float = 1e-6):
43
+ dist_pred = torch.norm(pred, dim=-1)
44
+ dist_gt = torch.norm(gt, dim=-1)
45
+ dist_err = torch.norm(pred - gt, dim=-1)
46
+
47
+ delta1 = (dist_err < 0.25 * torch.minimum(dist_gt, dist_pred)).float().mean()
48
+ return delta1.item()
49
+
50
+
51
+ def rel_point_local(pred: torch.Tensor, gt: torch.Tensor, diameter: torch.Tensor):
52
+ dist_err = torch.norm(pred - gt, dim=-1)
53
+ rel = (dist_err / diameter).mean()
54
+ return rel.item()
55
+
56
+
57
+ def delta1_point_local(pred: torch.Tensor, gt: torch.Tensor, diameter: torch.Tensor):
58
+ dist_err = torch.norm(pred - gt, dim=-1)
59
+ delta1 = (dist_err < 0.25 * diameter).float().mean()
60
+ return delta1.item()
61
+
62
+
63
+ def boundary_f1(pred: torch.Tensor, gt: torch.Tensor, mask: torch.Tensor, radius: int = 1):
64
+ neighbor_x, neighbor_y = torch.meshgrid(
65
+ torch.linspace(-radius, radius, 2 * radius + 1, device=pred.device),
66
+ torch.linspace(-radius, radius, 2 * radius + 1, device=pred.device),
67
+ indexing='xy'
68
+ )
69
+ neighbor_mask = (neighbor_x ** 2 + neighbor_y ** 2) <= radius ** 2 + 1e-5
70
+
71
+ pred_window = utils3d.pt.sliding_window(pred, window_size=2 * radius + 1, stride=1, dim=(-2, -1)) # [H, W, 2*R+1, 2*R+1]
72
+ gt_window = utils3d.pt.sliding_window(gt, window_size=2 * radius + 1, stride=1, dim=(-2, -1)) # [H, W, 2*R+1, 2*R+1]
73
+ mask_window = neighbor_mask & utils3d.pt.sliding_window(mask, window_size=2 * radius + 1, stride=1, dim=(-2, -1)) # [H, W, 2*R+1, 2*R+1]
74
+
75
+ pred_rel = pred_window / pred[radius:-radius, radius:-radius, None, None]
76
+ gt_rel = gt_window / gt[radius:-radius, radius:-radius, None, None]
77
+ valid = mask[radius:-radius, radius:-radius, None, None] & mask_window
78
+
79
+ f1_list = []
80
+ w_list = t_list = torch.linspace(0.05, 0.25, 10).tolist()
81
+
82
+ for t in t_list:
83
+ pred_label = pred_rel > 1 + t
84
+ gt_label = gt_rel > 1 + t
85
+ TP = (pred_label & gt_label & valid).float().sum()
86
+ precision = TP / (pred_label & valid).float().sum().clamp_min(1e-12)
87
+ recall = TP / (gt_label & valid).float().sum().clamp_min(1e-12)
88
+ f1 = 2 * precision * recall / (precision + recall).clamp_min(1e-12)
89
+ f1_list.append(f1.item())
90
+
91
+ f1_avg = sum(w * f1 for w, f1 in zip(w_list, f1_list)) / sum(w_list)
92
+ return f1_avg
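+ # NOTE: the F1 above sweeps relative depth-discontinuity thresholds t in [0.05, 0.25] and averages the per-threshold scores, weighting each by its threshold (w_list == t_list).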
93
+
94
+
95
+ def compute_metrics(
96
+ pred: Dict[str, torch.Tensor],
97
+ gt: Dict[str, torch.Tensor],
98
+ vis: bool = False
99
+ ) -> Tuple[Dict[str, Dict[str, Number]], Dict[str, torch.Tensor]]:
100
+ """
101
+ A unified function to compute metrics for different types of predictions and ground truths.
102
+
103
+ #### Supported keys in pred:
104
+ - `disparity_affine_invariant`: disparity map predicted by a depth estimator with scale and shift invariant.
105
+ - `depth_scale_invariant`: depth map predicted by a depth estimator with scale invariant.
106
+ - `depth_affine_invariant`: depth map predicted by a depth estimator with scale and shift invariant.
107
+ - `depth_metric`: depth map predicted by a depth estimator with no scale or shift.
108
+ - `points_scale_invariant`: point map predicted by a point estimator with scale invariant.
109
+ - `points_affine_invariant`: point map predicted by a point estimator with scale and xyz shift invariant.
110
+ - `points_metric`: point map predicted by a point estimator with no scale or shift.
111
+ - `intrinsics`: normalized camera intrinsics matrix.
112
+
113
+ #### Required keys in gt:
114
+ - `depth`: depth map ground truth (in metric units if `depth_metric` is used)
115
+ - `points`: point map ground truth in camera coordinates.
116
+ - `depth_mask`: mask indicating valid pixels in the ground truth.
117
+ - `intrinsics`: normalized ground-truth camera intrinsics matrix.
118
+ - `is_metric`: whether the depth is in metric units.
+ - `has_sharp_boundary`: whether the GT depth has sharp boundaries (enables the boundary F1 metric).
119
+ """
120
+ metrics = {}
121
+ misc = {}
122
+
123
+ mask = gt['depth_mask']
124
+ gt_depth = gt['depth']
125
+ gt_points = gt['points']
126
+
127
+ height, width = mask.shape[-2:]
128
+ lr_mask, lr_index = utils3d.pt.masked_nearest_resize(mask=mask, size=(64, 64), return_index=True)
129
+
130
+ only_depth = not any('point' in k for k in pred)
131
+ pred_depth_aligned, pred_points_aligned = None, None
132
+
133
+ # Metric depth
134
+ if 'depth_metric' in pred and gt['is_metric']:
135
+ pred_depth, gt_depth = pred['depth_metric'], gt['depth']
136
+ metrics['depth_metric'] = {
137
+ 'rel': rel_depth(pred_depth[mask], gt_depth[mask]),
138
+ 'delta1': delta1_depth(pred_depth[mask], gt_depth[mask])
139
+ }
140
+
141
+ if pred_depth_aligned is None:
142
+ pred_depth_aligned = pred_depth
143
+
144
+ # Scale-invariant depth
145
+ if 'depth_scale_invariant' in pred:
146
+ pred_depth_scale_invariant = pred['depth_scale_invariant']
147
+ elif 'depth_metric' in pred:
148
+ pred_depth_scale_invariant = pred['depth_metric']
149
+ else:
150
+ pred_depth_scale_invariant = None
151
+
152
+ if pred_depth_scale_invariant is not None:
153
+ pred_depth = pred_depth_scale_invariant
154
+
155
+ pred_depth_lr_masked, gt_depth_lr_masked = pred_depth[lr_index][lr_mask], gt_depth[lr_index][lr_mask]
156
+ scale = align_depth_scale(pred_depth_lr_masked, gt_depth_lr_masked, 1 / gt_depth_lr_masked)
157
+ pred_depth = pred_depth * scale
158
+
159
+ metrics['depth_scale_invariant'] = {
160
+ 'rel': rel_depth(pred_depth[mask], gt_depth[mask]),
161
+ 'delta1': delta1_depth(pred_depth[mask], gt_depth[mask])
162
+ }
163
+
164
+ if pred_depth_aligned is None:
165
+ pred_depth_aligned = pred_depth
166
+
167
+ # Affine-invariant depth
168
+ if 'depth_affine_invariant' in pred:
169
+ pred_depth_affine_invariant = pred['depth_affine_invariant']
170
+ elif 'depth_scale_invariant' in pred:
171
+ pred_depth_affine_invariant = pred['depth_scale_invariant']
172
+ elif 'depth_metric' in pred:
173
+ pred_depth_affine_invariant = pred['depth_metric']
174
+ else:
175
+ pred_depth_affine_invariant = None
176
+
177
+ if pred_depth_affine_invariant is not None:
178
+ pred_depth = pred_depth_affine_invariant
179
+
180
+ pred_depth_lr_masked, gt_depth_lr_masked = pred_depth[lr_index][lr_mask], gt_depth[lr_index][lr_mask]
181
+ scale, shift = align_depth_affine(pred_depth_lr_masked, gt_depth_lr_masked, 1 / gt_depth_lr_masked)
182
+ pred_depth = pred_depth * scale + shift
183
+
184
+ metrics['depth_affine_invariant'] = {
185
+ 'rel': rel_depth(pred_depth[mask], gt_depth[mask]),
186
+ 'delta1': delta1_depth(pred_depth[mask], gt_depth[mask])
187
+ }
188
+
189
+ if pred_depth_aligned is None:
190
+ pred_depth_aligned = pred_depth
191
+
192
+ # Affine-invariant disparity
193
+ if 'disparity_affine_invariant' in pred:
194
+ pred_disparity_affine_invariant = pred['disparity_affine_invariant']
195
+ elif 'depth_scale_invariant' in pred:
196
+ pred_disparity_affine_invariant = 1 / pred['depth_scale_invariant']
197
+ elif 'depth_metric' in pred:
198
+ pred_disparity_affine_invariant = 1 / pred['depth_metric']
199
+ else:
200
+ pred_disparity_affine_invariant = None
201
+
202
+ if pred_disparity_affine_invariant is not None:
203
+ pred_disp = pred_disparity_affine_invariant
204
+
205
+ scale, shift = align_affine_lstsq(pred_disp[mask], 1 / gt_depth[mask])
206
+ pred_disp = pred_disp * scale + shift
207
+
208
+ # NOTE: Alignment performed on the disparity map can introduce extreme outliers at disparities close to 0.
209
+ # Therefore we clamp the predicted disparities to the minimum ground-truth disparity.
210
+ pred_depth = 1 / pred_disp.clamp_min(1 / gt_depth[mask].max().item())
211
+
212
+ metrics['disparity_affine_invariant'] = {
213
+ 'rel': rel_depth(pred_depth[mask], gt_depth[mask]),
214
+ 'delta1': delta1_depth(pred_depth[mask], gt_depth[mask])
215
+ }
216
+
217
+ if pred_depth_aligned is None:
218
+ pred_depth_aligned = 1 / pred_disp.clamp_min(1e-6)
219
+
220
+ # Metric points
221
+ if 'points_metric' in pred and gt['is_metric']:
222
+ pred_points = pred['points_metric']
223
+
224
+ pred_points_lr_masked, gt_points_lr_masked = pred_points[lr_index][lr_mask], gt_points[lr_index][lr_mask]
225
+ shift = align_points_xyz_shift(pred_points_lr_masked, gt_points_lr_masked, 1 / gt_points_lr_masked.norm(dim=-1))
226
+ pred_points = pred_points + shift
227
+
228
+ metrics['points_metric'] = {
229
+ 'rel': rel_point(pred_points[mask], gt_points[mask]),
230
+ 'delta1': delta1_point(pred_points[mask], gt_points[mask])
231
+ }
232
+
233
+ if pred_points_aligned is None:
234
+ pred_points_aligned = pred['points_metric']
235
+
236
+ # Scale-invariant points (in camera space)
237
+ if 'points_scale_invariant' in pred:
238
+ pred_points_scale_invariant = pred['points_scale_invariant']
239
+ elif 'points_metric' in pred:
240
+ pred_points_scale_invariant = pred['points_metric']
241
+ else:
242
+ pred_points_scale_invariant = None
243
+
244
+ if pred_points_scale_invariant is not None:
245
+ pred_points = pred_points_scale_invariant
246
+
247
+ pred_points_lr_masked, gt_points_lr_masked = pred_points_scale_invariant[lr_index][lr_mask], gt_points[lr_index][lr_mask]
248
+ scale = align_points_scale(pred_points_lr_masked, gt_points_lr_masked, 1 / gt_points_lr_masked.norm(dim=-1))
249
+ pred_points = pred_points * scale
250
+
251
+ metrics['points_scale_invariant'] = {
252
+ 'rel': rel_point(pred_points[mask], gt_points[mask]),
253
+ 'delta1': delta1_point(pred_points[mask], gt_points[mask])
254
+ }
255
+
256
+ if vis and pred_points_aligned is None:
257
+ pred_points_aligned = pred['points_scale_invariant'] * scale
258
+
259
+ # Affine-invariant points
260
+ if 'points_affine_invariant' in pred:
261
+ pred_points_affine_invariant = pred['points_affine_invariant']
262
+ elif 'points_scale_invariant' in pred:
263
+ pred_points_affine_invariant = pred['points_scale_invariant']
264
+ elif 'points_metric' in pred:
265
+ pred_points_affine_invariant = pred['points_metric']
266
+ else:
267
+ pred_points_affine_invariant = None
268
+
269
+ if pred_points_affine_invariant is not None:
270
+ pred_points = pred_points_affine_invariant
271
+
272
+ pred_points_lr_masked, gt_points_lr_masked = pred_points[lr_index][lr_mask], gt_points[lr_index][lr_mask]
273
+ scale, shift = align_points_scale_xyz_shift(pred_points_lr_masked, gt_points_lr_masked, 1 / gt_points_lr_masked.norm(dim=-1))
274
+ pred_points = pred_points * scale + shift
275
+
276
+ metrics['points_affine_invariant'] = {
277
+ 'rel': rel_point(pred_points[mask], gt_points[mask]),
278
+ 'delta1': delta1_point(pred_points[mask], gt_points[mask])
279
+ }
280
+
281
+ if vis and pred_points_aligned is None:
282
+ pred_points_aligned = pred['points_affine_invariant'] * scale + shift
283
+
284
+ # Local points
285
+ if 'segmentation_mask' in gt and 'points' in gt and any('points' in k for k in pred.keys()):
286
+ pred_points = next(pred[k] for k in pred.keys() if 'points' in k)
287
+ gt_points = gt['points']
288
+ segmentation_mask = gt['segmentation_mask']
289
+ segmentation_labels = gt['segmentation_labels']
290
+ segmentation_mask_lr = segmentation_mask[lr_index]
291
+ local_points_metrics = []
292
+ for _, seg_id in segmentation_labels.items():
293
+ valid_mask = (segmentation_mask == seg_id) & mask
294
+
295
+ pred_points_masked = pred_points[valid_mask]
296
+ gt_points_masked = gt_points[valid_mask]
297
+
298
+ valid_mask_lr = (segmentation_mask_lr == seg_id) & lr_mask
299
+ if valid_mask_lr.sum().item() < 10:
300
+ continue
301
+ pred_points_masked_lr = pred_points[lr_index][valid_mask_lr]
302
+ gt_points_masked_lr = gt_points[lr_index][valid_mask_lr]
303
+ diameter = (gt_points_masked.max(dim=0).values - gt_points_masked.min(dim=0).values).max()
304
+ scale, shift = align_points_scale_xyz_shift(pred_points_masked_lr, gt_points_masked_lr, 1 / diameter.expand(gt_points_masked_lr.shape[0]))
305
+ pred_points_masked = pred_points_masked * scale + shift
306
+
307
+ local_points_metrics.append({
308
+ 'rel': rel_point_local(pred_points_masked, gt_points_masked, diameter),
309
+ 'delta1': delta1_point_local(pred_points_masked, gt_points_masked, diameter),
310
+ })
311
+
312
+ metrics['local_points'] = key_average(local_points_metrics)
313
+
314
+ # FOV. NOTE: If no random augmentation is applied to the input images, all GT FOVs are generally the same.
315
+ # Fair evaluation of FOV requires random augmentation.
316
+ if 'intrinsics' in pred and 'intrinsics' in gt:
317
+ pred_intrinsics = pred['intrinsics']
318
+ gt_intrinsics = gt['intrinsics']
319
+ pred_fov_x, pred_fov_y = intrinsics_to_fov(pred_intrinsics)
320
+ gt_fov_x, gt_fov_y = intrinsics_to_fov(gt_intrinsics)
321
+ metrics['fov_x'] = {
322
+ 'mae': torch.rad2deg(pred_fov_x - gt_fov_x).abs().mean().item(),
323
+ 'deviation': torch.rad2deg(pred_fov_x - gt_fov_x).item(),
324
+ }
325
+
326
+ # Boundary F1
327
+ if pred_depth_aligned is not None and gt['has_sharp_boundary']:
328
+ metrics['boundary'] = {
329
+ 'radius1_f1': boundary_f1(pred_depth_aligned, gt_depth, mask, radius=1),
330
+ 'radius2_f1': boundary_f1(pred_depth_aligned, gt_depth, mask, radius=2),
331
+ 'radius3_f1': boundary_f1(pred_depth_aligned, gt_depth, mask, radius=3),
332
+ }
333
+
334
+ if vis:
335
+ if pred_points_aligned is not None:
336
+ misc['pred_points'] = pred_points_aligned
337
+ if only_depth:
338
+ misc['pred_points'] = utils3d.pt.depth_map_to_point_map(pred_depth_aligned, intrinsics=gt['intrinsics'])
339
+ if pred_depth_aligned is not None:
340
+ misc['pred_depth'] = pred_depth_aligned
341
+
342
+ return metrics, misc
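A usage sketch (tensors are [H, W] maps; the gt keys follow the docstring above):

    pred = {'depth_scale_invariant': pred_depth, 'intrinsics': pred_intrinsics}
    gt = {'depth': gt_depth, 'points': gt_points, 'depth_mask': valid_mask,
          'intrinsics': gt_intrinsics, 'is_metric': False, 'has_sharp_boundary': False}
    metrics, misc = compute_metrics(pred, gt)
    print(metrics['depth_scale_invariant'])  # e.g. {'rel': ..., 'delta1': ...}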
moge/train/__init__.py ADDED
File without changes
moge/train/dataloader.py ADDED
@@ -0,0 +1,258 @@
1
+ import os
2
+ from pathlib import Path
3
+ import json
4
+ import time
5
+ import random
6
+ from typing import *
7
+ import traceback
8
+ import itertools
9
+ from numbers import Number
10
+ import io
11
+
12
+ import numpy as np
13
+ import cv2
14
+ from PIL import Image
15
+ import torch
16
+ import torchvision.transforms.v2.functional as TF
17
+ import utils3d
18
+ import pipeline
19
+ from tqdm import tqdm
20
+
21
+ from ..utils.io import *
22
+ from ..utils.geometry_numpy import harmonic_mean_numpy, norm3d, depth_occlusion_edge_numpy
23
+ from ..utils.data_augmentation import sample_perspective, warp_perspective, image_color_augmentation
24
+
25
+
26
+ class TrainDataLoaderPipeline:
27
+ def __init__(self, config: dict, batch_size: int, num_load_workers: int = 4, num_process_workers: int = 8, buffer_size: int = 8):
28
+ self.config = config
29
+
30
+ self.batch_size = batch_size
31
+ self.clamp_max_depth = config['clamp_max_depth']
32
+ self.fov_range_absolute = config.get('fov_range_absolute', 0.0)
33
+ self.fov_range_relative = config.get('fov_range_relative', 0.0)
34
+ self.center_augmentation = config.get('center_augmentation', 0.0)
35
+ self.image_augmentation = config.get('image_augmentation', [])
36
+ self.depth_interpolation = config.get('depth_interpolation', 'bilinear')
37
+
38
+ if 'image_sizes' in config:
39
+ self.image_size_strategy = 'fixed'
40
+ self.image_sizes = config['image_sizes']
41
+ elif 'aspect_ratio_range' in config and 'area_range' in config:
42
+ self.image_size_strategy = 'aspect_area'
43
+ self.aspect_ratio_range = config['aspect_ratio_range']
44
+ self.area_range = config['area_range']
45
+ else:
46
+ raise ValueError('Invalid image size configuration')
47
+
48
+ # Load datasets
49
+ self.datasets = {}
50
+ for dataset in tqdm(config['datasets'], desc='Loading datasets'):
51
+ name = dataset['name']
52
+ content = Path(dataset['path'], dataset.get('index', '.index.txt')).read_text()
53
+ filenames = content.splitlines()
54
+ self.datasets[name] = {
55
+ **dataset,
56
+ 'path': dataset['path'],
57
+ 'filenames': filenames,
58
+ }
59
+ self.dataset_names = [dataset['name'] for dataset in config['datasets']]
60
+ self.dataset_weights = [dataset['weight'] for dataset in config['datasets']]
61
+
62
+ # Build pipeline
63
+ self.pipeline = pipeline.Sequential([
64
+ self._sample_batch,
65
+ pipeline.Unbatch(),
66
+ pipeline.Parallel([self._load_instance] * num_load_workers),
67
+ pipeline.Parallel([self._process_instance] * num_process_workers),
68
+ pipeline.Batch(self.batch_size),
69
+ self._collate_batch,
70
+ pipeline.Buffer(buffer_size),
71
+ ])
72
+
73
+ self.invalid_instance = {
74
+ 'intrinsics': np.array([[1.0, 0.0, 0.5], [0.0, 1.0, 0.5], [0.0, 0.0, 1.0]], dtype=np.float32),
75
+ 'image': np.zeros((256, 256, 3), dtype=np.uint8),
76
+ 'depth': np.ones((256, 256), dtype=np.float32),
77
+ 'depth_mask': np.ones((256, 256), dtype=bool),
78
+ 'depth_mask_inf': np.zeros((256, 256), dtype=bool),
79
+ 'label_type': 'invalid',
80
+ }
81
+
82
+ def _sample_batch(self):
83
+ batch_id = 0
84
+ last_area = None
85
+ while True:
86
+ # Depending on the sample strategy, choose a dataset and a filename
87
+ batch_id += 1
88
+ batch = []
89
+
90
+ # Sample instances
91
+ for _ in range(self.batch_size):
92
+ dataset_name = random.choices(self.dataset_names, weights=self.dataset_weights)[0]
93
+ filename = random.choice(self.datasets[dataset_name]['filenames'])
94
+
95
+ path = Path(self.datasets[dataset_name]['path'], filename)
96
+
97
+ instance = {
98
+ 'batch_id': batch_id,
99
+ 'seed': random.randint(0, 2 ** 32 - 1),
100
+ 'dataset': dataset_name,
101
+ 'filename': filename,
102
+ 'path': path,
103
+ 'label_type': self.datasets[dataset_name]['label_type'],
104
+ }
105
+ batch.append(instance)
106
+
107
+ # Decide the image size for this batch
108
+ if self.image_size_strategy == 'fixed':
109
+ width, height = random.choice(self.config['image_sizes'])
110
+ elif self.image_size_strategy == 'aspect_area':
111
+ area = random.uniform(*self.area_range)
112
+ aspect_ratio_ranges = [self.datasets[instance['dataset']].get('aspect_ratio_range', self.aspect_ratio_range) for instance in batch]
113
+ aspect_ratio_range = (min(r[0] for r in aspect_ratio_ranges), max(r[1] for r in aspect_ratio_ranges))
114
+ aspect_ratio = random.uniform(*aspect_ratio_range)
115
+ width, height = int((area * aspect_ratio) ** 0.5), int((area / aspect_ratio) ** 0.5)
116
+ else:
117
+ raise ValueError('Invalid image size strategy')
118
+
119
+ for instance in batch:
120
+ instance['width'], instance['height'] = width, height
121
+
122
+ yield batch
123
+
124
+ def _load_instance(self, instance: dict):
125
+ try:
126
+ image = read_image(Path(instance['path'], 'image.jpg'))
127
+ depth = read_depth(Path(instance['path'], self.datasets[instance['dataset']].get('depth', 'depth.png')))
128
+ meta = read_json(Path(instance['path'], 'meta.json'))
129
+ intrinsics = np.array(meta['intrinsics'], dtype=np.float32)
130
+ data = {
131
+ 'image': image,
132
+ 'depth': depth,
133
+ 'intrinsics': intrinsics
134
+ }
135
+ instance.update({
136
+ **data,
137
+ })
138
+ except Exception as e:
139
+ print(f"Failed to load instance {instance['dataset']}/{instance['filename']} because of exception:", e)
140
+ instance.update(self.invalid_instance)
141
+ return instance
142
+
143
+ def _process_instance(self, instance: Dict[str, Union[np.ndarray, str, float, bool]]):
144
+ raw_image, raw_depth, raw_intrinsics, label_type = instance['image'], instance['depth'], instance['intrinsics'], instance['label_type']
145
+ raw_normal, raw_normal_mask = utils3d.np.depth_map_to_normal_map(raw_depth, intrinsics=raw_intrinsics, mask=np.isfinite(raw_depth), edge_threshold=88)
146
+ raw_normal = np.where(raw_normal_mask[..., None], raw_normal, np.nan)
147
+ depth_unit = self.datasets[instance['dataset']].get('depth_unit', None)
148
+
149
+ raw_height, raw_width = raw_image.shape[:2]
150
+ raw_fov_x, raw_fov_y = utils3d.np.intrinsics_to_fov(raw_intrinsics)
151
+ tgt_width, tgt_height = instance['width'], instance['height']
152
+ tgt_aspect = tgt_width / tgt_height
153
+
154
+ rng = np.random.default_rng(instance['seed'])
155
+
156
+ # Sample perspective transformation
157
+ tgt_intrinsics, R = sample_perspective(
158
+ raw_intrinsics,
159
+ tgt_aspect=tgt_aspect,
160
+ center_augmentation=self.datasets[instance['dataset']].get('center_augmentation', self.center_augmentation),
161
+ fov_range_absolute=self.datasets[instance['dataset']].get('fov_range_absolute', self.fov_range_absolute),
162
+ fov_range_relative=self.datasets[instance['dataset']].get('fov_range_relative', self.fov_range_relative),
163
+ rng=rng
164
+ )
165
+
166
+ # Warp
167
+ transform = tgt_intrinsics @ R @ np.linalg.inv(raw_intrinsics)
168
+ # - Warp image
169
+ tgt_image = warp_perspective(raw_image, transform, tgt_size=(tgt_height, tgt_width), interpolation='lanczos')
170
+ # - Warp depth
171
+ depth_edge_mask = utils3d.np.depth_map_edge(raw_depth, mask=np.isfinite(raw_depth), kernel_size=5, ltol=0.01)
172
+ depth_bilinear_mask = np.isfinite(raw_depth) & ~depth_edge_mask
173
+ warped_depth_bilinear_mask = warp_perspective(depth_bilinear_mask.astype(np.float32), transform, (tgt_height, tgt_width), interpolation='bilinear')
174
+ warped_depth_nearest = warp_perspective(raw_depth, transform, (tgt_height, tgt_width), interpolation='nearest', sparse_mask=~np.isnan(raw_depth))
175
+ warped_depth_bilinear = 1 / warp_perspective(1 / raw_depth, transform, (tgt_height, tgt_width), interpolation='bilinear') # NOTE: Bilinear intepolation in disparity space maintains planar surfaces.
176
+ warped_depth = np.where(warped_depth_bilinear_mask == 1., warped_depth_bilinear, warped_depth_nearest)
177
+ tgt_uvhomo = np.concatenate([utils3d.np.uv_map((tgt_height, tgt_width)), np.ones((tgt_height, tgt_width, 1), dtype=np.float32)], axis=-1)
178
+ tgt_depth = warped_depth / np.dot(tgt_uvhomo, np.linalg.inv(transform)[2, :])
179
+ # - Warp normal
180
+ warped_normal = warp_perspective(raw_normal, transform, (tgt_height, tgt_width), interpolation='bilinear')
181
+ tgt_normal = warped_normal @ R.T
182
+
183
+ # always make sure that mask is not empty
184
+ if np.isfinite(tgt_depth).sum() / tgt_depth.size < 0.001:
185
+ tgt_depth = np.ones_like(tgt_depth)
186
+ instance['label_type'] = 'invalid'
187
+
188
+ # Flip augmentation
189
+ if rng.choice([True, False]):
190
+ tgt_image = np.flip(tgt_image, axis=1).copy()
191
+ tgt_depth = np.flip(tgt_depth, axis=1).copy()
192
+ tgt_normal = np.flip(tgt_normal, axis=1).copy() * [-1, 1, 1]
193
+ # NOTE: if cx != 0.5, flip intrinsics accordingly.
194
+
195
+ # Color augmentation
196
+ image_augmentation = self.datasets[instance['dataset']].get('image_augmentation', self.image_augmentation)
197
+ tgt_image = image_color_augmentation(
198
+ tgt_image,
199
+ augmentations=image_augmentation,
200
+ rng=rng,
201
+ depth=tgt_depth,
202
+ )
203
+
204
+ # Set metric flag if depth is in metric unit
205
+ if depth_unit is not None:
206
+ tgt_depth *= depth_unit
207
+ instance['is_metric'] = True
208
+ else:
209
+ instance['is_metric'] = False
210
+
211
+ # Clip maximum depth
212
+ max_depth = np.nanquantile(np.where(np.isfinite(tgt_depth), tgt_depth, np.nan), 0.01) * self.clamp_max_depth
213
+ tgt_depth = np.where(np.isfinite(tgt_depth), np.clip(tgt_depth, 0, max_depth), tgt_depth)
214
+
215
+ tgt_depth_mask_inf = np.isinf(tgt_depth)
216
+ if self.datasets[instance['dataset']].get('finite_depth_mask', None) == "only_known":
217
+ tgt_depth_mask_fin = np.isfinite(tgt_depth)
218
+ else:
219
+ tgt_depth_mask_fin = ~tgt_depth_mask_inf
220
+
221
+ instance.update({
222
+ 'image': torch.from_numpy(tgt_image.astype(np.float32) / 255.0).permute(2, 0, 1),
223
+ 'depth': torch.from_numpy(tgt_depth).float(),
224
+ 'depth_mask_fin': torch.from_numpy(tgt_depth_mask_fin).bool(),
225
+ 'depth_mask_inf': torch.from_numpy(tgt_depth_mask_inf).bool(),
226
+ "normal": torch.from_numpy(tgt_normal).float(),
227
+ 'intrinsics': torch.from_numpy(tgt_intrinsics).float(),
228
+ })
229
+ return instance
230
+
231
+ def _collate_batch(self, instances: List[Dict[str, Any]]):
232
+ batch = {k: torch.stack([instance[k] for instance in instances], dim=0) for k in ['image', 'depth', 'depth_mask_fin', 'depth_mask_inf', 'normal', 'intrinsics']}
233
+ batch = {
234
+ 'label_type': [instance['label_type'] for instance in instances],
235
+ 'is_metric': [instance['is_metric'] for instance in instances],
236
+ 'info': [{'dataset': instance['dataset'], 'filename': instance['filename']} for instance in instances],
237
+ **batch,
238
+ }
239
+ return batch
240
+
241
+ def get(self) -> Dict[str, Union[torch.Tensor, str]]:
242
+ return self.pipeline.get()
243
+
244
+ def start(self):
245
+ self.pipeline.start()
246
+
247
+ def stop(self):
248
+ self.pipeline.stop()
249
+
250
+ def __enter__(self):
251
+ self.start()
252
+ return self
253
+
254
+ def __exit__(self, exc_type, exc_value, traceback):
255
+ self.pipeline.stop()
256
+ return False
257
+
258
+
moge/train/losses.py ADDED
@@ -0,0 +1,293 @@
1
+ from typing import *
2
+ import math
3
+
4
+ import torch
5
+ import torch.nn.functional as F
6
+ import utils3d
7
+
8
+ from ..utils.geometry_torch import (
9
+ weighted_mean,
10
+ harmonic_mean,
11
+ geometric_mean,
12
+ normalized_view_plane_uv,
13
+ angle_diff_vec3
14
+ )
15
+ from ..utils.alignment import (
16
+ align_points_scale_z_shift,
17
+ align_points_scale,
18
+ align_points_scale_xyz_shift,
19
+ align_points_z_shift,
20
+ )
21
+
22
+
23
+ def _smooth(err: torch.FloatTensor, beta: float = 0.0) -> torch.FloatTensor:
24
+ if beta == 0:
25
+ return err
26
+ else:
27
+ return torch.where(err < beta, 0.5 * err.square() / beta, err - 0.5 * beta)
28
+
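+ # `_smooth` is the smooth-L1 (Huber-style) penalty: quadratic below `beta`, linear above.
+ # E.g. with beta=0.1: err=0.05 -> 0.0125 (= 0.5 * 0.05**2 / 0.1), err=1.0 -> 0.95 (= 1.0 - 0.05).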
29
+
30
+ def affine_invariant_global_loss(
31
+ pred_points: torch.Tensor,
32
+ gt_points: torch.Tensor,
33
+ align_resolution: int = 64,
34
+ beta: float = 0.0,
35
+ trunc: float = 1.0,
36
+ sparsity_aware: bool = False
37
+ ):
38
+ device = pred_points.device
39
+
40
+ mask = torch.isfinite(gt_points).all(dim=-1)
41
+ gt_points = torch.where(mask[..., None], gt_points, 1)
42
+
43
+ # Align
44
+ pred_points_lr, gt_points_lr, lr_mask = utils3d.pt.masked_nearest_resize(pred_points, gt_points, mask=mask, size=(align_resolution, align_resolution))
45
+ scale, shift = align_points_scale_z_shift(pred_points_lr.flatten(-3, -2), gt_points_lr.flatten(-3, -2), lr_mask.flatten(-2, -1) / gt_points_lr[..., 2].flatten(-2, -1).clamp_min(1e-2), trunc=trunc)
46
+ valid = scale > 0
47
+ scale, shift = torch.where(valid, scale, 0), torch.where(valid[..., None], shift, 0)
48
+
49
+ pred_points = scale[..., None, None, None] * pred_points + shift[..., None, None, :]
50
+
51
+ # Compute loss
52
+ weight = (valid[..., None, None] & mask).float() / gt_points[..., 2].clamp_min(1e-5)
53
+ weight = weight.clamp_max(10.0 * weighted_mean(weight, mask, dim=(-2, -1), keepdim=True)) # In case your data contains extremely small depth values
54
+ loss = _smooth((pred_points - gt_points).abs() * weight[..., None], beta=beta).mean(dim=(-3, -2, -1))
55
+
56
+ if sparsity_aware:
57
+ # Reweighting improves performance on sparse depth data. NOTE: this is not used in MoGe-1.
58
+ sparsity = mask.float().mean(dim=(-2, -1)) / lr_mask.float().mean(dim=(-2, -1))
59
+ loss = loss / (sparsity + 1e-7)
60
+
61
+ err = (pred_points.detach() - gt_points).norm(dim=-1) / gt_points[..., 2]
62
+
63
+ # Record any scalar metric
64
+ misc = {
65
+ 'truncated_error': weighted_mean(err.clamp_max(1.0), mask).item(),
66
+ 'delta': weighted_mean((err < 1).float(), mask).item()
67
+ }
68
+
69
+ return loss, misc, scale.detach()
70
+
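+ # Hedged usage sketch (shapes assumed from the code above: point maps are [..., H, W, 3],
+ # unknown ground truth marked with non-finite values; variable names are illustrative):
+ #
+ # pred = model_output_points      # (B, H, W, 3)
+ # gt = gt_points_with_nan_holes   # (B, H, W, 3), NaN where unknown
+ # loss, misc, scale = affine_invariant_global_loss(pred, gt)
+ # loss.mean().backward()
+ # # The detached `scale` can be passed as `global_scale` to affine_invariant_local_loss below.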
71
+
72
+ def monitoring(points: torch.Tensor):
73
+ return {
74
+ 'std': points.std().item(),
75
+ }
76
+
77
+
78
+ def compute_anchor_sampling_weight(
79
+ points: torch.Tensor,
80
+ mask: torch.Tensor,
81
+ radius_2d: torch.Tensor,
82
+ radius_3d: torch.Tensor,
83
+ num_test: int = 64
84
+ ) -> torch.Tensor:
85
+ # Importance sampling to balance the sampling probability of fine structures.
87
+ # NOTE: MoGe-1 uses uniform random sampling instead of importance sampling.
88
+ # This is an incremental trick introduced after the publication of the MoGe-1 paper.
88
+
89
+ height, width = points.shape[-3:-1]
90
+
91
+ pixel_i, pixel_j = torch.meshgrid(
92
+ torch.arange(height, device=points.device),
93
+ torch.arange(width, device=points.device),
94
+ indexing='ij'
95
+ )
96
+
97
+ test_delta_i = torch.randint(-radius_2d, radius_2d + 1, (height, width, num_test,), device=points.device) # [height, width, num_test]
98
+ test_delta_j = torch.randint(-radius_2d, radius_2d + 1, (height, width, num_test,), device=points.device) # [height, width, num_test]
99
+ test_i, test_j = pixel_i[..., None] + test_delta_i, pixel_j[..., None] + test_delta_j # [height, width, num_test]
100
+ test_mask = (test_i >= 0) & (test_i < height) & (test_j >= 0) & (test_j < width) # [height, width, num_test]
101
+ test_i, test_j = test_i.clamp(0, height - 1), test_j.clamp(0, width - 1) # [height, width, num_test]
102
+ test_mask = test_mask & mask[..., test_i, test_j] # [..., height, width, num_test]
103
+ test_points = points[..., test_i, test_j, :] # [..., height, width, num_test, 3]
104
+ test_dist = (test_points - points[..., None, :]).norm(dim=-1) # [..., height, width, num_test]
105
+
106
+ weight = 1 / ((test_dist <= radius_3d[..., None]) & test_mask).float().sum(dim=-1).clamp_min(1)
107
+ weight = torch.where(mask, weight, 0)
108
+ weight = weight / weight.sum(dim=(-2, -1), keepdim=True).add(1e-7) # [..., height, width]
109
+ return weight
110
+
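+ # The weight of each pixel is ~ 1 / (Monte-Carlo estimate of how many valid neighbors lie within
+ # `radius_3d` of it, out of `num_test` samples drawn within `radius_2d`), so pixels on thin
+ # structures (few 3D neighbors) are sampled as anchors more often than pixels on flat regions.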
111
+
112
+ def affine_invariant_local_loss(
113
+ pred_points: torch.Tensor,
114
+ gt_points: torch.Tensor,
115
+ focal: torch.Tensor,
116
+ global_scale: torch.Tensor,
117
+ level: Literal[4, 16, 64],
118
+ align_resolution: int = 32,
119
+ num_patches: int = 16,
120
+ beta: float = 0.0,
121
+ trunc: float = 1.0,
122
+ sparsity_aware: bool = False
123
+ ):
124
+ device, dtype = pred_points.device, pred_points.dtype
125
+ *batch_shape, height, width, _ = pred_points.shape
126
+ batch_size = math.prod(batch_shape)
127
+
128
+ gt_mask = torch.isfinite(gt_points).all(dim=-1)
129
+ gt_points = torch.where(gt_mask[..., None], gt_points, 1)
130
+ pred_points, gt_points, gt_mask, focal, global_scale = pred_points.reshape(-1, height, width, 3), gt_points.reshape(-1, height, width, 3), gt_mask.reshape(-1, height, width), focal.reshape(-1), global_scale.reshape(-1) if global_scale is not None else None
131
+
132
+ # Sample patch anchor points indices [num_total_patches]
133
+ radius_2d = math.ceil(0.5 / level * (height ** 2 + width ** 2) ** 0.5)
134
+ radius_3d = 0.5 / level / focal * gt_points[..., 2]
135
+ anchor_sampling_weights = compute_anchor_sampling_weight(gt_points, gt_mask, radius_2d, radius_3d, num_test=64)
136
+ where_mask = torch.where(gt_mask)
137
+ random_selection = torch.multinomial(anchor_sampling_weights[where_mask], num_patches * batch_size, replacement=True)
138
+ patch_batch_idx, patch_anchor_i, patch_anchor_j = [indices[random_selection] for indices in where_mask] # [num_total_patches]
139
+
140
+ # Get patch indices [num_total_patches, patch_h, patch_w]
141
+ patch_i, patch_j = torch.meshgrid(
142
+ torch.arange(-radius_2d, radius_2d + 1, device=device),
143
+ torch.arange(-radius_2d, radius_2d + 1, device=device),
144
+ indexing='ij'
145
+ )
146
+ patch_i, patch_j = patch_i + patch_anchor_i[:, None, None], patch_j + patch_anchor_j[:, None, None]
147
+ patch_mask = (patch_i >= 0) & (patch_i < height) & (patch_j >= 0) & (patch_j < width)
148
+ patch_i, patch_j = patch_i.clamp(0, height - 1), patch_j.clamp(0, width - 1)
149
+
150
+ # Get patch mask and gt patch points
151
+ gt_patch_anchor_points = gt_points[patch_batch_idx, patch_anchor_i, patch_anchor_j]
152
+ gt_patch_radius_3d = 0.5 / level / focal[patch_batch_idx] * gt_patch_anchor_points[:, 2]
153
+ gt_patch_points = gt_points[patch_batch_idx[:, None, None], patch_i, patch_j]
154
+ gt_patch_dist = (gt_patch_points - gt_patch_anchor_points[:, None, None, :]).norm(dim=-1)
155
+ patch_mask &= gt_mask[patch_batch_idx[:, None, None], patch_i, patch_j]
156
+ patch_mask &= gt_patch_dist <= gt_patch_radius_3d[:, None, None]
157
+
158
+ # Pick only non-empty patches
159
+ MINIMUM_POINTS_PER_PATCH = 32
160
+ nonempty = torch.where(patch_mask.sum(dim=(-2, -1)) >= MINIMUM_POINTS_PER_PATCH)
161
+ num_nonempty_patches = nonempty[0].shape[0]
162
+ if num_nonempty_patches == 0:
163
+ return torch.zeros(batch_shape, dtype=dtype, device=device), {} # Keep the batch shape consistent with the normal return path
164
+
165
+ # Finalize all patch variables
166
+ patch_batch_idx, patch_i, patch_j = patch_batch_idx[nonempty], patch_i[nonempty], patch_j[nonempty]
167
+ patch_mask = patch_mask[nonempty] # [num_nonempty_patches, patch_h, patch_w]
168
+ gt_patch_points = gt_patch_points[nonempty] # [num_nonempty_patches, patch_h, patch_w, 3]
169
+ gt_patch_radius_3d = gt_patch_radius_3d[nonempty] # [num_nonempty_patches]
170
+ gt_patch_anchor_points = gt_patch_anchor_points[nonempty] # [num_nonempty_patches, 3]
171
+ pred_patch_points = pred_points[patch_batch_idx[:, None, None], patch_i, patch_j]
172
+
173
+ # Align patch points
174
+ pred_patch_points_lr, gt_patch_points_lr, patch_lr_mask = utils3d.pt.masked_nearest_resize(pred_patch_points, gt_patch_points, mask=patch_mask, size=(align_resolution, align_resolution))
175
+ local_scale, local_shift = align_points_scale_xyz_shift(pred_patch_points_lr.flatten(-3, -2), gt_patch_points_lr.flatten(-3, -2), patch_lr_mask.flatten(-2) / gt_patch_radius_3d[:, None].add(1e-7), trunc=trunc)
176
+ if global_scale is not None:
177
+ scale_differ = local_scale / global_scale[patch_batch_idx]
178
+ patch_valid = (scale_differ > 0.1) & (scale_differ < 10.0) & (global_scale > 0)
179
+ else:
180
+ patch_valid = local_scale > 0
181
+ local_scale, local_shift = torch.where(patch_valid, local_scale, 0), torch.where(patch_valid[:, None], local_shift, 0)
182
+ patch_mask &= patch_valid[:, None, None]
183
+
184
+ pred_patch_points = local_scale[:, None, None, None] * pred_patch_points + local_shift[:, None, None, :] # [num_patches_nonempty, patch_h, patch_w, 3]
185
+
186
+ # Compute loss
187
+ gt_mean = harmonic_mean(gt_points[..., 2], gt_mask, dim=(-2, -1))
188
+ patch_weight = patch_mask.float() / gt_patch_points[..., 2].clamp_min(0.1 * gt_mean[patch_batch_idx, None, None]) # [num_patches_nonempty, patch_h, patch_w]
189
+ loss = _smooth((pred_patch_points - gt_patch_points).abs() * patch_weight[..., None], beta=beta).mean(dim=(-3, -2, -1)) # [num_patches_nonempty]
190
+
191
+ if sparsity_aware:
192
+ # Reweighting improves performance on sparse depth data. NOTE: this is not used in MoGe-1.
193
+ sparsity = patch_mask.float().mean(dim=(-2, -1)) / patch_lr_mask.float().mean(dim=(-2, -1))
194
+ loss = loss / (sparsity + 1e-7)
195
+ loss = torch.scatter_reduce(torch.zeros(batch_size, dtype=dtype, device=device), dim=0, index=patch_batch_idx, src=loss, reduce='sum') / num_patches
196
+ loss = loss.reshape(batch_shape)
197
+
198
+ err = (pred_patch_points.detach() - gt_patch_points).norm(dim=-1) / gt_patch_radius_3d[..., None, None]
199
+
200
+ # Record any scalar metric
201
+ misc = {
202
+ 'truncated_error': weighted_mean(err.clamp_max(1), patch_mask).item(),
203
+ 'delta': weighted_mean((err < 1).float(), patch_mask).item()
204
+ }
205
+
206
+ return loss, misc
207
+
208
+
209
+ def normal_loss(points: torch.Tensor, gt_points: torch.Tensor) -> torch.Tensor:
210
+ device, dtype = points.device, points.dtype
211
+ height, width = points.shape[-3:-1]
212
+
213
+ mask = torch.isfinite(gt_points).all(dim=-1)
214
+ gt_points = torch.where(mask[..., None], gt_points, 1)
215
+
216
+ leftup, rightup, leftdown, rightdown = points[..., :-1, :-1, :], points[..., :-1, 1:, :], points[..., 1:, :-1, :], points[..., 1:, 1:, :]
217
+ upxleft = torch.cross(rightup - rightdown, leftdown - rightdown, dim=-1)
218
+ leftxdown = torch.cross(leftup - rightup, rightdown - rightup, dim=-1)
219
+ downxright = torch.cross(leftdown - leftup, rightup - leftup, dim=-1)
220
+ rightxup = torch.cross(rightdown - leftdown, leftup - leftdown, dim=-1)
221
+
222
+ gt_leftup, gt_rightup, gt_leftdown, gt_rightdown = gt_points[..., :-1, :-1, :], gt_points[..., :-1, 1:, :], gt_points[..., 1:, :-1, :], gt_points[..., 1:, 1:, :]
223
+ gt_upxleft = torch.cross(gt_rightup - gt_rightdown, gt_leftdown - gt_rightdown, dim=-1)
224
+ gt_leftxdown = torch.cross(gt_leftup - gt_rightup, gt_rightdown - gt_rightup, dim=-1)
225
+ gt_downxright = torch.cross(gt_leftdown - gt_leftup, gt_rightup - gt_leftup, dim=-1)
226
+ gt_rightxup = torch.cross(gt_rightdown - gt_leftdown, gt_leftup - gt_leftdown, dim=-1)
227
+
228
+ mask_leftup, mask_rightup, mask_leftdown, mask_rightdown = mask[..., :-1, :-1], mask[..., :-1, 1:], mask[..., 1:, :-1], mask[..., 1:, 1:]
229
+ mask_upxleft = mask_rightup & mask_leftdown & mask_rightdown
230
+ mask_leftxdown = mask_leftup & mask_rightdown & mask_rightup
231
+ mask_downxright = mask_leftdown & mask_rightup & mask_leftup
232
+ mask_rightxup = mask_rightdown & mask_leftup & mask_leftdown
233
+
234
+ MIN_ANGLE, MAX_ANGLE, BETA_RAD = math.radians(1), math.radians(90), math.radians(3)
235
+
236
+ loss = mask_upxleft * _smooth(angle_diff_vec3(upxleft, gt_upxleft).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD) \
237
+ + mask_leftxdown * _smooth(angle_diff_vec3(leftxdown, gt_leftxdown).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD) \
238
+ + mask_downxright * _smooth(angle_diff_vec3(downxright, gt_downxright).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD) \
239
+ + mask_rightxup * _smooth(angle_diff_vec3(rightxup, gt_rightxup).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD)
240
+
241
+ loss = loss.mean() / (4 * max(points.shape[-3:-1]))
242
+
243
+ return loss, {}
244
+
245
+
246
+ def edge_loss(points: torch.Tensor, gt_points: torch.Tensor) -> torch.Tensor:
247
+ device, dtype = points.device, points.dtype
248
+ height, width = points.shape[-3:-1]
249
+
250
+ mask = torch.isfinite(gt_points).all(dim=-1)
251
+ gt_points = torch.where(mask[..., None], gt_points, 1)
252
+
253
+ dx = points[..., :-1, :, :] - points[..., 1:, :, :]
254
+ dy = points[..., :, :-1, :] - points[..., :, 1:, :]
255
+
256
+ gt_dx = gt_points[..., :-1, :, :] - gt_points[..., 1:, :, :]
257
+ gt_dy = gt_points[..., :, :-1, :] - gt_points[..., :, 1:, :]
258
+
259
+ mask_dx = mask[..., :-1, :] & mask[..., 1:, :]
260
+ mask_dy = mask[..., :, :-1] & mask[..., :, 1:]
261
+
262
+ MIN_ANGLE, MAX_ANGLE, BETA_RAD = math.radians(0.1), math.radians(90), math.radians(3)
263
+
264
+ loss_dx = mask_dx * _smooth(angle_diff_vec3(dx, gt_dx).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD)
265
+ loss_dy = mask_dy * _smooth(angle_diff_vec3(dy, gt_dy).clamp(MIN_ANGLE, MAX_ANGLE), beta=BETA_RAD)
266
+ loss = (loss_dx.mean(dim=(-2, -1)) + loss_dy.mean(dim=(-2, -1))) / (2 * max(points.shape[-3:-1]))
267
+
268
+ return loss, {}
269
+
270
+
271
+ def mask_l2_loss(pred_mask: torch.Tensor, gt_mask_pos: torch.Tensor, gt_mask_neg: torch.Tensor) -> torch.Tensor:
272
+ loss = gt_mask_neg.float() * pred_mask.square() + gt_mask_pos.float() * (1 - pred_mask).square()
273
+ loss = loss.mean(dim=(-2, -1))
274
+ return loss, {}
275
+
276
+
277
+ def mask_bce_loss(pred_mask_prob: torch.Tensor, gt_mask_pos: torch.Tensor, gt_mask_neg: torch.Tensor) -> torch.Tensor:
278
+ loss = (gt_mask_pos | gt_mask_neg) * F.binary_cross_entropy(pred_mask_prob, gt_mask_pos.float(), reduction='none')
279
+ loss = loss.mean(dim=(-2, -1))
280
+ return loss, {}
281
+
282
+
283
+ def metric_scale_loss(scale_pred: torch.Tensor, scale_gt: torch.Tensor):
284
+ valid = scale_gt > 0
285
+ return torch.where(valid, F.mse_loss(scale_pred.log(), torch.where(valid, scale_gt.log(), 0), reduction='none'), 0), {}
286
+
287
+
288
+ def normal_map_loss(pred_normal: torch.Tensor, gt_normal: torch.Tensor) -> torch.Tensor:
289
+ mask = torch.isfinite(gt_normal).all(dim=-1)
290
+ gt_normal = torch.where(mask[..., None], gt_normal, 1)
291
+
292
+ loss = (mask * utils3d.pt.angle_between(pred_normal, gt_normal).square()).mean(dim=(-2, -1))
293
+ return loss, {}
moge/train/utils.py ADDED
@@ -0,0 +1,57 @@
1
+ from typing import *
2
+ import fnmatch
3
+
4
+ import sympy
5
+ import torch
6
+ import torch.nn as nn
7
+
8
+
9
+ def any_match(s: str, patterns: List[str]) -> bool:
10
+ return any(fnmatch.fnmatch(s, pat) for pat in patterns)
11
+
12
+
13
+ def build_optimizer(model: nn.Module, optimizer_config: Dict[str, Any]) -> torch.optim.Optimizer:
14
+ named_param_groups = [
15
+ {
16
+ k: p for k, p in model.named_parameters() if any_match(k, param_group_config['params']['include']) and not any_match(k, param_group_config['params'].get('exclude', []))
17
+ } for param_group_config in optimizer_config['params']
18
+ ]
19
+ excluded_params = [k for k, p in model.named_parameters() if p.requires_grad and not any(k in named_params for named_params in named_param_groups)]
20
+ assert len(excluded_params) == 0, f'The following parameters require grad but are excluded from the optimizer: {excluded_params}'
21
+ optimizer_cls = getattr(torch.optim, optimizer_config['type'])
22
+ optimizer = optimizer_cls([
23
+ {
24
+ **param_group_config,
25
+ 'params': list(params.values()),
26
+ } for param_group_config, params in zip(optimizer_config['params'], named_param_groups)
27
+ ])
28
+ return optimizer
29
+
30
+
31
+ def parse_lr_lambda(s: str) -> Callable[[int], float]:
32
+ epoch = sympy.symbols('epoch')
33
+ lr_lambda = sympy.sympify(s)
34
+ return sympy.lambdify(epoch, lr_lambda, 'math')
35
+
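+ # Hedged example (sympy accepts standard math syntax; `epoch` is the free symbol):
+ # >>> f = parse_lr_lambda("0.5 * (1 + cos(pi * epoch / 100))")
+ # >>> f(0), f(100)
+ # (1.0, 0.0)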
36
+
37
+ def build_lr_scheduler(optimizer: torch.optim.Optimizer, scheduler_config: Dict[str, Any]) -> torch.optim.lr_scheduler._LRScheduler:
38
+ if scheduler_config['type'] == "SequentialLR":
39
+ child_schedulers = [
40
+ build_lr_scheduler(optimizer, child_scheduler_config)
41
+ for child_scheduler_config in scheduler_config['params']['schedulers']
42
+ ]
43
+ return torch.optim.lr_scheduler.SequentialLR(optimizer, schedulers=child_schedulers, milestones=scheduler_config['params']['milestones'])
44
+ elif scheduler_config['type'] == "LambdaLR":
45
+ lr_lambda = scheduler_config['params']['lr_lambda']
46
+ if isinstance(lr_lambda, str):
47
+ lr_lambda = parse_lr_lambda(lr_lambda)
48
+ elif isinstance(lr_lambda, list):
49
+ lr_lambda = [parse_lr_lambda(l) for l in lr_lambda]
50
+ return torch.optim.lr_scheduler.LambdaLR(
51
+ optimizer,
52
+ lr_lambda=lr_lambda,
53
+ )
54
+ else:
55
+ scheduler_cls = getattr(torch.optim.lr_scheduler, scheduler_config['type'])
56
+ scheduler = scheduler_cls(optimizer, **scheduler_config.get('params', {}))
57
+ return scheduler
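+ # Hedged sketch of the config layout these two builders expect (field names inferred from the
+ # code above; the concrete class names and values are illustrative only):
+ #
+ # optimizer_config = {
+ #     'type': 'AdamW',   # any torch.optim class name
+ #     'params': [
+ #         {'params': {'include': ['backbone.*'], 'exclude': []}, 'lr': 1e-5},
+ #         {'params': {'include': ['*'], 'exclude': ['backbone.*']}, 'lr': 1e-4},  # globs over named_parameters()
+ #     ]
+ # }
+ # scheduler_config = {
+ #     'type': 'SequentialLR',
+ #     'params': {
+ #         'schedulers': [
+ #             {'type': 'LambdaLR', 'params': {'lr_lambda': 'Min(1, epoch / 1000)'}},
+ #             {'type': 'LambdaLR', 'params': {'lr_lambda': '0.9 ** (epoch / 1000)'}},
+ #         ],
+ #         'milestones': [1000],
+ #     }
+ # }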
moge/utils/__init__.py ADDED
File without changes
moge/utils/alignment.py ADDED
@@ -0,0 +1,416 @@
1
+ from typing import *
2
+ import math
3
+ from collections import namedtuple
4
+
5
+ import numpy as np
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torch.types
10
+ import utils3d
11
+
12
+
13
+ def scatter_min(size: int, dim: int, index: torch.LongTensor, src: torch.Tensor) -> torch.return_types.min:
14
+ "Scatter the minimum value along the given dimension of `input` into `src` at the indices specified in `index`."
15
+ shape = src.shape[:dim] + (size,) + src.shape[dim + 1:]
16
+ minimum = torch.full(shape, float('inf'), dtype=src.dtype, device=src.device).scatter_reduce(dim=dim, index=index, src=src, reduce='amin', include_self=False)
17
+ minimum_where = torch.where(src == torch.gather(minimum, dim=dim, index=index))
18
+ indices = torch.full(shape, -1, dtype=torch.long, device=src.device)
19
+ indices[(*minimum_where[:dim], index[minimum_where], *minimum_where[dim + 1:])] = minimum_where[dim]
20
+ return torch.return_types.min((minimum, indices))
21
+
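+ # Worked example:
+ # >>> scatter_min(size=2, dim=0, index=torch.tensor([0, 0, 1]), src=torch.tensor([3., 1., 2.]))
+ # torch.return_types.min(values=tensor([1., 2.]), indices=tensor([1, 2]))
+ # i.e. the minimum over the src positions mapped to each output slot, plus where it came from.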
22
+
23
+ def split_batch_fwd(fn: Callable, chunk_size: int, *args, **kwargs):
24
+ batch_size = next(x for x in (*args, *kwargs.values()) if isinstance(x, torch.Tensor)).shape[0]
25
+ n_chunks = batch_size // chunk_size + (batch_size % chunk_size > 0)
26
+ splited_args = tuple(arg.split(chunk_size, dim=0) if isinstance(arg, torch.Tensor) else [arg] * n_chunks for arg in args)
27
+ splited_kwargs = {k: v.split(chunk_size, dim=0) if isinstance(v, torch.Tensor) else [v] * n_chunks for k, v in kwargs.items()}
28
+ results = []
29
+ for i in range(n_chunks):
30
+ chunk_args = tuple(arg[i] for arg in splited_args)
31
+ chunk_kwargs = {k: v[i] for k, v in splited_kwargs.items()}
32
+ results.append(fn(*chunk_args, **chunk_kwargs))
33
+
34
+ if isinstance(results[0], tuple):
35
+ return tuple(torch.cat(r, dim=0) for r in zip(*results))
36
+ else:
37
+ return torch.cat(results, dim=0)
38
+
39
+
40
+ def _pad_inf(x_: torch.Tensor):
41
+ return torch.cat([torch.full_like(x_[..., :1], -torch.inf), x_, torch.full_like(x_[..., :1], torch.inf)], dim=-1)
42
+
43
+
44
+ def _pad_cumsum(cumsum: torch.Tensor):
45
+ return torch.cat([torch.zeros_like(cumsum[..., :1]), cumsum, cumsum[..., -1:]], dim=-1)
46
+
47
+
48
+ def _compute_residual(a: torch.Tensor, xyw: torch.Tensor, trunc: float):
49
+ return a.mul(xyw[..., 0]).sub_(xyw[..., 1]).abs_().mul_(xyw[..., 2]).clamp_max_(trunc).sum(dim=-1)
50
+
51
+
52
+ def align(x: torch.Tensor, y: torch.Tensor, w: torch.Tensor, trunc: Optional[Union[float, torch.Tensor]] = None, eps: float = 1e-7) -> Tuple[torch.Tensor, torch.Tensor, torch.LongTensor]:
53
+ """
54
+ If trunc is None, solve `min sum_i w_i * |a * x_i - y_i|`, otherwise solve `min sum_i min(trunc, w_i * |a * x_i - y_i|)`.
55
+
56
+ w_i must be >= 0.
57
+
58
+ ### Parameters:
59
+ - `x`: tensor of shape (..., n)
60
+ - `y`: tensor of shape (..., n)
61
+ - `w`: tensor of shape (..., n)
62
+ - `trunc`: optional, float or tensor of shape (..., n) or None
63
+
64
+ ### Returns:
65
+ - `a`: tensor of shape (...), differentiable
66
+ - `loss`: tensor of shape (...), value of loss function at `a`, detached
67
+ - `index`: tensor of shape (...), where a = y[idx] / x[idx]
68
+ """
69
+ if trunc is None:
70
+ x, y, w = torch.broadcast_tensors(x, y, w)
71
+ sign = torch.sign(x)
72
+ x, y = x * sign, y * sign
73
+ y_div_x = y / x.clamp_min(eps)
74
+ y_div_x, argsort = y_div_x.sort(dim=-1)
75
+
76
+ wx = torch.gather(x * w, dim=-1, index=argsort)
77
+ derivatives = 2 * wx.cumsum(dim=-1) - wx.sum(dim=-1, keepdim=True)
78
+ search = torch.searchsorted(derivatives, torch.zeros_like(derivatives[..., :1]), side='left').clamp_max(derivatives.shape[-1] - 1)
79
+
80
+ a = y_div_x.gather(dim=-1, index=search).squeeze(-1)
81
+ index = argsort.gather(dim=-1, index=search).squeeze(-1)
82
+ loss = (w * (a[..., None] * x - y).abs()).sum(dim=-1)
83
+
84
+ else:
85
+ # Reshape to (batch_size, n) for simplicity
86
+ x, y, w = torch.broadcast_tensors(x, y, w)
87
+ batch_shape = x.shape[:-1]
88
+ batch_size = math.prod(batch_shape)
89
+ x, y, w = x.reshape(-1, x.shape[-1]), y.reshape(-1, y.shape[-1]), w.reshape(-1, w.shape[-1])
90
+
91
+ sign = torch.sign(x)
92
+ x, y = x * sign, y * sign
93
+ wx, wy = w * x, w * y
94
+ xyw = torch.stack([x, y, w], dim=-1) # Stacked for convenient gathering
95
+
96
+ y_div_x = A = y / x.clamp_min(eps)
97
+ B = (wy - trunc) / wx.clamp_min(eps)
98
+ C = (wy + trunc) / wx.clamp_min(eps)
99
+ with torch.no_grad():
100
+ # Calculate prefix sums in the sorted orders of A, B, C
101
+ A, A_argsort = A.sort(dim=-1)
102
+ Q_A = torch.cumsum(torch.gather(wx, dim=-1, index=A_argsort), dim=-1)
103
+ A, Q_A = _pad_inf(A), _pad_cumsum(Q_A) # Pad [-inf, A1, ..., An, inf] and [0, Q1, ..., Qn, Qn] to handle edge cases.
104
+
105
+ B, B_argsort = B.sort(dim=-1)
106
+ Q_B = torch.cumsum(torch.gather(wx, dim=-1, index=B_argsort), dim=-1)
107
+ B, Q_B = _pad_inf(B), _pad_cumsum(Q_B)
108
+
109
+ C, C_argsort = C.sort(dim=-1)
110
+ Q_C = torch.cumsum(torch.gather(wx, dim=-1, index=C_argsort), dim=-1)
111
+ C, Q_C = _pad_inf(C), _pad_cumsum(Q_C)
112
+
113
+ # Calculate the left and right derivatives of the objective at each candidate point
114
+ j_A = torch.searchsorted(A, y_div_x, side='left').sub_(1)
115
+ j_B = torch.searchsorted(B, y_div_x, side='left').sub_(1)
116
+ j_C = torch.searchsorted(C, y_div_x, side='left').sub_(1)
117
+ left_derivative = 2 * torch.gather(Q_A, dim=-1, index=j_A) - torch.gather(Q_B, dim=-1, index=j_B) - torch.gather(Q_C, dim=-1, index=j_C)
118
+ j_A = torch.searchsorted(A, y_div_x, side='right').sub_(1)
119
+ j_B = torch.searchsorted(B, y_div_x, side='right').sub_(1)
120
+ j_C = torch.searchsorted(C, y_div_x, side='right').sub_(1)
121
+ right_derivative = 2 * torch.gather(Q_A, dim=-1, index=j_A) - torch.gather(Q_B, dim=-1, index=j_B) - torch.gather(Q_C, dim=-1, index=j_C)
122
+
123
+ # Find extrema
124
+ is_extrema = (left_derivative < 0) & (right_derivative >= 0)
125
+ is_extrema[..., 0] |= ~is_extrema.any(dim=-1) # In case all derivatives are zero, take the first one as extrema.
126
+ where_extrema_batch, where_extrema_index = torch.where(is_extrema)
127
+
128
+ # Calculate objective value at extrema
129
+ extrema_a = y_div_x[where_extrema_batch, where_extrema_index] # (num_extrema,)
130
+ MAX_ELEMENTS = 4096 ** 2 # Split into small batches (~1 GB peak) to avoid OOM in case there are too many extrema.
131
+ SPLIT_SIZE = MAX_ELEMENTS // x.shape[-1]
132
+ extrema_value = torch.cat([
133
+ _compute_residual(extrema_a_split[:, None], xyw[extrema_i_split, :, :], trunc)
134
+ for extrema_a_split, extrema_i_split in zip(extrema_a.split(SPLIT_SIZE), where_extrema_batch.split(SPLIT_SIZE))
135
+ ]) # (num_extrema,)
136
+
137
+ # Find minima among corresponding extrema
138
+ minima, indices = scatter_min(size=batch_size, dim=0, index=where_extrema_batch, src=extrema_value) # (batch_size,)
139
+ index = where_extrema_index[indices]
140
+
141
+ a = torch.gather(y, dim=-1, index=index[..., None]) / torch.gather(x, dim=-1, index=index[..., None]).clamp_min(eps)
142
+ a = a.reshape(batch_shape)
143
+ loss = minima.reshape(batch_shape)
144
+ index = index.reshape(batch_shape)
145
+
146
+ return a, loss, index
147
+
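+ # Worked example (no truncation): the minimizer of sum_i w_i * |a * x_i - y_i| is always one of
+ # the ratios y_i / x_i (a weighted median of ratios). With x = [1, 2, 3], y = [2, 4, 6] and
+ # uniform weights, align(...) returns a = 2.0 with loss 0.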
148
+
149
+ def align_depth_scale(depth_src: torch.Tensor, depth_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None):
150
+ """
151
+ Align `depth_src` to `depth_tgt` with given constant weights.
152
+
153
+ ### Parameters:
154
+ - `depth_src: torch.Tensor` of shape (..., N)
155
+ - `depth_tgt: torch.Tensor` of shape (..., N)
156
+
157
+ """
158
+ scale, _, _ = align(depth_src, depth_tgt, weight, trunc)
159
+
160
+ return scale
161
+
162
+
163
+ def align_depth_affine(depth_src: torch.Tensor, depth_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None):
164
+ """
165
+ Align `depth_src` to `depth_tgt` with given constant weights.
166
+
167
+ ### Parameters:
168
+ - `depth_src: torch.Tensor` of shape (..., N)
169
+ - `depth_tgt: torch.Tensor` of shape (..., N)
170
+ - `weight: torch.Tensor` of shape (..., N)
171
+ - `trunc: float` or tensor of shape (..., N) or None
172
+
173
+ ### Returns:
174
+ - `scale: torch.Tensor` of shape (...).
175
+ - `shift: torch.Tensor` of shape (...).
176
+ """
177
+ dtype, device = depth_src.dtype, depth_src.device
178
+
179
+ # Flatten batch dimensions for simplicity
180
+ batch_shape, n = depth_src.shape[:-1], depth_src.shape[-1]
181
+ batch_size = math.prod(batch_shape)
182
+ depth_src, depth_tgt, weight = depth_src.reshape(batch_size, n), depth_tgt.reshape(batch_size, n), weight.reshape(batch_size, n)
183
+
184
+ # Here, we take anchors only for non-zero weights.
185
+ # Although the results would still be correct even if anchor points had zero weight,
187
+ # it would waste computation and may cause instability in some cases, e.g. too many extrema.
187
+ anchors_where_batch, anchors_where_n = torch.where(weight > 0)
188
+
189
+ # Stop gradient when solving optimal anchors
190
+ with torch.no_grad():
191
+ depth_src_anchor = depth_src[anchors_where_batch, anchors_where_n] # (anchors)
192
+ depth_tgt_anchor = depth_tgt[anchors_where_batch, anchors_where_n] # (anchors)
193
+
194
+ depth_src_anchored = depth_src[anchors_where_batch, :] - depth_src_anchor[..., None] # (anchors, n)
195
+ depth_tgt_anchored = depth_tgt[anchors_where_batch, :] - depth_tgt_anchor[..., None] # (anchors, n)
196
+ weight_anchored = weight[anchors_where_batch, :] # (anchors, n)
197
+
198
+ scale, loss, index = align(depth_src_anchored, depth_tgt_anchored, weight_anchored, trunc) # (anchors)
199
+
200
+ loss, index_anchor = scatter_min(size=batch_size, dim=0, index=anchors_where_batch, src=loss) # (batch_size,)
201
+
202
+ # Reproduce by indexing for shorter compute graph
203
+ index_1 = anchors_where_n[index_anchor] # (batch_size,)
204
+ index_2 = index[index_anchor] # (batch_size,)
205
+
206
+ tgt_1, src_1 = torch.gather(depth_tgt, dim=1, index=index_1[..., None]).squeeze(-1), torch.gather(depth_src, dim=1, index=index_1[..., None]).squeeze(-1)
207
+ tgt_2, src_2 = torch.gather(depth_tgt, dim=1, index=index_2[..., None]).squeeze(-1), torch.gather(depth_src, dim=1, index=index_2[..., None]).squeeze(-1)
208
+
209
+ scale = (tgt_2 - tgt_1) / torch.where(src_2 != src_1, src_2 - src_1, 1e-7)
210
+ shift = tgt_1 - scale * src_1
211
+
212
+ scale, shift = scale.reshape(batch_shape), shift.reshape(batch_shape)
213
+
214
+ return scale, shift
215
+
216
+ def align_depth_affine_irls(depth_src: torch.Tensor, depth_tgt: torch.Tensor, weight: Optional[torch.Tensor], max_iter: int = 100, eps: float = 1e-12):
217
+ """
218
+ Align `depth_src` to `depth_tgt` with given constant weights using IRLS.
219
+ """
220
+ dtype, device = depth_src.dtype, depth_src.device
221
+
222
+ w = weight
223
+ x = torch.stack([depth_src, torch.ones_like(depth_src)], dim=-1)
224
+ y = depth_tgt
225
+
226
+ for i in range(max_iter):
227
+ beta = (x.transpose(-1, -2) @ (w * y)) @ (x.transpose(-1, -2) @ (w[..., None] * x)).inverse().transpose(-2, -1)
228
+ w = 1 / (y - (x @ beta[..., None])[..., 0]).abs().clamp_min(eps)
229
+
230
+ return beta[..., 0], beta[..., 1]
231
+
232
+
233
+ def align_points_scale(points_src: torch.Tensor, points_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None):
234
+ """
235
+ ### Parameters:
236
+ - `points_src: torch.Tensor` of shape (..., N, 3)
237
+ - `points_tgt: torch.Tensor` of shape (..., N, 3)
238
+ - `weight: torch.Tensor` of shape (..., N)
239
+
240
+ ### Returns:
241
+ - `scale: torch.Tensor` of shape (...). Positive solutions are not guaranteed; filter out negative scales before using it.
243
+ """
244
+ dtype, device = points_src.dtype, points_src.device
245
+
246
+ scale, _, _ = align(points_src.flatten(-2), points_tgt.flatten(-2), weight[..., None].expand_as(points_src).flatten(-2), trunc)
247
+
248
+ return scale
249
+
250
+
251
+ def align_points_scale_z_shift(points_src: torch.Tensor, points_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None):
252
+ """
253
+ Align `points_src` to `points_tgt` with respect to a shared xyz scale and z shift.
254
+ It is similar to `align_affine` but scale and shift are applied to different dimensions.
255
+
256
+ ### Parameters:
257
+ - `points_src: torch.Tensor` of shape (..., N, 3)
258
+ - `points_tgt: torch.Tensor` of shape (..., N, 3)
259
+ - `weights: torch.Tensor` of shape (..., N)
260
+
261
+ ### Returns:
262
+ - `scale: torch.Tensor` of shape (...).
263
+ - `shift: torch.Tensor` of shape (..., 3). x and y shifts are zeros.
264
+ """
265
+ dtype, device = points_src.dtype, points_src.device
266
+
267
+ # Flatten batch dimensions for simplicity
268
+ batch_shape, n = points_src.shape[:-2], points_src.shape[-2]
269
+ batch_size = math.prod(batch_shape)
270
+ points_src, points_tgt, weight = points_src.reshape(batch_size, n, 3), points_tgt.reshape(batch_size, n, 3), weight.reshape(batch_size, n)
271
+
272
+ # Take anchors
273
+ anchor_where_batch, anchor_where_n = torch.where(weight > 0)
274
+ with torch.no_grad():
275
+ zeros = torch.zeros(anchor_where_batch.shape[0], device=device, dtype=dtype)
276
+ points_src_anchor = torch.stack([zeros, zeros, points_src[anchor_where_batch, anchor_where_n, 2]], dim=-1) # (anchors, 3)
277
+ points_tgt_anchor = torch.stack([zeros, zeros, points_tgt[anchor_where_batch, anchor_where_n, 2]], dim=-1) # (anchors, 3)
278
+
279
+ points_src_anchored = points_src[anchor_where_batch, :, :] - points_src_anchor[..., None, :] # (anchors, n, 3)
280
+ points_tgt_anchored = points_tgt[anchor_where_batch, :, :] - points_tgt_anchor[..., None, :] # (anchors, n, 3)
281
+ weight_anchored = weight[anchor_where_batch, :, None].expand(-1, -1, 3) # (anchors, n, 3)
282
+
283
+ # Solve optimal scale and shift for each anchor
284
+ MAX_ELEMENTS = 2 ** 20
285
+ scale, loss, index = split_batch_fwd(align, MAX_ELEMENTS // n, points_src_anchored.flatten(-2), points_tgt_anchored.flatten(-2), weight_anchored.flatten(-2), trunc) # (anchors,)
286
+
287
+ loss, index_anchor = scatter_min(size=batch_size, dim=0, index=anchor_where_batch, src=loss) # (batch_size,)
288
+
289
+ # Reproduce by indexing for shorter compute graph
290
+ index_2 = index[index_anchor] # (batch_size,) [0, 3n)
291
+ index_1 = anchor_where_n[index_anchor] * 3 + index_2 % 3 # (batch_size,) [0, 3n)
292
+
293
+ zeros = torch.zeros((batch_size, n), device=device, dtype=dtype)
294
+ points_tgt_00z, points_src_00z = torch.stack([zeros, zeros, points_tgt[..., 2]], dim=-1), torch.stack([zeros, zeros, points_src[..., 2]], dim=-1)
295
+ tgt_1, src_1 = torch.gather(points_tgt_00z.flatten(-2), dim=1, index=index_1[..., None]).squeeze(-1), torch.gather(points_src_00z.flatten(-2), dim=1, index=index_1[..., None]).squeeze(-1)
296
+ tgt_2, src_2 = torch.gather(points_tgt.flatten(-2), dim=1, index=index_2[..., None]).squeeze(-1), torch.gather(points_src.flatten(-2), dim=1, index=index_2[..., None]).squeeze(-1)
297
+
298
+ scale = (tgt_2 - tgt_1) / torch.where(src_2 != src_1, src_2 - src_1, 1.0)
299
+ shift = torch.gather(points_tgt_00z, dim=1, index=(index_1 // 3)[..., None, None].expand(-1, -1, 3)).squeeze(-2) - scale[..., None] * torch.gather(points_src_00z, dim=1, index=(index_1 // 3)[..., None, None].expand(-1, -1, 3)).squeeze(-2)
300
+ scale, shift = scale.reshape(batch_shape), shift.reshape(*batch_shape, 3)
301
+
302
+ return scale, shift
303
+
304
+
305
+ def align_points_scale_xyz_shift(points_src: torch.Tensor, points_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None, max_iters: int = 30, eps: float = 1e-6):
306
+ """
307
+ Align `points_src` to `points_tgt` with respect to a shared scale and an xyz shift.
308
+ It is similar to `align_affine` but scale and shift are applied to different dimensions.
309
+
310
+ ### Parameters:
311
+ - `points_src: torch.Tensor` of shape (..., N, 3)
312
+ - `points_tgt: torch.Tensor` of shape (..., N, 3)
313
+ - `weights: torch.Tensor` of shape (..., N)
314
+
315
+ ### Returns:
316
+ - `scale: torch.Tensor` of shape (...).
317
+ - `shift: torch.Tensor` of shape (..., 3)
318
+ """
319
+ dtype, device = points_src.dtype, points_src.device
320
+
321
+ # Flatten batch dimensions for simplicity
322
+ batch_shape, n = points_src.shape[:-2], points_src.shape[-2]
323
+ batch_size = math.prod(batch_shape)
324
+ points_src, points_tgt, weight = points_src.reshape(batch_size, n, 3), points_tgt.reshape(batch_size, n, 3), weight.reshape(batch_size, n)
325
+
326
+ # Take anchors
327
+ anchor_where_batch, anchor_where_n = torch.where(weight > 0)
328
+
329
+ with torch.no_grad():
330
+ points_src_anchor = points_src[anchor_where_batch, anchor_where_n] # (anchors, 3)
331
+ points_tgt_anchor = points_tgt[anchor_where_batch, anchor_where_n] # (anchors, 3)
332
+
333
+ points_src_anchored = points_src[anchor_where_batch, :, :] - points_src_anchor[..., None, :] # (anchors, n, 3)
334
+ points_tgt_anchored = points_tgt[anchor_where_batch, :, :] - points_tgt_anchor[..., None, :] # (anchors, n, 3)
335
+ weight_anchored = weight[anchor_where_batch, :, None].expand(-1, -1, 3) # (anchors, n, 3)
336
+
337
+ # Solve optimal scale and shift for each anchor
338
+ MAX_ELEMENTS = 2 ** 20
339
+ scale, loss, index = split_batch_fwd(align, MAX_ELEMENTS // 2, points_src_anchored.flatten(-2), points_tgt_anchored.flatten(-2), weight_anchored.flatten(-2), trunc) # (anchors,)
340
+
341
+ # Get optimal scale and shift for each batch element
342
+ loss, index_anchor = scatter_min(size=batch_size, dim=0, index=anchor_where_batch, src=loss) # (batch_size,)
343
+
344
+ index_2 = index[index_anchor] # (batch_size,) [0, 3n)
345
+ index_1 = anchor_where_n[index_anchor] * 3 + index_2 % 3 # (batch_size,) [0, 3n)
346
+
347
+ src_1, tgt_1 = torch.gather(points_src.flatten(-2), dim=1, index=index_1[..., None]).squeeze(-1), torch.gather(points_tgt.flatten(-2), dim=1, index=index_1[..., None]).squeeze(-1)
348
+ src_2, tgt_2 = torch.gather(points_src.flatten(-2), dim=1, index=index_2[..., None]).squeeze(-1), torch.gather(points_tgt.flatten(-2), dim=1, index=index_2[..., None]).squeeze(-1)
349
+
350
+ scale = (tgt_2 - tgt_1) / torch.where(src_2 != src_1, src_2 - src_1, 1.0)
351
+ shift = torch.gather(points_tgt, dim=1, index=(index_1 // 3)[..., None, None].expand(-1, -1, 3)).squeeze(-2) - scale[..., None] * torch.gather(points_src, dim=1, index=(index_1 // 3)[..., None, None].expand(-1, -1, 3)).squeeze(-2)
352
+
353
+ scale, shift = scale.reshape(batch_shape), shift.reshape(*batch_shape, 3)
354
+
355
+ return scale, shift
356
+
357
+
358
+ def align_points_z_shift(points_src: torch.Tensor, points_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None, max_iters: int = 30, eps: float = 1e-6):
359
+ """
360
+ Align `points_src` to `points_tgt` with respect to a Z-axis shift.
361
+
362
+ ### Parameters:
363
+ - `points_src: torch.Tensor` of shape (..., N, 3)
364
+ - `points_tgt: torch.Tensor` of shape (..., N, 3)
365
+ - `weights: torch.Tensor` of shape (..., N)
366
+
367
+ ### Returns:
368
+ - `scale: torch.Tensor` of shape (...).
369
+ - `shift: torch.Tensor` of shape (..., 3)
370
+ """
371
+ dtype, device = points_src.dtype, points_src.device
372
+
373
+ shift, _, _ = align(torch.ones_like(points_src[..., 2]), points_tgt[..., 2] - points_src[..., 2], weight, trunc)
374
+ shift = torch.stack([torch.zeros_like(shift), torch.zeros_like(shift), shift], dim=-1)
375
+
376
+ return shift
377
+
378
+
379
+ def align_points_xyz_shift(points_src: torch.Tensor, points_tgt: torch.Tensor, weight: Optional[torch.Tensor], trunc: Optional[Union[float, torch.Tensor]] = None, max_iters: int = 30, eps: float = 1e-6):
380
+ """
381
+ Align `points_src` to `points_tgt` with respect to an xyz shift.
382
+
383
+ ### Parameters:
384
+ - `points_src: torch.Tensor` of shape (..., N, 3)
385
+ - `points_tgt: torch.Tensor` of shape (..., N, 3)
386
+ - `weights: torch.Tensor` of shape (..., N)
387
+
388
+ ### Returns:
389
+ - `scale: torch.Tensor` of shape (...).
390
+ - `shift: torch.Tensor` of shape (..., 3)
391
+ """
392
+ dtype, device = points_src.dtype, points_src.device
393
+
394
+ shift, _, _ = align(torch.ones_like(points_src).swapaxes(-2, -1), (points_tgt - points_src).swapaxes(-2, -1), weight[..., None, :], trunc)
395
+
396
+ return shift
397
+
398
+
399
+ def align_affine_lstsq(x: torch.Tensor, y: torch.Tensor, w: torch.Tensor = None) -> Tuple[torch.Tensor, torch.Tensor]:
400
+ """
401
+ Solve `min sum_i w_i * (a * x_i + b - y_i ) ^ 2`, where `a` and `b` are scalars, with respect to `a` and `b` using least squares.
402
+
403
+ ### Parameters:
404
+ - `x: torch.Tensor` of shape (..., N)
405
+ - `y: torch.Tensor` of shape (..., N)
406
+ - `w: torch.Tensor` of shape (..., N)
407
+
408
+ ### Returns:
409
+ - `a: torch.Tensor` of shape (...,)
410
+ - `b: torch.Tensor` of shape (...,)
411
+ """
412
+ w_sqrt = torch.ones_like(x) if w is None else w.sqrt()
413
+ A = torch.stack([w_sqrt * x, torch.ones_like(x)], dim=-1)
414
+ B = (w_sqrt * y)[..., None]
415
+ a, b = torch.linalg.lstsq(A, B)[0].squeeze(-1).unbind(-1)
416
+ return a, b
moge/utils/data_augmentation.py ADDED
@@ -0,0 +1,250 @@
1
+ import os
2
+ import json
3
+ import time
4
+ import random
5
+ from typing import *
6
+ import itertools
7
+ from numbers import Number
8
+ import io
9
+
10
+ import numpy as np
11
+ import cv2
12
+ from PIL import Image
13
+ import torch
14
+ import torchvision.transforms.v2.functional as TF
15
+ import utils3d
16
+ from scipy.signal import fftconvolve
17
+
18
+ from ..utils.geometry_numpy import harmonic_mean_numpy, norm3d, depth_occlusion_edge_numpy
19
+
20
+
21
+ def sample_perspective(
22
+ src_intrinsics: np.ndarray,
23
+ tgt_aspect: float,
24
+ center_augmentation: float,
25
+ fov_range_absolute: Tuple[float, float],
26
+ fov_range_relative: Tuple[float, float],
27
+ rng: np.random.Generator = None
28
+ ) -> Tuple[np.ndarray, np.ndarray]:
29
+ raw_horizontal, raw_vertical = abs(1.0 / src_intrinsics[0, 0]), abs(1.0 / src_intrinsics[1, 1])
30
+ raw_fov_x, raw_fov_y = utils3d.np.intrinsics_to_fov(src_intrinsics)
31
+
32
+ # 1. set target fov
33
+ fov_range_absolute_min, fov_range_absolute_max = fov_range_absolute
34
+ fov_range_relative_min, fov_range_relative_max = fov_range_relative
35
+ tgt_fov_x_min = min(fov_range_relative_min * raw_fov_x, utils3d.focal_to_fov(utils3d.fov_to_focal(fov_range_relative_min * raw_fov_y) / tgt_aspect))
36
+ tgt_fov_x_max = min(fov_range_relative_max * raw_fov_x, utils3d.focal_to_fov(utils3d.fov_to_focal(fov_range_relative_max * raw_fov_y) / tgt_aspect))
37
+ tgt_fov_x_min, tgt_fov_x_max = max(np.deg2rad(fov_range_absolute_min), tgt_fov_x_min), min(np.deg2rad(fov_range_absolute_max), tgt_fov_x_max)
38
+ tgt_fov_x = rng.uniform(min(tgt_fov_x_min, tgt_fov_x_max), tgt_fov_x_max)
39
+ tgt_fov_y = utils3d.focal_to_fov(utils3d.np.fov_to_focal(tgt_fov_x) * tgt_aspect)
40
+
41
+ # 2. set target image center (principal point) and the corresponding z-direction in raw camera space
42
+ center_dtheta = center_augmentation * rng.uniform(-0.5, 0.5) * (raw_fov_x - tgt_fov_x)
43
+ center_dphi = center_augmentation * rng.uniform(-0.5, 0.5) * (raw_fov_y - tgt_fov_y)
44
+ cu, cv = 0.5 + 0.5 * np.tan(center_dtheta) / np.tan(raw_fov_x / 2), 0.5 + 0.5 * np.tan(center_dphi) / np.tan(raw_fov_y / 2)
45
+ direction = utils3d.np.unproject_cv(np.array([[cu, cv]], dtype=np.float32), np.array([1.0], dtype=np.float32), intrinsics=src_intrinsics)[0]
46
+
47
+ # 3. obtain the rotation matrix for homography warping (new_ext = R * old_ext)
48
+ R = utils3d.np.rotation_matrix_from_vectors(direction, np.array([0, 0, 1], dtype=np.float32))
49
+
50
+ # 4. shrink the target view to fit into the warped image
51
+ corners = np.array([[0, 0], [0, 1], [1, 1], [1, 0]], dtype=np.float32)
52
+ corners = np.concatenate([corners, np.ones((4, 1), dtype=np.float32)], axis=1) @ (np.linalg.inv(src_intrinsics).T @ R.T) # corners in viewport's camera plane
53
+ corners = corners[:, :2] / corners[:, 2:3]
54
+ tgt_horizontal, tgt_vertical = np.tan(tgt_fov_x / 2) * 2, np.tan(tgt_fov_y / 2) * 2
55
+ warp_horizontal, warp_vertical = float('inf'), float('inf')
56
+ for i in range(4):
57
+ intersection, _ = utils3d.np.ray_intersection(
58
+ np.array([0., 0.]), np.array([[tgt_aspect, 1.0], [tgt_aspect, -1.0]]),
59
+ corners[i - 1], corners[i] - corners[i - 1],
60
+ )
61
+ warp_horizontal, warp_vertical = min(warp_horizontal, 2 * np.abs(intersection[:, 0]).min()), min(warp_vertical, 2 * np.abs(intersection[:, 1]).min())
62
+ tgt_horizontal, tgt_vertical = min(tgt_horizontal, warp_horizontal), min(tgt_vertical, warp_vertical)
63
+
64
+ # 5. obtain the target intrinsics
65
+ fx, fy = 1 / tgt_horizontal, 1 / tgt_vertical
66
+ tgt_intrinsics = utils3d.np.intrinsics_from_focal_center(fx, fy, 0.5, 0.5).astype(np.float32)
67
+
68
+ return tgt_intrinsics, R
69
+
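+ # Hedged usage sketch (only the utils3d call used above is assumed to exist; values illustrative):
+ # fx = fy = 0.5 / np.tan(np.deg2rad(60) / 2)   # normalized focal length for a 60 deg fov
+ # src_intrinsics = utils3d.np.intrinsics_from_focal_center(fx, fy, 0.5, 0.5).astype(np.float32)
+ # tgt_intrinsics, R = sample_perspective(
+ #     src_intrinsics, tgt_aspect=1.0, center_augmentation=0.5,
+ #     fov_range_absolute=(30.0, 90.0), fov_range_relative=(0.5, 1.0),
+ #     rng=np.random.default_rng(0),
+ # )
+ # # The homography fed to warp_perspective is then presumably
+ # # tgt_intrinsics @ R @ np.linalg.inv(src_intrinsics).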
70
+
71
+ def warp_perspective(
72
+ src_map: np.ndarray = None,
73
+ transform: np.ndarray = None,
74
+ tgt_size: Tuple[int, int] = None,
75
+ interpolation: Literal['nearest', 'bilinear', 'lanczos'] = 'nearest',
76
+ sparse_mask: np.ndarray = None,
77
+ ):
78
+ """Perspective warping with careful resampling.
79
+ - For `lanczos`, use PIL to resize first to reduce aliasing.
80
+ - For `nearest` with sparse input, use mask-aware nearest resize to avoid losing points.
81
+ - For `bilinear` or `nearest` with dense input, directly use cv2.remap.
82
+
83
+ - `transform` is the matrix that maps homogeneous normalized (uv) coordinates of the source image to those of the target image, i.e. `uv_tgt ~ transform @ uv_src`; conversion to and from pixel coordinates is handled internally.
84
+ """
85
+
86
+ tgt_height, tgt_width = tgt_size
87
+ src_height, src_width = src_map.shape[:2]
88
+
89
+ # source to target transform
90
+ transform_pixel = np.array([[tgt_width, 0, -0.5], [0, tgt_height, -0.5], [0, 0, 1]], dtype=np.float32) @ transform @ np.array([[1 / src_width, 0, 0.5 / src_width], [0, 1 / src_height, 0.5 / src_height], [0, 0, 1]], dtype=np.float32)
91
+ # Get scale factor at the target center
92
+ w = np.dot(np.linalg.inv(transform_pixel)[2, :], np.array([tgt_width / 2, tgt_height / 2, 1], dtype=np.float32))
93
+ scale_x, scale_y = w * np.linalg.norm(transform_pixel[:2, :2], axis=0)
94
+
95
+ if interpolation == 'lanczos' and (scale_x < 0.8 or scale_y < 0.8):
96
+ # If lanczos & downsampling, use PIL to resize first to reduce aliasing
97
+ src_height, src_width = max(round(src_height * scale_y * 1.25), 16), max(round(src_width * scale_x * 1.25), 16)
98
+ src_map = np.array(Image.fromarray(src_map).resize((src_width, src_height), Image.Resampling.LANCZOS))
99
+ elif interpolation == 'nearest' and sparse_mask is not None and (scale_x < 1 or scale_y < 1):
100
+ # If nearest and sparse, use mask-aware nearest resize first to avoid losing points
101
+ src_height, src_width = max(round(src_height * scale_y), 16), max(round(src_width * scale_x), 16)
102
+ src_map, _ = utils3d.np.masked_nearest_resize(src_map, mask=sparse_mask, size=(src_height, src_width))
103
+
104
+ # Recompute the pixel-space transform after resizing
105
+ transform_pixel = np.array([[tgt_width, 0, -0.5], [0, tgt_height, -0.5], [0, 0, 1]], dtype=np.float32) @ transform @ np.array([[1 / src_width, 0, 0.5 / src_width], [0, 1 / src_height, 0.5 / src_height], [0, 0, 1]], dtype=np.float32)
106
+
107
+ # Remap
108
+ cv2_interpolation = {'nearest': cv2.INTER_NEAREST, 'bilinear': cv2.INTER_LINEAR, 'lanczos': cv2.INTER_LANCZOS4}[interpolation]
109
+ tgt_map = cv2.warpPerspective(src_map, transform_pixel, (tgt_width, tgt_height), flags=cv2_interpolation)
110
+
111
+ return tgt_map
112
+
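+ # Hedged example: a pure 2x central zoom-in of a dense map, expressed in normalized uv coordinates:
+ # zoom = np.array([[2, 0, -0.5], [0, 2, -0.5], [0, 0, 1]], dtype=np.float32)  # uv' = 2*uv - 0.5
+ # cropped = warp_perspective(image, zoom, (256, 256), interpolation='bilinear')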
113
+
114
+ def image_color_augmentation(image: np.ndarray, augmentations: List[str], rng: np.random.Generator = None, depth: np.ndarray = None):
115
+ height, width = image.shape[:2]
116
+ if rng is None:
117
+ rng = np.random.default_rng()
118
+ if 'jittering' in augmentations:
119
+ image = torch.from_numpy(image).permute(2, 0, 1)
120
+ image = TF.adjust_brightness(image, rng.uniform(0.9, 1.1))
121
+ image = TF.adjust_contrast(image, rng.uniform(0.9, 1.1))
122
+ image = TF.adjust_saturation(image, rng.uniform(0.9, 1.1))
123
+ image = TF.adjust_hue(image, rng.uniform(-0.05, 0.05))
124
+ image = TF.adjust_gamma(image, rng.uniform(0.9, 1.1))
125
+ image = image.permute(1, 2, 0).numpy()
126
+ if 'dof' in augmentations:
127
+ assert depth is not None, 'Depth map is required for DOF augmentation'
128
+ if rng.uniform() < 0.5:
129
+ dof_strength = rng.integers(12)
130
+ disp = 1 / depth
131
+ finite_mask = np.isfinite(depth)
132
+ disp_min, disp_max = disp[finite_mask].min(), disp[finite_mask].max()
133
+ disp = cv2.inpaint(np.nan_to_num(disp, nan=1), np.isnan(disp).astype(np.uint8), 3, cv2.INPAINT_TELEA).clip(0, disp_max)
134
+ dof_focus = rng.uniform(disp_min, disp_max)
135
+ image = depth_of_field(image, disp, dof_focus, dof_strength)
136
+ if 'shot_noise' in augmentations:
137
+ if rng.uniform() < 0.5:
138
+ k = np.exp(rng.uniform(np.log(100), np.log(10000))) / 255
139
+ image = (rng.poisson(image * k) / k).clip(0, 255).astype(np.uint8)
140
+ if 'blurring' in augmentations:
141
+ if rng.uniform() < 0.5:
142
+ ratio = rng.uniform(0.25, 1)
143
+ image = cv2.resize(cv2.resize(image, (int(width * ratio), int(height * ratio)), interpolation=cv2.INTER_AREA), (width, height), interpolation=rng.choice([cv2.INTER_LINEAR_EXACT, cv2.INTER_CUBIC, cv2.INTER_LANCZOS4]))
144
+ if 'jpeg_loss' in augmentations:
145
+ if rng.uniform() < 0.5:
146
+ image = cv2.imdecode(cv2.imencode('.jpg', image, [cv2.IMWRITE_JPEG_QUALITY, rng.integers(20, 100)])[1], cv2.IMREAD_COLOR)
147
+
148
+ return image
149
+
150
+
151
+
152
+ def disk_kernel(radius: int) -> np.ndarray:
153
+ """
154
+ Generate disk kernel with given radius.
155
+
156
+ Args:
157
+ radius (int): Radius of the disk (in pixels).
158
+
159
+ Returns:
160
+ np.ndarray: (2*radius+1, 2*radius+1) normalized convolution kernel.
161
+ """
162
+ # Create coordinate grid centered at (0,0)
163
+ L = np.arange(-radius, radius + 1)
164
+ X, Y = np.meshgrid(L, L)
165
+ # Generate disk: region inside circle with radius R is 1
166
+ kernel = ((X**2 + Y**2) <= radius**2).astype(np.float32)
167
+ # Normalize the kernel
168
+ kernel /= np.sum(kernel)
169
+ return kernel
170
+
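+ # >>> disk_kernel(1)
+ # array([[0. , 0.2, 0. ],
+ #        [0.2, 0.2, 0.2],
+ #        [0. , 0.2, 0. ]], dtype=float32)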
171
+
172
+ def disk_blur(image: np.ndarray, radius: int) -> np.ndarray:
173
+ """
174
+ Apply disk blur to an image using FFT convolution.
175
+
176
+ Args:
177
+ image (np.ndarray): Input image, can be grayscale or color.
178
+ radius (int): Blur radius (in pixels).
179
+
180
+ Returns:
181
+ np.ndarray: Blurred image.
182
+ """
183
+ if radius == 0:
184
+ return image
185
+ kernel = disk_kernel(radius)
186
+ if image.ndim == 2:
187
+ blurred = fftconvolve(image, kernel, mode='same')
188
+ elif image.ndim == 3:
189
+ channels = []
190
+ for i in range(image.shape[2]):
191
+ blurred_channel = fftconvolve(image[..., i], kernel, mode='same')
192
+ channels.append(blurred_channel)
193
+ blurred = np.stack(channels, axis=-1)
194
+ else:
195
+ raise ValueError("Image must be 2D or 3D.")
196
+ return blurred
197
+
198
+
199
+ def depth_of_field(
200
+ img: np.ndarray,
201
+ disp: np.ndarray,
202
+ focus_disp : float,
203
+ max_blur_radius : int = 10,
204
+ ) -> np.ndarray:
205
+ """
206
+ Apply depth of field effect to an image.
207
+
208
+ Args:
209
+ img (numpy.ndarray): (H, W, 3) input image.
210
+ depth (numpy.ndarray): (H, W) depth map of the scene.
211
+ focus_depth (float): Focus depth of the lens.
212
+ strength (float): Strength of the depth of field effect.
213
+ max_blur_radius (int): Maximum blur radius (in pixels).
214
+
215
+ Returns:
216
+ numpy.ndarray: (H, W, 3) output image with depth of field effect applied.
217
+ """
218
+ # Precalculate a dilated disparity map for each blur radius
219
+ max_disp = np.max(disp)
220
+ disp = disp / max_disp
221
+ focus_disp = focus_disp / max_disp
222
+ dilated_disp = []
223
+ for radius in range(max_blur_radius + 1):
224
+ dilated_disp.append(cv2.dilate(disp, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2 * radius + 1, 2 * radius + 1)), iterations=1))
225
+
226
+ # Determine the blur radius for each pixel based on the depth map
227
+ blur_radii = np.clip(np.abs(disp - focus_disp) * max_blur_radius, 0, max_blur_radius).astype(np.int32)
228
+ for radius in range(max_blur_radius + 1):
229
+ dilated_blur_radii = np.clip(np.abs(dilated_disp[radius] - focus_disp) * max_blur_radius, 0, max_blur_radius).astype(np.int32)
230
+ mask = (dilated_blur_radii >= radius) & (dilated_blur_radii >= blur_radii) & (dilated_disp[radius] > disp)
231
+ blur_radii[mask] = dilated_blur_radii[mask]
232
+ blur_radii = np.clip(blur_radii, 0, max_blur_radius)
233
+ blur_radii = cv2.blur(blur_radii, (5, 5))
234
+
235
+ # Precalculate the blurred image for each blur radius
236
+ unique_radii = np.unique(blur_radii)
237
+ precomputed = {}
238
+ for radius in range(max_blur_radius + 1):
239
+ if radius not in unique_radii:
240
+ continue
241
+ precomputed[radius] = disk_blur(img, radius)
242
+
243
+ # Composite the blurred image for each pixel
244
+ output = np.zeros_like(img)
245
+ for r in unique_radii:
246
+ mask = blur_radii == r
247
+ output[mask] = precomputed[r][mask]
248
+
249
+ return output
250
+
moge/utils/download.py ADDED
@@ -0,0 +1,55 @@
1
+ from pathlib import Path
2
+ from typing import *
3
+ import requests
4
+
5
+ from tqdm import tqdm
6
+
7
+
8
+ __all__ = ["download_file", "download_bytes"]
9
+
10
+
11
+ def download_file(url: str, filepath: Union[str, Path], headers: dict = None, resume: bool = True) -> None:
12
+ # Ensure headers is a dict if not provided
13
+ headers = dict(headers) if headers else {}  # Copy so the caller's dict is not mutated by the Range header below
14
+
15
+ # Initialize local variables
16
+ file_path = Path(filepath)
17
+ downloaded_bytes = 0
18
+
19
+ # Check if we should resume the download
20
+ if resume and file_path.exists():
21
+ downloaded_bytes = file_path.stat().st_size
22
+ headers['Range'] = f"bytes={downloaded_bytes}-"
23
+
24
+ # Make a GET request to fetch the file
25
+ with requests.get(url, stream=True, headers=headers) as response:
26
+ response.raise_for_status() # This will raise an HTTPError if the status is 4xx/5xx
27
+
28
+ # Calculate the total size to download
29
+ total_size = downloaded_bytes + int(response.headers.get('content-length', 0))
30
+
31
+ # Display a progress bar while downloading
32
+ with (
33
+ tqdm(desc=f"Downloading {file_path.name}", total=total_size, unit='B', unit_scale=True, leave=False) as pbar,
34
+ open(file_path, 'ab') as file,
35
+ ):
36
+ # Set the initial position of the progress bar
37
+ pbar.update(downloaded_bytes)
38
+
39
+ # Write the content to the file in chunks
40
+ for chunk in response.iter_content(chunk_size=4096):
41
+ file.write(chunk)
42
+ pbar.update(len(chunk))
43
+
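+ # NOTE (hedged): resuming assumes the server honors the Range header (HTTP 206 Partial Content).
+ # A server that replies 200 would re-send the whole file, which the append-mode write above would
+ # duplicate; checking `response.status_code == 206` before appending is a worthwhile guard.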
44
+
45
+ def download_bytes(url: str, headers: dict = None) -> bytes:
46
+ # Ensure headers is a dict if not provided
47
+ headers = headers or {}
48
+
49
+ # Make a GET request to fetch the file
50
+ with requests.get(url, stream=True, headers=headers) as response:
51
+ response.raise_for_status() # This will raise an HTTPError if the status is 4xx/5xx
52
+
53
+ # Read the content of the response
54
+ return response.content
55
+