Diffusers · Safetensors
zeyuren2002 committed · Commit 40a3ea8 · verified · Parent(s): 7f921f4

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50):
  1. FE2E/.gitignore +35 -0
  2. FE2E/LICENSE +21 -0
  3. FE2E/README.md +159 -0
  4. FE2E/evaluation.py +553 -0
  5. FE2E/infer/__init__.py +0 -0
  6. FE2E/infer/__pycache__/__init__.cpython-310.pyc +0 -0
  7. FE2E/infer/__pycache__/inference.cpython-310.pyc +0 -0
  8. FE2E/infer/__pycache__/sampling.cpython-310.pyc +0 -0
  9. FE2E/infer/__pycache__/seed_all.cpython-310.pyc +0 -0
  10. FE2E/infer/alignment.py +70 -0
  11. FE2E/infer/configs/data_diode_all.yaml +5 -0
  12. FE2E/infer/configs/data_eth3d.yaml +7 -0
  13. FE2E/infer/configs/data_kitti_eigen_test.yaml +6 -0
  14. FE2E/infer/configs/data_nyu_test.yaml +5 -0
  15. FE2E/infer/configs/data_scannet_val.yaml +5 -0
  16. FE2E/infer/dataset/__init__.py +49 -0
  17. FE2E/infer/dataset/base_depth_dataset.py +257 -0
  18. FE2E/infer/dataset/diode_dataset.py +71 -0
  19. FE2E/infer/dataset/drivingstereo_dataset.py +32 -0
  20. FE2E/infer/dataset/eth3d_dataset.py +45 -0
  21. FE2E/infer/dataset/kitti_dataset.py +105 -0
  22. FE2E/infer/dataset/nyu_dataset.py +43 -0
  23. FE2E/infer/dataset/scannet_dataset.py +25 -0
  24. FE2E/infer/dataset_normal/__init__.py +27 -0
  25. FE2E/infer/dataset_normal/aug_basic.py +239 -0
  26. FE2E/infer/dataset_normal/hypersim/__init__.py +65 -0
  27. FE2E/infer/dataset_normal/hypersim/split/hypersim.txt +98 -0
  28. FE2E/infer/dataset_normal/ibims/__init__.py +47 -0
  29. FE2E/infer/dataset_normal/ibims/split/ibims.txt +100 -0
  30. FE2E/infer/dataset_normal/normal_dataloader.py +83 -0
  31. FE2E/infer/dataset_normal/nyuv2/__init__.py +66 -0
  32. FE2E/infer/dataset_normal/nyuv2/split/test.txt +654 -0
  33. FE2E/infer/dataset_normal/nyuv2/split/train.txt +795 -0
  34. FE2E/infer/dataset_normal/oasis/__init__.py +76 -0
  35. FE2E/infer/dataset_normal/oasis/split/val.txt +0 -0
  36. FE2E/infer/dataset_normal/scannet/__init__.py +64 -0
  37. FE2E/infer/dataset_normal/scannet/split/test.txt +300 -0
  38. FE2E/infer/dataset_normal/sintel/__init__.py +49 -0
  39. FE2E/infer/dataset_normal/sintel/split/sintel.txt +1064 -0
  40. FE2E/infer/image_utils.py +158 -0
  41. FE2E/infer/inference.py +569 -0
  42. FE2E/infer/inner_evaluation.py +598 -0
  43. FE2E/infer/sampling.py +47 -0
  44. FE2E/infer/seed_all.py +33 -0
  45. FE2E/infer/util/__init__.py +0 -0
  46. FE2E/infer/util/alignment.py +88 -0
  47. FE2E/infer/util/metric.py +151 -0
  48. FE2E/infer/util/normal_utils.py +78 -0
  49. FE2E/infer/visualize.py +130 -0
  50. FE2E/library/__init__.py +0 -0
FE2E/.gitignore ADDED
@@ -0,0 +1,35 @@
+ .DS_store
+ .idea
+ */.DS_store
+ __pycache__
+ */__pycache__/
+ */.ipynb_checkpoints/
+
+ # special files
+ *.report
+ *.png
+ *-xdl
+
+ # files that are unchanged or whose changes are irrelevant
+ Qwen/
+ pretrain/
+ splits/
+ log_err/
+ assets/
+ !assets/
+ !assets/demo.png
+ !assets/pipeline.png
+ vkitti/
+ flux/
+ compare_result/
+
+ # datasets used for inference
+ infer/data_split/
+ infer/nyu_results_test/
+ infer/nyudepth/
+ infer/eval_results/
+ infer/diode/
+ infer/eth3d/
+ infer/kitti/
+ infer/scannet/
+ infer/dsine_eval/
FE2E/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2025 AMAP-ML
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
FE2E/README.md ADDED
@@ -0,0 +1,159 @@
+ # FE2E: From Editor to Dense Geometry Estimator
+
+ [![Page](https://img.shields.io/badge/Project-Website-pink?logo=googlechrome&logoColor=white)](https://amap-ml.github.io/FE2E/)
+ [![Paper](https://img.shields.io/badge/arXiv-2509.04338-b31b1b?logo=arxiv&logoColor=white)](https://arxiv.org/abs/2509.04338)
+ [![GitHub](https://img.shields.io/github/stars/AMAP-ML/FE2E?style=social)](https://github.com/AMAP-ML/FE2E)
+ [![HuggingFace](https://img.shields.io/badge/🤗%20HuggingFace-Model-yellow)](https://huggingface.co/exander/FE2E)
+ [![Video](https://img.shields.io/badge/BiliBili-Video-00A1D6)](https://www.bilibili.com/video/BV1zYXdBXE2x)
+ [![Video](https://img.shields.io/badge/YouTube-Video-red)](https://youtu.be/fyXwwH_-o5w)
+
+ [Jiyuan Wang](https://wangjiyuan9.github.io/)<sup>1,2</sup>,
+ [Chunyu Lin](https://scholar.google.com/citations?hl=zh-CN&user=t8xkhscAAAAJ)<sup>1&#9993;</sup>,
+ [Lei Sun](https://scholar.google.com/citations?user=your-id)<sup>2&#10013;</sup>,
+ [Rongying Liu](https://scholar.google.com/citations?user=your-id)<sup>1</sup>,
+ [Mingxing Li](https://scholar.google.com/citations?user=-pfkprkAAAAJ&hl=zh-CN&oi=ao)<sup>2</sup>,
+ [Lang Nie](https://scholar.google.com/citations?hl=zh-CN&user=vo__egkAAAAJ)<sup>3</sup>,
+ [Kang Liao](https://kangliao929.github.io/)<sup>4</sup>,
+ [Xiangxiang Chu](https://cxxgtxy.github.io/)<sup>2</sup>,
+ [Yao Zhao](https://faculty.bjtu.edu.cn/5900/)<sup>1</sup>
+
+ <span class="author-block"><sup>1</sup>Beijing Jiaotong University</span>
+ <span class="author-block"><sup>2</sup>Alibaba Group</span>
+ <span class="author-block"><sup>3</sup>Chongqing University of Posts and Telecommunications</span>
+ <span class="author-block"><sup>4</sup>Nanyang Technological University</span>
+ <span class="author-block"><sup>&#9993;</sup>Corresponding author. <sup>&#10013;</sup>Project leader.</span>
+
+ ![teaser](assets/demo.png)
+
+ We present **FE2E**, a DiT-based foundation model for monocular dense geometry prediction. FE2E adapts an advanced image editing model to dense geometry tasks and achieves strong zero-shot performance on both monocular depth and normal estimation.
+
+ ![pipeline](assets/pipeline.png)
+
+ ## 📢 News
+ - **[2026-03-17]**: Code and Checkpoint are available now!
+ - **[2026-02-21]**: FE2E was accepted by CVPR 2026!!! 🎉🎉🎉
+ - **[2025-09-05]**: Paper released on [arXiv](https://arxiv.org/abs/2509.04338).
+
+ ---
+
+ ## 🛠️ Setup
+
+ This codebase is prepared as an inference/evaluation release.
+
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ Recommended local layout:
+
+ ```text
+ FE2E/
+ ├── pretrain/
+ │   ├── step1x-edit-i1258.safetensors
+ │   ├── step1x-edit-v1p1-official.safetensors
+ │   └── vae.safetensors
+ ├── lora/
+ │   └── LDRN.safetensors
+ ├── infer/
+ │   ├── eth3d/
+ │   │   └── eth3d.tar
+ │   └── dsine_eval/
+ │       ├── nyuv2/
+ │       └── scannet/
+ └── logs/
+ ```
+
+ ---
+
+ ## 🔥 Training
+
+ ```text
+ [ ] Training code will be released later.
+ ```
+
+ ---
+
+ ## 🕹️ Inference
+
+ ### 1. Prepare Model Weights
+
+ 1. Download the base weights from the official [Step1X-Edit](https://github.com/stepfun-ai/Step1X-Edit) release.
+ 2. Download the FE2E LoRA [checkpoint](https://huggingface.co/exander/FE2E/blob/main/LDRN.safetensors).
+
+ ### 2. Prepare Benchmark Datasets
85
+
86
+ - Depth benchmarks follow the external evaluation data convention from [Marigold](https://github.com/prs-eth/Marigold).
87
+ - Normal benchmarks follow the external evaluation data convention from [DSINE](https://github.com/baegwangbin/DSINE).
88
+
89
+
90
+ Supported depth benchmarks:
91
+ - `nyu_v2`,`kitti`,`eth3d`,`diode`,`scannet`
92
+
93
+ Supported normal benchmarks:
94
+ - `nyuv2`,`scannet`,`ibims`,`sintel`
95
+
96
+
97
+ ### 3. Run Evaluation
98
+
99
+ `[dataset] normal`:
100
+
101
+ ```bash
102
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
103
+ MASTER_PORT=21258 \
104
+ PYTHONUNBUFFERED=1 \
105
+ python -u evaluation.py \
106
+ --model_path ./pretrain \
107
+ --eval_data_root ./infer \
108
+ --output_dir ./infer/eval_verify_scannet_normal_8gpu \
109
+ --num_gpus 8 \
110
+ --num_samples -1 \
111
+ --lora ./lora/LDRN.safetensors \
112
+ --single_denoise \
113
+ --prompt_type empty \
114
+ --norm_type ln \
115
+ --task_name normal \
116
+ --normal_eval_datasets [dataset]
117
+ ```
118
+
119
+ `[dataset] depth`:
120
+
121
+ ```bash
122
+ CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 \
123
+ MASTER_PORT=21257 \
124
+ PYTHONUNBUFFERED=1 \
125
+ python -u evaluation.py \
126
+ --model_path ./pretrain \
127
+ --eval_data_root ./infer \
128
+ --output_dir ./infer/eval_verify_eth3d_8gpu \
129
+ --num_gpus 8 \
130
+ --num_samples -1 \
131
+ --lora ./lora/LDRN.safetensors \
132
+ --single_denoise \
133
+ --prompt_type empty \
134
+ --norm_type ln \
135
+ --task_name depth \
136
+ --depth_eval_datasets [dataset]
137
+ ```
138
+
139
+
140
+ ### 4. Reference Logs
141
+ If you want to known the successful status, this repo includes run logs in `logs/`:
142
+ - `logs/verify_scannet_normal_8gpu_20260317_171345.log`
143
+ - `logs/verify_eth3d_8gpu_20260317_172004.log`
144
+
145
+
146
+ ---
147
+
148
+ ## 🎓 Citation
149
+
150
+ If you find our work useful, please cite:
151
+
152
+ ```bibtex
153
+ @article{wang2025editor,
154
+ title={From Editor to Dense Geometry Estimator},
155
+ author={Wang, JiYuan and Lin, Chunyu and Sun, Lei and Liu, Rongying and Nie, Lang and Li, Mingxing and Liao, Kang and Chu, Xiangxiang and Zhao, Yao},
156
+ journal={arXiv preprint arXiv:2509.04338},
157
+ year={2025}
158
+ }
159
+ ```
FE2E/evaluation.py ADDED
@@ -0,0 +1,553 @@
+ import argparse
+ import os
+ import re
+ import numpy as np
+ import torch
+ import torch.distributed as dist
+ import torch.multiprocessing as mp
+
+ from infer.seed_all import seed_all
+
+ # Set environment variables to silence tokenizers warnings
+ os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+ os.environ['NCCL_DEBUG'] = 'WARN'
+ # Silence torchvision warnings
+ os.environ['TORCHVISION_DISABLE_DEPRECATED_WARNING'] = '1'
+
+ REPO_ROOT = os.path.dirname(os.path.abspath(__file__))
+ DEFAULT_QWEN_DIR = os.path.join(REPO_ROOT, "Qwen")
+ DEFAULT_DEPTH_DATASET_CONFIGS = {
+     "nyu_v2": "configs/data_nyu_test.yaml",
+     "kitti": "configs/data_kitti_eigen_test.yaml",
+     "eth3d": "configs/data_eth3d.yaml",
+     "diode": "configs/data_diode_all.yaml",
+     "scannet": "configs/data_scannet_val.yaml",
+ }
+ DEFAULT_NORMAL_DATASETS = {
+     "nyuv2": "test",
+     "scannet": "test",
+     "ibims": "ibims",
+     "sintel": "sintel",
+     "oasis": "val",
+     "hypersim": "hypersim",
+ }
+
+
+ def resolve_eval_data_root(args, *required_markers):
+     """Resolve the evaluation data root without depending on the launch cwd."""
+     candidates = []
+     if getattr(args, "eval_data_root", None):
+         candidates.append(os.path.abspath(args.eval_data_root))
+
+     candidates.extend(
+         [
+             os.path.join(REPO_ROOT, "infer"),
+             os.path.join(REPO_ROOT, "data"),
+             os.path.join(os.path.dirname(REPO_ROOT), "data"),
+         ]
+     )
+
+     for candidate in candidates:
+         if all(os.path.exists(os.path.join(candidate, marker)) for marker in required_markers):
+             return candidate
+
+     checked = ", ".join(
+         os.path.join(candidate, marker) for candidate in candidates for marker in required_markers
+     )
+     raise FileNotFoundError(f"Evaluation data root not found; checked: {checked}")
+
+
+ def parse_depth_eval_datasets(raw_value):
+     requested = [item.strip() for item in raw_value.split(",") if item.strip()]
+     if requested == ["all"]:
+         requested = list(DEFAULT_DEPTH_DATASET_CONFIGS.keys())
+     invalid = [item for item in requested if item not in DEFAULT_DEPTH_DATASET_CONFIGS]
+     if invalid:
+         raise ValueError(f"Unsupported depth datasets: {invalid}; available options: {sorted(DEFAULT_DEPTH_DATASET_CONFIGS)}")
+     return {name: DEFAULT_DEPTH_DATASET_CONFIGS[name] for name in requested}
+
+
+ def parse_normal_eval_datasets(raw_value):
+     requested = [item.strip() for item in raw_value.split(",") if item.strip()]
+     if requested == ["all"]:
+         requested = list(DEFAULT_NORMAL_DATASETS.keys())
+     invalid = [item for item in requested if item not in DEFAULT_NORMAL_DATASETS]
+     if invalid:
+         raise ValueError(f"Unsupported normal datasets: {invalid}; available options: {sorted(DEFAULT_NORMAL_DATASETS)}")
+     return [(name, DEFAULT_NORMAL_DATASETS[name]) for name in requested]
+
+
+ def collect_and_merge_dual_cfg_results(rank, world_size, gathered_metrics_Lpred, gathered_times):
+     """
+     Collect and merge the evaluation results of the dual-CFG configuration.
+
+     Args:
+         rank: rank of the current process
+         world_size: total number of processes
+         gathered_metrics_Lpred: metric dicts gathered from all ranks
+         gathered_times: gathered processing times
+
+     Returns:
+         tuple: (all_metrics_Lpred, dataset_times)
+     """
+     if rank != 0:
+         return None, None
+
+     # Merge processing times
+     dataset_times = []
+     for times_list in gathered_times:
+         dataset_times.extend(times_list)
+
+     # Handle the L predictions first
+     all_metrics_L = {}
+     valid_metrics_L = [m for m in gathered_metrics_Lpred if m]
+     if valid_metrics_L:
+         for key in valid_metrics_L[0].keys():
+             values = [m[key] for m in valid_metrics_L if key in m]
+             if values:
+                 all_metrics_L[key] = np.mean(values)
+
+     return all_metrics_L, dataset_times
+
+
+ def format_dual_cfg_results_table(dataset_name, model_identifier, all_metrics_L, dataset_times):
+     """
+     Format the result table for the dual-CFG configuration.
+
+     Args:
+         dataset_name: dataset name
+         model_identifier: model identifier
+         all_metrics_L: evaluation metrics for CFG=1
+         dataset_times: list of processing times
+
+     Returns:
+         str: formatted result string
+     """
+     eval_metrics_order = ["abs_relative_difference", "squared_relative_difference", "rmse_linear", "rmse_log", "delta1_acc", "delta2_acc", "delta3_acc"]
+
+     # Get the metric values for CFG=1
+     mean_errors_L = [all_metrics_L.get(metric, 0.0) for metric in eval_metrics_order]
+
+     # Build the table
+     metrics_header = ["Dataset", "Model", "CFG", "abs_rel", "sq_rel", "rmse", "rmse_log", "a1", "a2", "a3"]
+
+     # Result row for CFG=1
+     values_data_L = [dataset_name, model_identifier, "CFG=1"] + [f"{v:.4f}" for v in mean_errors_L]
+
+     header_line = "| " + " | ".join(metrics_header) + " |"
+     separator_line = "| " + " | ".join(["---"] * len(metrics_header)) + " |"
+     values_line_L = "| " + " | ".join(values_data_L) + " |"
+
+     # Build the output string
+     result_str = f"\nDataset {dataset_name} evaluation finished!\n"
+     result_str += "-" * 100 + "\n"
+     result_str += header_line + "\n"
+     result_str += separator_line + "\n"
+     result_str += values_line_L + "\n"
+
+     # Append statistics
+     sample_count = len(dataset_times)
+     result_str += f"Number of samples: {sample_count}\n"
+     if dataset_times:
+         result_str += f"Average processing time: {np.mean(dataset_times):.2f} s/image\n"
+     result_str += "-" * 100 + "\n"
+
+     return result_str
+
+
+ def save_dual_cfg_results_summary(output_dir, all_dataset_results, model_identifier):
+     """
+     Save a summary of the dual-CFG evaluation results over all datasets.
+
+     Args:
+         output_dir: output directory
+         all_dataset_results: result dict for all datasets
+         model_identifier: model identifier used for the summary file name
+     """
+     summary_file = os.path.join(output_dir, f"{model_identifier}.txt")
+
+     with open(summary_file, 'w', encoding='utf-8') as f:
+         f.write("=" * 120 + "\n")
+         f.write("Summary of dual-CFG depth evaluation results\n")
+         f.write("=" * 120 + "\n\n")
+
+         for dataset_name, result_data in all_dataset_results.items():
+             f.write(result_data['formatted_output'])
+             f.write(f"Results saved to: {result_data['eval_dir']}\n\n")
+
+     print(f"Dual-CFG evaluation summary saved to: {summary_file}")
+
+
+ def parse_args():
+     '''Parse command-line arguments.'''
+     parser = argparse.ArgumentParser(description="Run Step...")
+     parser.add_argument('--model_path', type=str, default='./pretrain', help='Model path')
+     parser.add_argument('--qwen2vl_model_path', type=str, default=DEFAULT_QWEN_DIR, help='Qwen2.5-VL model directory')
+     parser.add_argument('--eval_data_root', type=str, default=None, help='Evaluation data root; by default it is searched for relative to the repository')
+     parser.add_argument("--seed", type=int, default=1234, help="Random seed")
+     parser.add_argument("--output_dir", type=str, default="./infer/eval_results", help="Output directory.")
+     parser.add_argument('--num_steps', type=int, default=28, help='Number of diffusion steps')
+     parser.add_argument('--num_samples', type=int, default=-1, help='Number of samples to generate')
+     parser.add_argument('--cfg_guidance', type=float, default=6.0, help='CFG guidance scale')
+     parser.add_argument('--size_level', type=int, default=768, help='Input image size')
+     parser.add_argument('--num_gpus', type=int, default=torch.cuda.device_count(), help='Number of GPUs to use')
+     parser.add_argument('--save_viz', action='store_true', help='Save visualization results')
+     parser.add_argument('--offload', action='store_true', help='Use CPU offloading to save GPU memory')
+     parser.add_argument('--quantized', action='store_true', help='Use the quantized model')
+     parser.add_argument('--lora', type=str, help='Path to the LoRA weights')
+     parser.add_argument('--single_denoise', action='store_true', default=False, help='Single-step inference')
+     parser.add_argument('--old_prompt', action='store_true', default=False, help='Use the legacy prompt')
+     parser.add_argument('--prompt_type', type=str, default='query', help='Prompt type')
+     parser.add_argument('--prompt', type=str, default='Describe the 3D structure and layout of the scene in the image. Predict the depth of this image.', help='Prompt')
+     parser.add_argument('--norm_type', type=str, default='depth', help='Normalization of the predictions; one of depth, disp, ln')
+     parser.add_argument('--task_name', type=str, default='depth', help='Task name; depth or normal')
+     parser.add_argument('--depth_eval_datasets', type=str, default='eth3d', help='Comma-separated depth evaluation datasets')
+     parser.add_argument('--normal_eval_datasets', type=str, default='nyuv2,scannet', help='Comma-separated normal evaluation datasets')
+     parser.add_argument('--debug', action='store_true', default=False, help='Debug mode')
+     args = parser.parse_args()
+     if args.single_denoise:
+         args.num_steps = 1
+     return args
+
+
+ def extract_model_identifier(lora_path):
+     """
+     Extract a model identifier from the LoRA path.
+     Several path formats are supported:
+     - ./log_err/dis-hvsge-log/ckpt.safetensors -> dis-hvsge-log
+     - /path/to/folder/ckpt-123 -> folder-epoch123
+     - /path/to/model.safetensors -> model
+     """
+     if not lora_path or not os.path.exists(lora_path):
+         return "DefaultModel"
+
+     # Normalize the path
+     lora_path = os.path.normpath(lora_path)
+
+     # Pattern 1: /folder/ckpt-<number>
+     match = re.search(r'/([^/]+)/ckpt-(\d+)', lora_path)
+     if match:
+         folder_name = match.group(1)
+         epoch_num = int(match.group(2))
+         return f"{folder_name}-epoch{epoch_num}"
+
+     # Pattern 2: /folder/ckpt.safetensors
+     match = re.search(r'/([^/]+)/ckpt\.safetensors$', lora_path)
+     if match:
+         return match.group(1)
+
+     # Pattern 3: ./folder/ckpt.safetensors
+     match = re.search(r'[./]*([^/]+)/ckpt\.safetensors$', lora_path)
+     if match:
+         return match.group(1)
+
+     # Pattern 4: fall back to the file name
+     filename = os.path.basename(lora_path)
+     return filename.split('.')[0] if '.' in filename else filename
+
+
+ def setup(rank, world_size):
+     """Initialize the distributed environment."""
+     os.environ.setdefault('MASTER_ADDR', 'localhost')
+     os.environ.setdefault('MASTER_PORT', '21256')
+     dist.init_process_group("nccl", rank=rank, world_size=world_size)
+
+
+ def cleanup():
+     """Tear down the distributed environment."""
+     dist.destroy_process_group()
+
+
+ def main_worker(rank, world_size, args, dataset_configs):
+     """Main function of each worker process."""
+     from infer.inference import ImageGenerator
+     from infer.inner_evaluation import evaluation_depth_custom_parallel
+
+     setup(rank, world_size)
+     torch.cuda.set_device(rank)
+     device = torch.device(f"cuda:{rank}")
+
+     if rank == 0:
+         print(f"[main_worker] Loading pipeline, device={device}, datasets={list(dataset_configs.keys())}", flush=True)
+
+     pipeline = ImageGenerator(
+         ae_path=os.path.join(args.model_path, 'vae.safetensors'),
+         dit_path=os.path.join(args.model_path, "step1x-edit-i1258-FP8.safetensors" if args.quantized else "step1x-edit-i1258.safetensors"),
+         qwen2vl_model_path=args.qwen2vl_model_path,
+         max_length=640,
+         quantized=args.quantized,
+         offload=args.offload,
+         lora=args.lora,
+         device=str(device),
+         args=args,
+     )
+
+     if rank == 0:
+         print(f"Successfully loaded pipeline from {args.model_path}.", flush=True)
+
+     test_data_dir = resolve_eval_data_root(args, "configs")
+
+     # Use the model-identifier extraction helper
+     model_identifier = extract_model_identifier(args.lora)
+
+     if rank == 0:
+         print(f"Model identifier: {model_identifier}", flush=True)
+
+     all_dataset_results = {}
+     alignment_map = {"depth": "least_square", "disp": "least_square_disparity", "ln": "log_space"}
+     for dataset_name, config_path in dataset_configs.items():
+         # Output directory layout: add a model-name level above the dataset name
+         eval_dir = os.path.join(args.output_dir, model_identifier, dataset_name)
+         test_dataset_config = os.path.join(test_data_dir, config_path)
+         alignment_type = alignment_map[args.norm_type]
+
+         if rank == 0:
+             print(f"\nStarting evaluation on dataset: {dataset_name}", flush=True)
+             print(f"Output directory: {eval_dir}", flush=True)
+             print("=" * 80, flush=True)
+
+         metric_tracker_Lpred, metric_tracker_Rpred, processing_times = evaluation_depth_custom_parallel(
+             rank,
+             world_size,
+             eval_dir,
+             test_dataset_config,
+             args,
+             pipeline,
+             test_data_dir,
+             alignment=alignment_type,
+             save_pred_vis=args.save_viz,
+         )
+
+         # Synchronize all processes
+         dist.barrier()
+
+         # Gather results for the two CFG configurations
+         gathered_metrics_Lpred = [None] * world_size
+         gathered_metrics_Rpred = [None] * world_size
+         gathered_times = [None] * world_size
+
+         # Get the result dict from the metric tracker
+         metrics_dict_Lpred = metric_tracker_Lpred.result() if hasattr(metric_tracker_Lpred, 'result') else {}
+         # metrics_dict_Rpred = metric_tracker_Rpred.result() if hasattr(metric_tracker_Rpred, 'result') else {}
+
+         dist.all_gather_object(gathered_metrics_Lpred, metrics_dict_Lpred)
+         # dist.all_gather_object(gathered_metrics_Rpred, metrics_dict_Rpred)
+         dist.all_gather_object(gathered_times, processing_times)
+
+         if rank == 0:
+             metrics_dict_Lpred, dataset_times = collect_and_merge_dual_cfg_results(rank, world_size, gathered_metrics_Lpred, gathered_times)
+
+             if metrics_dict_Lpred:
+                 # Format and print the result table
+                 formatted_output = format_dual_cfg_results_table(dataset_name, model_identifier, metrics_dict_Lpred, dataset_times)
+
+                 print(formatted_output)
+                 print(f"Results saved to: {eval_dir}")
+
+                 # Store results for the final summary
+                 all_dataset_results[dataset_name] = {'metrics_Lpred': metrics_dict_Lpred, 'formatted_output': formatted_output, 'eval_dir': eval_dir, 'processing_times': dataset_times}
+
+     if rank == 0:
+         print(f"\nAll datasets evaluated! Results saved in: {os.path.join(args.output_dir, model_identifier)}")
+
+         # Save the dual-CFG summary for all datasets
+         if all_dataset_results:
+             save_dual_cfg_results_summary(os.path.join(args.output_dir, model_identifier), all_dataset_results, model_identifier)
+
+     cleanup()
+
+ def main_worker_normal(rank, world_size, args, eval_datasets):
+     """Multi-process main function for normal estimation."""
+     from infer.inference import ImageGenerator
+     from infer.inner_evaluation import evaluation_normal_custom_parallel
+
+     setup(rank, world_size)
+     torch.cuda.set_device(rank)
+     device = torch.device(f"cuda:{rank}")
+
+     pipeline = ImageGenerator(
+         ae_path=os.path.join(args.model_path, 'vae.safetensors'),
+         dit_path=os.path.join(args.model_path, "step1x-edit-i1258-FP8.safetensors" if args.quantized else "step1x-edit-i1258.safetensors"),
+         qwen2vl_model_path=args.qwen2vl_model_path,
+         max_length=640,
+         quantized=args.quantized,
+         offload=args.offload,
+         lora=args.lora,
+         device=str(device),
+         args=args,
+     )
+
+     if rank == 0:
+         print(f"Successfully loaded pipeline from {args.model_path}.")
+
+     test_data_dir = resolve_eval_data_root(args, "dsine_eval")
+     dataset_split_path = os.path.join(REPO_ROOT, "infer", "dataset_normal")
+
+     # Use the model-identifier extraction helper
+     model_identifier = extract_model_identifier(args.lora)
+
+     # Output directory layout: add a model-name level above the task name
+     eval_dir = os.path.join(args.output_dir, model_identifier, args.task_name)
+
+     if rank == 0:
+         print(f"Model identifier: {model_identifier}")
+         print(f"Output directory: {eval_dir}")
+
+     if rank == 0:
+         print(f"\nStarting parallel normal evaluation on {world_size} GPUs")
+         print("=" * 80)
+
+     # Run the parallel evaluation
+     all_normal_errors, all_processing_times, all_dataset_metrics = evaluation_normal_custom_parallel(
+         rank, world_size, eval_dir, test_data_dir, dataset_split_path, pipeline, args, eval_datasets, save_pred_vis=args.save_viz
+     )
+
+     # Synchronize all processes
+     dist.barrier()
+
+     # Gather the results from all GPUs
+     gathered_normal_errors = [None] * world_size
+     gathered_processing_times = [None] * world_size
+     gathered_dataset_metrics = [None] * world_size
+
+     dist.all_gather_object(gathered_normal_errors, all_normal_errors)
+     dist.all_gather_object(gathered_processing_times, all_processing_times)
+     dist.all_gather_object(gathered_dataset_metrics, all_dataset_metrics)
+
+     if rank == 0:
+         # Merge the results from all GPUs
+         final_results = {}
+
+         for dataset_name, _ in eval_datasets:
+             print(f"\nMerging results for dataset {dataset_name}...")
+
+             # Merge normal errors
+             all_errors_for_dataset = []
+             all_times_for_dataset = []
+
+             for gpu_errors, gpu_times in zip(gathered_normal_errors, gathered_processing_times):
+                 if gpu_errors[dataset_name] is not None:
+                     all_errors_for_dataset.append(gpu_errors[dataset_name])
+                 if gpu_times[dataset_name]:
+                     all_times_for_dataset.extend(gpu_times[dataset_name])
+
+             # Compute the final metrics
+             if all_errors_for_dataset:
+                 combined_errors = torch.cat(all_errors_for_dataset, dim=0)
+                 from infer.util import normal_utils
+                 final_metrics = normal_utils.compute_normal_metrics(combined_errors)
+
+                 print(f"Final results for dataset {dataset_name}:")
+                 print("mean median rmse 5 7.5 11.25 22.5 30")
+                 print("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f" % (
+                     final_metrics['mean'], final_metrics['median'], final_metrics['rmse'],
+                     final_metrics['a1'], final_metrics['a2'], final_metrics['a3'],
+                     final_metrics['a4'], final_metrics['a5']
+                 ))
+
+                 final_results[dataset_name] = {
+                     'metrics': final_metrics,
+                     'processing_times': all_times_for_dataset,
+                     'sample_count': len(combined_errors)
+                 }
+
+                 # Save the results to a file
+                 dataset_output_dir = os.path.join(eval_dir, dataset_name)
+                 os.makedirs(dataset_output_dir, exist_ok=True)
+
+                 from tabulate import tabulate
+                 eval_text = f"Evaluation metrics for {dataset_name}:\n"
+                 eval_text += f"Total samples: {len(combined_errors)}\n"
+                 eval_text += f"Average processing time: {np.mean(all_times_for_dataset):.2f}s\n"
+                 eval_text += tabulate([list(final_metrics.keys()), list(final_metrics.values())])
+
+                 save_path = os.path.join(dataset_output_dir, "eval_metrics.txt")
+                 with open(save_path, "w+") as f:
+                     f.write(eval_text)
+
+                 print(f"Results saved to: {save_path}")
+             else:
+                 print(f"Dataset {dataset_name}: no valid data found")
+                 final_results[dataset_name] = None
+
+         # Save the overall summary
+         summary_file = os.path.join(eval_dir, f"{model_identifier}_normal_summary.txt")
+         with open(summary_file, 'w', encoding='utf-8') as f:
+             f.write("=" * 120 + "\n")
+             f.write("Summary of multi-GPU parallel normal evaluation results\n")
+             f.write("=" * 120 + "\n\n")
+
+             for dataset_name, result in final_results.items():
+                 if result is not None:
+                     f.write(f"Dataset: {dataset_name}\n")
+                     f.write(f"Number of samples: {result['sample_count']}\n")
+                     f.write(f"Average processing time: {np.mean(result['processing_times']):.2f}s\n")
+                     metrics = result['metrics']
+                     f.write("mean median rmse 5 7.5 11.25 22.5 30\n")
+                     f.write("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f\n" % (
+                         metrics['mean'], metrics['median'], metrics['rmse'],
+                         metrics['a1'], metrics['a2'], metrics['a3'],
+                         metrics['a4'], metrics['a5']
+                     ))
+                     f.write("-" * 60 + "\n\n")
+                 else:
+                     f.write(f"Dataset: {dataset_name} - no valid data\n\n")
+
+         print(f"\nNormal evaluation summary saved to: {summary_file}")
+
+     cleanup()
+
+ def main():
+     args = parse_args()
+     if args.seed is not None:
+         seed_all(args.seed)
+
+     os.makedirs(args.output_dir, exist_ok=True)
+
+     # Check the number of GPUs
+     world_size = min(args.num_gpus, torch.cuda.device_count())
+     if world_size <= 0:
+         print("Error: no available GPU detected.")
+         return
+
+     # Environment variables for multiprocessing
+     os.environ['OMP_NUM_THREADS'] = '1'
+     os.environ['MKL_NUM_THREADS'] = '1'
+
+     print(f"Running parallel inference on {world_size} GPU(s)...")
+
+     if args.task_name == 'depth':
+         test_depth_dataset_configs = parse_depth_eval_datasets(args.depth_eval_datasets)
+         print(f"Depth evaluation datasets: {list(test_depth_dataset_configs.keys())}")
+
+         if world_size == 1:
+             # Single GPU: run directly
+             main_worker(0, 1, args, test_depth_dataset_configs)
+         else:
+             # Multiple GPUs: use multiprocessing
+             try:
+                 mp.spawn(main_worker, args=(world_size, args, test_depth_dataset_configs), nprocs=world_size, join=True)
+             except Exception as e:
+                 print(f"Multi-process execution failed: {e}")
+                 print("Falling back to single-GPU mode...")
+                 # Fall back to single-GPU mode
+                 main_worker(0, 1, args, test_depth_dataset_configs)
+
+     elif args.task_name == 'normal':
+         eval_datasets = parse_normal_eval_datasets(args.normal_eval_datasets)
+         print(f"Normal evaluation datasets: {eval_datasets}")
+
+         if world_size == 1:
+             main_worker_normal(0, 1, args, eval_datasets)
+         else:
+             try:
+                 mp.spawn(main_worker_normal, args=(world_size, args, eval_datasets), nprocs=world_size, join=True)
+             except Exception as e:
+                 print(f"Multi-process execution failed: {e}")
+                 print("Falling back to single-GPU mode...")
+                 # Fall back to single-GPU mode
+                 main_worker_normal(0, 1, args, eval_datasets)
+     else:
+         raise ValueError(f"Unsupported task_name: {args.task_name}; only depth and normal are supported")
+
+ if __name__ == '__main__':
+     main()
FE2E/infer/__init__.py ADDED
File without changes
FE2E/infer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (138 Bytes).
FE2E/infer/__pycache__/inference.cpython-310.pyc ADDED
Binary file (14.8 kB).
FE2E/infer/__pycache__/sampling.cpython-310.pyc ADDED
Binary file (1.66 kB).
FE2E/infer/__pycache__/seed_all.cpython-310.pyc ADDED
Binary file (475 Bytes).
FE2E/infer/alignment.py ADDED
@@ -0,0 +1,70 @@
+ import numpy as np
+ import torch
+
+
+ def align_depth_least_square(
+     gt_arr: np.ndarray,
+     pred_arr: np.ndarray,
+     valid_mask_arr: np.ndarray,
+     return_scale_shift=True,
+     max_resolution=None,
+ ):
+     ori_shape = pred_arr.shape  # input shape
+
+     gt = gt_arr.squeeze()  # [H, W]
+     pred = pred_arr.squeeze()
+     valid_mask = valid_mask_arr.squeeze()
+
+     # Downsample
+     if max_resolution is not None:
+         scale_factor = np.min(max_resolution / np.array(ori_shape[-2:]))
+         if scale_factor < 1:
+             downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
+             gt = downscaler(torch.as_tensor(gt).unsqueeze(0)).numpy()
+             pred = downscaler(torch.as_tensor(pred).unsqueeze(0)).numpy()
+             valid_mask = (
+                 downscaler(torch.as_tensor(valid_mask).unsqueeze(0).float())
+                 .bool()
+                 .numpy()
+             )
+
+     assert (
+         gt.shape == pred.shape == valid_mask.shape
+     ), f"{gt.shape}, {pred.shape}, {valid_mask.shape}"
+
+     gt_masked = gt[valid_mask].reshape((-1, 1))
+     pred_masked = pred[valid_mask].reshape((-1, 1))
+
+     # numpy solver
+     _ones = np.ones_like(pred_masked)
+     A = np.concatenate([pred_masked, _ones], axis=-1)
+     X = np.linalg.lstsq(A, gt_masked, rcond=None)[0]
+     scale, shift = X
+
+     aligned_pred = pred_arr * scale + shift
+
+     # restore dimensions
+     aligned_pred = aligned_pred.reshape(ori_shape)
+
+     if return_scale_shift:
+         return aligned_pred, scale, shift
+     else:
+         return aligned_pred
+
+
+ # ******************** disparity space ********************
+ def depth2disparity(depth, return_mask=False):
+     if isinstance(depth, torch.Tensor):
+         disparity = torch.zeros_like(depth)
+     elif isinstance(depth, np.ndarray):
+         disparity = np.zeros_like(depth)
+     non_negtive_mask = depth > 0
+     disparity[non_negtive_mask] = 1.0 / depth[non_negtive_mask]
+     if return_mask:
+         return disparity, non_negtive_mask
+     else:
+         return disparity
+
+
+ def disparity2depth(disparity, **kwargs):
+     return depth2disparity(disparity, **kwargs)
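A small self-test of the least-squares alignment above (editor's sketch, not part of the committed file): it builds a synthetic depth map, applies a known scale and shift, and checks that `align_depth_least_square` recovers them. It assumes the script is run from the FE2E repo root so that `infer.alignment` is importable.

```python
# Editor's illustration only; not part of FE2E/infer/alignment.py.
import numpy as np
from infer.alignment import align_depth_least_square

rng = np.random.default_rng(0)
gt = rng.uniform(1.0, 10.0, size=(1, 64, 64)).astype(np.float32)  # "ground-truth" depth
pred = (gt - 0.5) / 2.0                                           # prediction off by scale/shift
mask = np.ones_like(gt, dtype=bool)

aligned, scale, shift = align_depth_least_square(gt, pred, mask)
print(scale.item(), shift.item())     # approximately 2.0 and 0.5
print(np.abs(aligned - gt).max())     # approximately 0 after alignment
```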
FE2E/infer/configs/data_diode_all.yaml ADDED
@@ -0,0 +1,5 @@
+ name: diode
+ disp_name: diode_val_all
+ dir: diode/diode_val.tar
+ filenames: infer/data_split/diode/diode_val_all_filename_list.txt
+ processing_res: 768
FE2E/infer/configs/data_eth3d.yaml ADDED
@@ -0,0 +1,7 @@
+ name: eth3d
+ disp_name: eth3d_full
+ # dir: eth3d
+ dir: eth3d/eth3d.tar
+ filenames: infer/data_split/eth3d/eth3d_filename_list.txt
+ processing_res: 768
+ alignment_max_res: 1024
FE2E/infer/configs/data_kitti_eigen_test.yaml ADDED
@@ -0,0 +1,6 @@
+ name: kitti
+ disp_name: kitti_eigen_test_full
+ dir: kitti/kitti_eigen_split_test.tar
+ filenames: infer/data_split/kitti/eigen_test_files_with_gt.txt
+ kitti_bm_crop: true
+ valid_mask_crop: eigen
FE2E/infer/configs/data_nyu_test.yaml ADDED
@@ -0,0 +1,5 @@
+ name: nyu_v2
+ disp_name: nyu_test_full
+ dir: nyudepth/nyu_labeled_extracted.tar
+ filenames: infer/data_split/nyu/filename_list_test.txt
+ eigen_valid_mask: true
FE2E/infer/configs/data_scannet_val.yaml ADDED
@@ -0,0 +1,5 @@
+ name: scannet
+ disp_name: scannet_val_800_1
+ # dir: scannet
+ dir: scannet/scannet_val_sampled_800_1.tar
+ filenames: infer/data_split/scannet/scannet_val_sampled_list_800_1.txt
FE2E/infer/dataset/__init__.py ADDED
@@ -0,0 +1,49 @@
+ import os
+
+ from .base_depth_dataset import BaseDepthDataset, get_pred_name, DatasetMode  # noqa: F401
+ from .diode_dataset import DIODEDataset
+ from .eth3d_dataset import ETH3DDataset
+ from .kitti_dataset import KITTIDataset
+ from .nyu_dataset import NYUDataset
+ from .scannet_dataset import ScanNetDataset
+
+
+ dataset_name_class_dict = {
+     "nyu_v2": NYUDataset,
+     "kitti": KITTIDataset,
+     "eth3d": ETH3DDataset,
+     "diode": DIODEDataset,
+     "scannet": ScanNetDataset,
+ }
+
+ REPO_ROOT = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
+
+
+ def _resolve_split_file(path: str) -> str:
+     if os.path.isabs(path) or os.path.exists(path):
+         return path
+
+     candidate = os.path.join(REPO_ROOT, path)
+     if os.path.exists(candidate):
+         return candidate
+
+     return path
+
+
+ def get_dataset(cfg_data_split, base_data_dir: str, mode: DatasetMode, prompt_type="query", **kwargs) -> BaseDepthDataset:
+     if cfg_data_split.name in dataset_name_class_dict.keys():
+         dataset_class = dataset_name_class_dict[cfg_data_split.name]
+         filename_ls_path = cfg_data_split.filenames if not prompt_type == "full" else (cfg_data_split.filenames).replace(".txt", "_wc.txt")
+         filename_ls_path = _resolve_split_file(filename_ls_path)
+         dataset = dataset_class(
+             mode=mode,
+             filename_ls_path=filename_ls_path,
+             dataset_dir=os.path.join(base_data_dir, cfg_data_split.dir),
+             **cfg_data_split,
+             prompt_type=prompt_type,
+             **kwargs,
+         )
+     else:
+         raise NotImplementedError
+
+     return dataset
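A hedged usage sketch for `get_dataset` (editor's illustration, not part of the committed file): it assumes `omegaconf` for loading one of the YAML configs shipped in this commit, that the script runs from the FE2E repo root, and that the corresponding benchmark archive is already in place under `./infer`.

```python
# Editor's illustration only.
# Assumes: run from the FE2E repo root, omegaconf installed,
# and infer/eth3d/eth3d.tar downloaded as described in the README.
from omegaconf import OmegaConf

from infer.dataset import DatasetMode, get_dataset

cfg = OmegaConf.load("infer/configs/data_eth3d.yaml")
dataset = get_dataset(cfg, base_data_dir="./infer", mode=DatasetMode.EVAL)
sample = dataset[0]
print(sample["rgb"].shape, sample["depth_raw_linear"].shape)
```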
FE2E/infer/dataset/base_depth_dataset.py ADDED
@@ -0,0 +1,257 @@
1
+ import io
2
+ import os
3
+ import random
4
+ import tarfile
5
+ from enum import Enum
6
+
7
+ import numpy as np
8
+ import torch
9
+ from PIL import Image
10
+ from torch.utils.data import Dataset
11
+ from torchvision.transforms import InterpolationMode, Resize
12
+ import torchvision.transforms.functional as F
13
+
14
+ class DatasetMode(Enum):
15
+ RGB_ONLY = "rgb_only"
16
+ EVAL = "evaluate"
17
+ TRAIN = "train"
18
+
19
+
20
+ def read_image_from_tar(tar_obj, img_rel_path):
21
+ image = tar_obj.extractfile("./" + img_rel_path)
22
+ image = image.read()
23
+ image = Image.open(io.BytesIO(image))
+ return image
+
25
+
26
+ class BaseDepthDataset(Dataset):
27
+
28
+ def __init__(
29
+ self, mode: DatasetMode, filename_ls_path: str, dataset_dir: str, disp_name: str, min_depth, max_depth, has_filled_depth, name_mode, depth_transform=None, augmentation_args: dict = None, resize_to_hw=None,
30
+ move_invalid_to_far_plane: bool = True, rgb_transform=None, prompt_type="query", **kwargs,
31
+ ) -> None:
32
+ super().__init__()
33
+ self.mode = mode
34
+
35
+ self.filename_ls_path = filename_ls_path
36
+ self.dataset_dir = dataset_dir
37
+ self.disp_name = disp_name
38
+ self.has_filled_depth = has_filled_depth
39
+ self.name_mode: DepthFileNameMode = name_mode
40
+ self.min_depth = min_depth
41
+ self.max_depth = max_depth
42
+
43
+ self.depth_transform = depth_transform
44
+ self.augm_args = augmentation_args
45
+ self.resize_to_hw = resize_to_hw
46
+ self.prompt_type = prompt_type
47
+ # 设置默认的rgb_transform函数
48
+ if rgb_transform is None:
49
+ self.rgb_transform = self._default_rgb_transform
50
+ else:
51
+ self.rgb_transform = rgb_transform
52
+ self.move_invalid_to_far_plane = move_invalid_to_far_plane
53
+
54
+ # Load filenames
55
+ with open(self.filename_ls_path, "r") as f:
56
+ self.filenames = [s.split() for s in f.readlines()] # [['rgb.png', 'depth.tif'], [], ...]
57
+
58
+ # Tar dataset
59
+ self.tar_obj = None
60
+ self.tar_obj_pid = None
61
+ self.is_tar = (True if os.path.isfile(dataset_dir) and tarfile.is_tarfile(dataset_dir) else False)
62
+
63
+ def __len__(self):
64
+ return len(self.filenames)
65
+
66
+ def __getitem__(self, index):
67
+ rasters, other = self._get_data_item(index)
68
+ if DatasetMode.TRAIN == self.mode:
69
+ rasters = self._training_preprocess(rasters)
70
+ # merge
71
+ outputs = rasters
72
+ outputs.update(other)
73
+ return outputs
74
+
75
+ def _get_data_item(self, index):
76
+ rgb_rel_path, depth_rel_path, filled_rel_path, prompt = self._get_data_path(index=index)
77
+
78
+ rasters = {}
79
+
80
+ # RGB data
81
+ rasters.update(self._load_rgb_data(rgb_rel_path=rgb_rel_path))
82
+
83
+ # Depth data
84
+ if DatasetMode.RGB_ONLY != self.mode:
85
+ # load data
86
+ depth_data = self._load_depth_data(depth_rel_path=depth_rel_path, filled_rel_path=filled_rel_path)
87
+ rasters.update(depth_data)
88
+ # valid mask
89
+ rasters["valid_mask_raw"] = self._get_valid_mask(rasters["depth_raw_linear"]).clone()
90
+ rasters["valid_mask_filled"] = self._get_valid_mask(rasters["depth_filled_linear"]).clone()
91
+
92
+
93
+ other = {"index": index, "rgb_relative_path": rgb_rel_path, "prompt": prompt}
94
+
95
+ return rasters, other
96
+
97
+ def _load_rgb_data(self, rgb_rel_path):
98
+ # Read RGB data
99
+ _, rgb = self._read_image(rgb_rel_path)
100
+ rgb = self.input_process_image(rgb)
101
+ outputs = {"rgb": rgb}
102
+ return outputs
103
+
104
+ def input_process_image(self, image):
105
+ if isinstance(image, np.ndarray):
106
+ image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
107
+
108
+ return image
109
+ elif isinstance(image, Image.Image):
110
+ image = F.to_tensor(image.convert("RGB"))
111
+
112
+ return image
113
+ elif isinstance(image, torch.Tensor):
114
+ return image
115
+ elif isinstance(image, str):
116
+ image = F.to_tensor(Image.open(image).convert("RGB"))
117
+
118
+ return image
119
+ return image
120
+
121
+ def _load_depth_data(self, depth_rel_path, filled_rel_path):
122
+ # Read depth data
123
+ outputs = {}
124
+ depth_raw = self._read_depth_file(depth_rel_path).squeeze()
125
+ depth_raw_linear = torch.from_numpy(depth_raw).float().unsqueeze(0) # [1, H, W]
126
+ outputs["depth_raw_linear"] = depth_raw_linear.clone()
127
+
128
+ if self.has_filled_depth:
129
+ depth_filled = self._read_depth_file(filled_rel_path).squeeze()
130
+ depth_filled_linear = torch.from_numpy(depth_filled).float().unsqueeze(0)
131
+ outputs["depth_filled_linear"] = depth_filled_linear
132
+ else:
133
+ outputs["depth_filled_linear"] = depth_raw_linear.clone()
134
+
135
+ return outputs
136
+
137
+ def _get_data_path(self, index):
138
+ filename_line = self.filenames[index]
139
+ rgb_rel_path = filename_line[0]
140
+ depth_rel_path, filled_rel_path = None, None
141
+ if DatasetMode.RGB_ONLY != self.mode:
142
+ depth_rel_path = filename_line[1]
143
+ if self.has_filled_depth:
144
+ filled_rel_path = filename_line[2]
145
+
146
+ if self.prompt_type == "full":
147
+ if filename_line[2][0] in ["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "N", "O", "P", "Q", "R", "S", "T", "U", "V", "W", "X", "Y", "Z"]:
148
+ prompt = ' '.join(filename_line[2:])
149
+ else:
150
+ prompt = ' '.join(filename_line[3:])
151
+ else:
152
+ prompt = 1
153
+ return rgb_rel_path, depth_rel_path, filled_rel_path, prompt
154
+
155
+ def _read_image(self, img_rel_path):
156
+ if self.is_tar:
157
+ tar_obj = self._ensure_tar_obj()
158
+ image = tar_obj.extractfile("./" + img_rel_path)
159
+ image = image.read()
160
+ image = Image.open(io.BytesIO(image))
161
+ else:
162
+ img_path = os.path.join(self.dataset_dir, img_rel_path)
163
+ image = Image.open(img_path)
164
+ image_arr = np.asarray(image)
165
+ return image_arr, image
166
+
167
+ def _read_depth_file(self, rel_path):
168
+ depth_in, _ = self._read_image(rel_path)
169
+ # Replace code below to decode depth according to dataset definition
170
+ depth_decoded = depth_in
171
+
172
+ return depth_decoded
173
+
174
+ def _get_valid_mask(self, depth: torch.Tensor):
175
+ valid_mask = torch.logical_and((depth > self.min_depth), (depth < self.max_depth)).bool()
176
+ return valid_mask
177
+
178
+ def _training_preprocess(self, rasters):
179
+ # Augmentation
180
+ if self.augm_args is not None:
181
+ rasters = self._augment_data(rasters)
182
+
183
+ # Normalization
184
+ rasters["depth_raw_norm"] = self.depth_transform(rasters["depth_raw_linear"], rasters["valid_mask_raw"]).clone()
185
+ rasters["depth_filled_norm"] = self.depth_transform(rasters["depth_filled_linear"], rasters["valid_mask_filled"]).clone()
186
+
187
+ # Set invalid pixel to far plane
188
+ if self.move_invalid_to_far_plane:
189
+ if self.depth_transform.far_plane_at_max:
190
+ rasters["depth_filled_norm"][~rasters["valid_mask_filled"]] = self.depth_transform.norm_max
191
+ else:
192
+ rasters["depth_filled_norm"][~rasters["valid_mask_filled"]] = self.depth_transform.norm_min
193
+
194
+ # Resize
195
+ if self.resize_to_hw is not None:
196
+ resize_transform = Resize(size=self.resize_to_hw, interpolation=InterpolationMode.NEAREST_EXACT)
197
+ rasters = {k: resize_transform(v) for k, v in rasters.items()}
198
+
199
+ return rasters
200
+
201
+ def _augment_data(self, rasters_dict):
202
+ # lr flipping
203
+ lr_flip_p = self.augm_args.lr_flip_p
204
+ if random.random() < lr_flip_p:
205
+ rasters_dict = {k: v.flip(-1) for k, v in rasters_dict.items()}
206
+
207
+ return rasters_dict
208
+
209
+ def __del__(self):
210
+ if self.tar_obj is not None:
211
+ self.tar_obj.close()
212
+ self.tar_obj = None
213
+ self.tar_obj_pid = None
214
+
215
+ def _default_rgb_transform(self, x):
216
+ """默认的RGB变换函数: [0, 255] -> [-1, 1]"""
217
+ return x / 255.0 * 2 - 1
218
+
219
+ def _ensure_tar_obj(self):
220
+ """Ensure each process owns its own tar handle to avoid cross-process FD issues."""
221
+ if not self.is_tar:
222
+ return None
223
+ current_pid = os.getpid()
224
+ if self.tar_obj is None or self.tar_obj_pid != current_pid:
225
+ if self.tar_obj is not None:
226
+ try:
227
+ self.tar_obj.close()
228
+ except Exception:
229
+ pass
230
+ self.tar_obj = tarfile.open(self.dataset_dir)
231
+ self.tar_obj_pid = current_pid
232
+ return self.tar_obj
233
+
234
+
235
+ # Prediction file naming modes
236
+ class DepthFileNameMode(Enum):
237
+ id = 1 # id.png
238
+ rgb_id = 2 # rgb_id.png
239
+ i_d_rgb = 3 # i_d_1_rgb.png
240
+ rgb_i_d = 4
241
+
242
+
243
+ def get_pred_name(rgb_basename, name_mode, suffix=".png"):
244
+ if DepthFileNameMode.rgb_id == name_mode:
245
+ pred_basename = "pred_" + rgb_basename.split("_")[1]
246
+ elif DepthFileNameMode.i_d_rgb == name_mode:
247
+ pred_basename = rgb_basename.replace("_rgb.", "_pred.")
248
+ elif DepthFileNameMode.id == name_mode:
249
+ pred_basename = "pred_" + rgb_basename
250
+ elif DepthFileNameMode.rgb_i_d == name_mode:
251
+ pred_basename = "pred_" + "_".join(rgb_basename.split("_")[1:])
252
+ else:
253
+ raise NotImplementedError
254
+ # change suffix
255
+ pred_basename = os.path.splitext(pred_basename)[0] + suffix
256
+
257
+ return pred_basename
FE2E/infer/dataset/diode_dataset.py ADDED
@@ -0,0 +1,71 @@
+ # Author: Bingxin Ke
+ # Last modified: 2024-02-26
+
+ import os
+ import tarfile
+ from io import BytesIO
+
+ import numpy as np
+ import torch
+
+ from .base_depth_dataset import BaseDepthDataset, DepthFileNameMode, DatasetMode
+
+
+ class DIODEDataset(BaseDepthDataset):
+     def __init__(
+         self,
+         **kwargs,
+     ) -> None:
+         super().__init__(
+             # DIODE data parameter
+             min_depth=0.6,
+             max_depth=350,
+             has_filled_depth=False,
+             name_mode=DepthFileNameMode.id,
+             **kwargs,
+         )
+
+     def _read_npy_file(self, rel_path):
+         if self.is_tar:
+             if self.tar_obj is None:
+                 self.tar_obj = tarfile.open(self.dataset_dir)
+             fileobj = self.tar_obj.extractfile("./" + rel_path)
+             npy_path_or_content = BytesIO(fileobj.read())
+         else:
+             npy_path_or_content = os.path.join(self.dataset_dir, rel_path)
+         data = np.load(npy_path_or_content).squeeze()[np.newaxis, :, :]
+         return data
+
+     def _read_depth_file(self, rel_path):
+         depth = self._read_npy_file(rel_path)
+         return depth
+
+     def _get_data_path(self, index):
+         return self.filenames[index][0], self.filenames[index][1], self.filenames[index][2], self.filenames[index][3:] if len(self.filenames[index]) > 3 else 1
+
+     def _get_data_item(self, index):
+
+         rgb_rel_path, depth_rel_path, mask_rel_path, prompt = self._get_data_path(index=index)
+
+         rasters = {}
+
+         # RGB data
+         rasters.update(self._load_rgb_data(rgb_rel_path=rgb_rel_path))
+
+         # Depth data
+         if DatasetMode.RGB_ONLY != self.mode:
+             # load data
+             depth_data = self._load_depth_data(
+                 depth_rel_path=depth_rel_path, filled_rel_path=None
+             )
+             rasters.update(depth_data)
+
+             # valid mask
+             mask = self._read_npy_file(mask_rel_path).astype(bool)
+             mask = torch.from_numpy(mask).bool()
+             rasters["valid_mask_raw"] = mask.clone()
+             rasters["valid_mask_filled"] = mask.clone()
+
+         other = {"index": index, "rgb_relative_path": rgb_rel_path, "prompt": prompt}
+
+         return rasters, other
FE2E/infer/dataset/drivingstereo_dataset.py ADDED
@@ -0,0 +1,32 @@
+ import os
+ import json
+ import numpy as np
+ import PIL.Image as pil
+
+ from .mono_dataset import MonoDataset
+
+
+ class DrivingStereoDataset(MonoDataset):
+     RAW_HEIGHT = 800
+     RAW_WIDTH = 1762
+
+     def __init__(self, *args, **kwargs):
+         super(DrivingStereoDataset, self).__init__(*args, **kwargs)
+         self.forename = {"rainy": "2018-08-17-09-45-58_2018-08-17-10-", "foggy": "2018-10-25-07-37-26_2018-10-25-", "sunny": "2018-10-19-09-30-39_2018-10-19-", "cloudy": "2018-10-31-06-55-01_2018-10-31-"}
+
+     def get_color(self, weather, name, do_flip):
+         path, name = self.get_image_path(weather, name)
+         color = self.loader(path)
+
+         return color, name
+
+     def get_image_path(self, weather, frame_name):
+         folder = "left-image-full-size"
+         image_path = os.path.join(self.opts.data_path, weather, folder, frame_name)
+         image_name = os.path.join(weather, folder, frame_name)
+         if self.opts.debug >= 3:
+             print(image_name)
+         return image_path, image_name
+
+     def index_to_name(self, weather, index):
+         return self.forename[weather] + self.filenames[index] + ".png"
FE2E/infer/dataset/eth3d_dataset.py ADDED
@@ -0,0 +1,45 @@
+ # Author: Bingxin Ke
+ # Last modified: 2024-02-08
+
+ import torch
+ import tarfile
+ import os
+ import numpy as np
+
+ from .base_depth_dataset import BaseDepthDataset, DepthFileNameMode
+
+
+ class ETH3DDataset(BaseDepthDataset):
+     HEIGHT, WIDTH = 4032, 6048
+
+     def __init__(
+         self,
+         **kwargs,
+     ) -> None:
+         super().__init__(
+             # ETH3D data parameter
+             min_depth=1e-5,
+             max_depth=torch.inf,
+             has_filled_depth=False,
+             name_mode=DepthFileNameMode.id,
+             **kwargs,
+         )
+
+     def _read_depth_file(self, rel_path):
+         # Read special binary data: https://www.eth3d.net/documentation#format-of-multi-view-data-image-formats
+         if self.is_tar:
+             tar_obj = self._ensure_tar_obj()
+             binary_data = tar_obj.extractfile("./" + rel_path)
+             binary_data = binary_data.read()
+
+         else:
+             depth_path = os.path.join(self.dataset_dir, rel_path)
+             with open(depth_path, "rb") as file:
+                 binary_data = file.read()
+         # Convert the binary data to a numpy array of 32-bit floats
+         depth_decoded = np.frombuffer(binary_data, dtype=np.float32).copy()
+
+         depth_decoded[depth_decoded == torch.inf] = 0.0
+
+         depth_decoded = depth_decoded.reshape((self.HEIGHT, self.WIDTH))
+         return depth_decoded
FE2E/infer/dataset/kitti_dataset.py ADDED
@@ -0,0 +1,105 @@
+ # Author: Bingxin Ke
+ # Last modified: 2024-02-08
+
+ import torch
+
+ from .base_depth_dataset import BaseDepthDataset, DepthFileNameMode
+
+
+ class KITTIDataset(BaseDepthDataset):
+     def __init__(
+         self,
+         kitti_bm_crop,  # Crop to KITTI benchmark size
+         valid_mask_crop,  # Evaluation mask. [None, garg or eigen]
+         **kwargs,
+     ) -> None:
+         super().__init__(
+             # KITTI data parameter
+             min_depth=1e-5,
+             max_depth=80,
+             has_filled_depth=False,
+             name_mode=DepthFileNameMode.id,
+             **kwargs,
+         )
+         self.kitti_bm_crop = kitti_bm_crop
+         self.valid_mask_crop = valid_mask_crop
+         assert self.valid_mask_crop in [
+             None,
+             "garg",  # set evaluation mask according to Garg ECCV16
+             "eigen",  # set evaluation mask according to Eigen NIPS14
+         ], f"Unknown crop type: {self.valid_mask_crop}"
+
+         # Filter out empty depth
+         self.filenames = [f for f in self.filenames if "None" != f[1]]
+
+     def _read_depth_file(self, rel_path):
+         depth_in, _ = self._read_image(rel_path)
+         # Decode KITTI depth
+         depth_decoded = depth_in / 256.0
+         return depth_decoded
+
+     def _load_rgb_data(self, rgb_rel_path):
+         rgb_data = super()._load_rgb_data(rgb_rel_path)
+         if self.kitti_bm_crop:
+             rgb_data = {k: self.kitti_benchmark_crop(v) for k, v in rgb_data.items()}
+         return rgb_data
+
+     def _load_depth_data(self, depth_rel_path, filled_rel_path):
+         depth_data = super()._load_depth_data(depth_rel_path, filled_rel_path)
+         if self.kitti_bm_crop:
+             depth_data = {
+                 k: self.kitti_benchmark_crop(v) for k, v in depth_data.items()
+             }
+         return depth_data
+
+     @staticmethod
+     def kitti_benchmark_crop(input_img):
+         """
+         Crop images to KITTI benchmark size
+         Args:
+             `input_img` (torch.Tensor): Input image to be cropped.
+
+         Returns:
+             torch.Tensor: Cropped image.
+         """
+         KB_CROP_HEIGHT = 352
+         KB_CROP_WIDTH = 1216
+
+         height, width = input_img.shape[-2:]
+         top_margin = int(height - KB_CROP_HEIGHT)
+         left_margin = int((width - KB_CROP_WIDTH) / 2)
+         if 2 == len(input_img.shape):
+             out = input_img[
+                 top_margin: top_margin + KB_CROP_HEIGHT,
+                 left_margin: left_margin + KB_CROP_WIDTH,
+             ]
+         elif 3 == len(input_img.shape):
+             out = input_img[
+                 :,
+                 top_margin: top_margin + KB_CROP_HEIGHT,
+                 left_margin: left_margin + KB_CROP_WIDTH,
+             ]
+         return out
+
+     def _get_valid_mask(self, depth: torch.Tensor):
+         # reference: https://github.com/cleinc/bts/blob/master/pytorch/bts_eval.py
+         valid_mask = super()._get_valid_mask(depth)  # [1, H, W]
+
+         if self.valid_mask_crop is not None:
+             eval_mask = torch.zeros_like(valid_mask.squeeze()).bool()
+             gt_height, gt_width = eval_mask.shape
+
+             if "garg" == self.valid_mask_crop:
+                 eval_mask[
+                     int(0.40810811 * gt_height): int(0.99189189 * gt_height),
+                     int(0.03594771 * gt_width): int(0.96405229 * gt_width),
+                 ] = 1
+             elif "eigen" == self.valid_mask_crop:
+                 eval_mask[
+                     int(0.3324324 * gt_height): int(0.91351351 * gt_height),
+                     int(0.0359477 * gt_width): int(0.96405229 * gt_width),
+                 ] = 1
+
+             eval_mask.reshape(valid_mask.shape)
+             valid_mask = torch.logical_and(valid_mask, eval_mask)
+         return valid_mask
FE2E/infer/dataset/nyu_dataset.py ADDED
@@ -0,0 +1,43 @@
1
+ # Author: Bingxin Ke
2
+ # Last modified: 2024-02-08
3
+
4
+
5
+ import torch
6
+
7
+ from .base_depth_dataset import BaseDepthDataset, DepthFileNameMode
8
+
9
+
10
+ class NYUDataset(BaseDepthDataset):
11
+ def __init__(
12
+ self,
13
+ eigen_valid_mask: bool,
14
+ **kwargs,
15
+ ) -> None:
16
+ super().__init__(
17
+ # NYUv2 dataset parameter
18
+ min_depth=1e-3,
19
+ max_depth=10.0,
20
+ has_filled_depth=True,
21
+ name_mode=DepthFileNameMode.rgb_id,
22
+ **kwargs,
23
+ )
24
+
25
+ self.eigen_valid_mask = eigen_valid_mask
26
+
27
+ def _read_depth_file(self, rel_path):
28
+ depth_in,_ = self._read_image(rel_path)
29
+ # Decode NYU depth
30
+ depth_decoded = depth_in / 1000.0
31
+ return depth_decoded
32
+
33
+ def _get_valid_mask(self, depth: torch.Tensor):
34
+ valid_mask = super()._get_valid_mask(depth)
35
+
36
+ # Eigen crop for evaluation
37
+ if self.eigen_valid_mask:
38
+ eval_mask = torch.zeros_like(valid_mask.squeeze()).bool()
39
+ eval_mask[45:471, 41:601] = 1
40
+ eval_mask = eval_mask.reshape(valid_mask.shape)
41
+ valid_mask = torch.logical_and(valid_mask, eval_mask)
42
+
43
+ return valid_mask
FE2E/infer/dataset/scannet_dataset.py ADDED
@@ -0,0 +1,25 @@
1
+ # Author: Bingxin Ke
2
+ # Last modified: 2024-02-08
3
+
4
+ from .base_depth_dataset import BaseDepthDataset, DepthFileNameMode
5
+
6
+
7
+ class ScanNetDataset(BaseDepthDataset):
8
+ def __init__(
9
+ self,
10
+ **kwargs,
11
+ ) -> None:
12
+ super().__init__(
13
+ # ScanNet data parameter
14
+ min_depth=1e-3,
15
+ max_depth=10,
16
+ has_filled_depth=False,
17
+ name_mode=DepthFileNameMode.id,
18
+ **kwargs,
19
+ )
20
+
21
+ def _read_depth_file(self, rel_path):
22
+ depth_in,_ = self._read_image(rel_path)
23
+ # Decode ScanNet depth
24
+ depth_decoded = depth_in / 1000.0
25
+ return depth_decoded
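KITTIDataset, NYUDataset and ScanNetDataset all decode 16-bit depth PNGs into metres with a fixed divisor (256 for KITTI, 1000 for NYUv2 and ScanNet). A standalone sketch of that decoding, with a hypothetical file path and assuming OpenCV is available:

import cv2
import numpy as np

depth_png = cv2.imread('example_depth.png', cv2.IMREAD_UNCHANGED)  # hypothetical uint16 depth map
depth_m = depth_png.astype(np.float32) / 1000.0  # NYUv2/ScanNet scale; use 256.0 for KITTI
valid = depth_m > 0  # zero-valued pixels carry no measurement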
FE2E/infer/dataset_normal/__init__.py ADDED
@@ -0,0 +1,27 @@
1
+ """ data sample
2
+ """
3
+ class Sample():
4
+ def __init__(self, img=None,
5
+ depth=None, depth_mask=None,
6
+ normal=None, normal_mask=None,
7
+ intrins=None, flipped=False,
8
+ dataset_name='dataset', scene_name='scene', img_name='img',
9
+ info={}):
10
+
11
+ self.img = img # input image
12
+
13
+ self.depth = depth # depth - GT
14
+ self.depth_mask = depth_mask # depth - valid_mask
15
+
16
+ self.normal = normal # surface normals - GT
17
+ self.normal_mask = normal_mask # surface normals - valid_mask
18
+
19
+ self.intrins = intrins # camera intrinsics
20
+ self.flipped = flipped # True when the image is flipped during augmentation
21
+
22
+ self.dataset_name = dataset_name
23
+ self.scene_name = scene_name
24
+ self.img_name = img_name
25
+
26
+ # other info (this is a dict containing any additional information)
27
+ self.info = info
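Sample is a plain record that the normal-estimation loaders below fill in. A minimal construction, assuming numpy arrays in the (H, W, C) layout expected by ToTensor in aug_basic.py and the FE2E repo root on PYTHONPATH:

import numpy as np
from infer.dataset_normal import Sample

img = np.random.rand(480, 640, 3).astype(np.float32)   # RGB in [0, 1]
normal = np.zeros_like(img)
normal[..., 2] = 1.0                                    # unit normals, all facing the camera
normal_mask = np.ones((480, 640, 1), dtype=bool)

sample = Sample(img=img, normal=normal, normal_mask=normal_mask,
                dataset_name='demo', scene_name='scene0', img_name='frame0')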
FE2E/infer/dataset_normal/aug_basic.py ADDED
@@ -0,0 +1,239 @@
1
+ """ basic augmentations
2
+ """
3
+ import random
4
+ import numpy as np
5
+
6
+ import torch
7
+ from torchvision import transforms
8
+ import torch.nn.functional as F
9
+ import torchvision.transforms.functional as TF
10
+
11
+ import logging
12
+ logger = logging.getLogger('root')
13
+
14
+
15
+ def resize(sample, new_H, new_W):
16
+ _, orig_H, orig_W = sample.img.shape
17
+ sample.img = F.interpolate(sample.img.unsqueeze(0), size=(new_H, new_W), mode='bilinear', align_corners=False, antialias=True).squeeze(0)
18
+ if sample.depth is not None:
19
+ sample.depth = F.interpolate(sample.depth.unsqueeze(0), size=(new_H, new_W), mode='nearest').squeeze(0)
20
+ if sample.depth_mask is not None:
21
+ sample.depth_mask = F.interpolate(sample.depth_mask.unsqueeze(0).float(), size=(new_H, new_W), mode='nearest').squeeze(0) > 0.5
22
+ if sample.normal is not None:
23
+ sample.normal = F.interpolate(sample.normal.unsqueeze(0), size=(new_H, new_W), mode='nearest').squeeze(0)
24
+ if sample.normal_mask is not None:
25
+ sample.normal_mask = F.interpolate(sample.normal_mask.unsqueeze(0).float(), size=(new_H, new_W), mode='nearest').squeeze(0) > 0.5
26
+ if sample.intrins is not None:
27
+ # NOTE: top-left is (0,0)
28
+ sample.intrins[0, 0] = sample.intrins[0, 0] * (new_W / orig_W) # fx
29
+ sample.intrins[1, 1] = sample.intrins[1, 1] * (new_H / orig_H) # fy
30
+ sample.intrins[0, 2] = (sample.intrins[0, 2] + 0.5) * (new_W / orig_W) - 0.5 # cx
31
+ sample.intrins[1, 2] = (sample.intrins[1, 2] + 0.5) * (new_H / orig_H) - 0.5 # cy
32
+ return sample
33
+
34
+
35
+ def pad(sample, lrtb):
36
+ l, r, t, b = lrtb
37
+ sample.img = F.pad(sample.img, (l, r, t, b), mode="constant", value=0)
38
+ if sample.depth is not None:
39
+ sample.depth = F.pad(sample.depth, (l, r, t, b), mode="constant", value=0)
40
+ if sample.depth_mask is not None:
41
+ sample.depth_mask = F.pad(sample.depth_mask, (l, r, t, b), mode="constant", value=False)
42
+ if sample.normal is not None:
43
+ sample.normal = F.pad(sample.normal, (l, r, t, b), mode="constant", value=0)
44
+ if sample.normal_mask is not None:
45
+ sample.normal_mask = F.pad(sample.normal_mask, (l, r, t, b), mode="constant", value=False)
46
+ if sample.intrins is not None:
47
+ sample.intrins[0, 2] = sample.intrins[0, 2] + l
48
+ sample.intrins[1, 2] = sample.intrins[1, 2] + t
49
+ return sample
50
+
51
+
52
+ def crop(sample, y, H, x, W):
53
+ sample.img = sample.img[:, y:y+H, x:x+W]
54
+ if sample.depth is not None:
55
+ sample.depth = sample.depth[:, y:y+H, x:x+W]
56
+ if sample.depth_mask is not None:
57
+ sample.depth_mask = sample.depth_mask[:, y:y+H, x:x+W]
58
+ if sample.normal is not None:
59
+ sample.normal = sample.normal[:, y:y+H, x:x+W]
60
+ if sample.normal_mask is not None:
61
+ sample.normal_mask = sample.normal_mask[:, y:y+H, x:x+W]
62
+ if sample.intrins is not None:
63
+ sample.intrins[0, 2] = sample.intrins[0, 2] - x
64
+ sample.intrins[1, 2] = sample.intrins[1, 2] - y
65
+ return sample
66
+
67
+
68
+ class ToTensor():
69
+ """ numpy arrays to torch tensors
70
+ """
71
+ def __call__(self, sample):
72
+ sample.img = torch.from_numpy(sample.img).permute(2, 0, 1) # (3, H, W)
73
+ if sample.depth is not None:
74
+ sample.depth = torch.from_numpy(sample.depth).permute(2, 0, 1) # (1, H, W)
75
+ if sample.depth_mask is not None:
76
+ sample.depth_mask = torch.from_numpy(sample.depth_mask).permute(2, 0, 1) # (1, H, W)
77
+ if sample.normal is not None:
78
+ sample.normal = torch.from_numpy(sample.normal).permute(2, 0, 1) # (3, H, W)
79
+ if sample.normal_mask is not None:
80
+ sample.normal_mask = torch.from_numpy(sample.normal_mask).permute(2, 0, 1) # (1, H, W)
81
+ if sample.intrins is not None:
82
+ sample.intrins = torch.from_numpy(sample.intrins) # (3, 3)
83
+ return sample
84
+
85
+
86
+ class RandomIntrins():
87
+ """ randomize intrinsics
88
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
89
+ """
90
+ def __call__(self, sample):
91
+ assert 'crop_H' in sample.info.keys()
92
+ assert 'crop_W' in sample.info.keys()
93
+ crop_H = sample.info['crop_H']
94
+ crop_W = sample.info['crop_W']
95
+
96
+ # height-based resizing
97
+ _, orig_H, orig_W = sample.img.shape
98
+ new_H = random.randrange(min(orig_H, crop_H), max(orig_H, crop_H)+1)
99
+ new_W = round((new_H / orig_H) * orig_W)
100
+ sample = resize(sample, new_H=new_H, new_W=new_W)
101
+
102
+ # pad if necessary
103
+ orig_H, orig_W = sample.img.shape[1], sample.img.shape[2]
104
+ l, r, t, b = 0, 0, 0, 0
105
+ if crop_H > orig_H:
106
+ t = b = crop_H - orig_H
107
+ if crop_W > orig_W:
108
+ l = r = crop_W - orig_W
109
+ sample = pad(sample, (l, r, t, b))
110
+
111
+ # crop
112
+ assert sample.img.shape[1] >= crop_H
113
+ assert sample.img.shape[2] >= crop_W
114
+ x = random.randint(0, sample.img.shape[2] - crop_W)
115
+ y = random.randint(0, sample.img.shape[1] - crop_H)
116
+ sample = crop(sample, y=y, H=crop_H, x=x, W=crop_W)
117
+
118
+ return sample
119
+
120
+
121
+ class Resize():
122
+ """ resize to (H, W)
123
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
124
+ """
125
+ def __init__(self, H=480, W=640):
126
+ self.H = H
127
+ self.W = W
128
+
129
+ def __call__(self, sample):
130
+ return resize(sample, new_H=self.H, new_W=self.W)
131
+
132
+
133
+ class RandomCrop():
134
+ """ random crop
135
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
136
+ """
137
+ def __init__(self, H=416, W=544):
138
+ self.H = H
139
+ self.W = W
140
+
141
+ def __call__(self, sample):
142
+ assert sample.img.shape[1] >= self.H
143
+ assert sample.img.shape[2] >= self.W
144
+ x = random.randint(0, sample.img.shape[2] - self.W)
145
+ y = random.randint(0, sample.img.shape[1] - self.H)
146
+ return crop(sample, y=y, H=self.H, x=x, W=self.W)
147
+
148
+
149
+ class NyuCrop():
150
+ """ crop image border for NYUv2 images
151
+ W = 43:608 / H = 45:472
152
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
153
+ """
154
+ def __call__(self, sample):
155
+ return crop(sample, y=45, H=472-45, x=43, W=608-43)
156
+
157
+
158
+ class HorizontalFlip():
159
+ """ random horizontal flipping
160
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
161
+ """
162
+ def __init__(self, p=0.5):
163
+ self.p = p
164
+
165
+ def __call__(self, sample):
166
+ if random.random() < self.p:
167
+ sample.img = TF.hflip(sample.img)
168
+ if sample.depth is not None:
169
+ sample.depth = TF.hflip(sample.depth)
170
+ if sample.depth_mask is not None:
171
+ sample.depth_mask = TF.hflip(sample.depth_mask)
172
+ if sample.normal is not None:
173
+ sample.normal = TF.hflip(sample.normal)
174
+ sample.normal[0, :, :] = -sample.normal[0, :, :]
175
+ if sample.normal_mask is not None:
176
+ sample.normal_mask = TF.hflip(sample.normal_mask)
177
+ if sample.intrins is not None:
178
+ # NOTE: top-left is (0,0)
179
+ _, H, W = sample.img.shape
180
+ sample.intrins[0, 2] = sample.intrins[0, 2] + 0.5 # top-left is (0.5, 0.5)
181
+ sample.intrins[0, 2] = W - sample.intrins[0, 2]
182
+ sample.intrins[0, 2] = sample.intrins[0, 2] - 0.5 # top-left is (0, 0)
183
+ sample.flipped = True
184
+ return sample
185
+
186
+
187
+ class ColorAugmentation():
188
+ """ color augmentation
189
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
190
+ """
191
+ def __init__(self, gamma_range=(0.9, 1.1),
192
+ brightness_range=(0.75, 1.25),
193
+ color_range=(0.9, 1.1),
194
+ p=0.5):
195
+ self.gamma_range = gamma_range
196
+ self.brightness_range = brightness_range
197
+ self.color_range = color_range
198
+ self.p = p
199
+
200
+ def __call__(self, sample):
201
+ if random.random() < self.p:
202
+ # gamma augmentation
203
+ gamma = random.uniform(*self.gamma_range)
204
+ sample.img = sample.img ** gamma
205
+
206
+ # brightness augmentation
207
+ brightness = random.uniform(*self.brightness_range)
208
+ sample.img = sample.img * brightness
209
+
210
+ # color augmentation
211
+ colors = np.random.uniform(*self.color_range, size=3).astype(np.float32)
212
+ colors = torch.from_numpy(colors).view(3, 1, 1)
213
+ sample.img = sample.img * colors
214
+
215
+ # clip
216
+ sample.img = torch.clip(sample.img, 0, 1)
217
+
218
+ return sample
219
+
220
+
221
+ class Normalize():
222
+ """ mean & std: for image normalization
223
+ sample.img is a torch tensor of shape (3, H, W), normalized to [0, 1]
224
+ """
225
+ def __init__(self, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
226
+ self.normalize = transforms.Normalize(mean=mean, std=std)
227
+
228
+ def __call__(self, sample):
229
+ sample.img = self.normalize(torch.clip(sample.img, min=0.0, max=1.0))
230
+ return sample
231
+
232
+
233
+ class ToDict():
234
+ def __call__(self, sample):
235
+ data_dict = {}
236
+ for k, v in vars(sample).items():
237
+ if v is not None:
238
+ data_dict[k] = v
239
+ return data_dict
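These transforms are meant to be chained with torchvision's Compose, mirroring what get_transform in normal_dataloader.py builds for test time. A minimal sketch:

from torchvision import transforms
from infer.dataset_normal import aug_basic

test_tf = transforms.Compose([
    aug_basic.ToTensor(),  # numpy (H, W, C) arrays -> torch (C, H, W) tensors
    aug_basic.ToDict(),    # Sample object -> plain dict for the DataLoader collate
])
# data_dict = test_tf(sample)  # sample: an infer.dataset_normal.Sample holding numpy arrays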
FE2E/infer/dataset_normal/hypersim/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ """ Get samples from Hypersim dataset
2
+ Based on vkitti implementation
3
+ """
4
+ import os
5
+ import cv2
6
+ import numpy as np
7
+
8
+ from infer.dataset_normal import Sample
9
+
10
+
11
+ def get_sample(base_data_dir, sample_path, info):
12
+ # e.g. sample_path = "ai_001_001/rgb_cam_00_fr0000.png"
13
+ scene_name = sample_path.split('/')[0]
14
+ img_filename = sample_path.split('/')[1]
15
+
16
+ # Extract frame number from filename like "rgb_cam_00_fr0000.png"
17
+ frame_num = img_filename.split('_fr')[1].split('.')[0]
18
+
19
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'hypersim')
20
+ img_path = os.path.join(dataset_path, sample_path)
21
+
22
+ # Build corresponding normal path
23
+ normal_filename = f'depth_plane_cam_00_fr{frame_num}_1024x0768_normal_decoded_normal.png'
24
+ normal_path = os.path.join(dataset_path, scene_name, normal_filename)
25
+
26
+ assert os.path.exists(img_path), f"Image not found: {img_path}"
27
+ assert os.path.exists(normal_path), f"Normal not found: {normal_path}"
28
+
29
+ # read image (H, W, 3)
30
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
31
+ img = img.astype(np.float32) / 255.0
32
+
33
+ # read normal (H, W, 3)
34
+ normal = cv2.cvtColor(cv2.imread(normal_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
35
+ normal_mask = np.sum(normal, axis=2, keepdims=True) > 0
36
+ normal = (normal.astype(np.float32) / 255.0) * 2.0 - 1.0
37
+
38
+ # Create default intrinsics since hypersim doesn't provide them
39
+ # Using typical values for 1024x768 resolution
40
+ H, W = img.shape[:2]
41
+ fx = fy = 0.8 * W # Typical focal length assumption
42
+ cx = W / 2.0
43
+ cy = H / 2.0
44
+ intrins = np.array([
45
+ [fx, 0, cx],
46
+ [0, fy, cy],
47
+ [0, 0, 1]
48
+ ], dtype=np.float32)
49
+
50
+ # Extract img_name for compatibility
51
+ img_name = img_filename.replace('.png', '')
52
+
53
+ sample = Sample(
54
+ img=img,
55
+ normal=normal,
56
+ normal_mask=normal_mask,
57
+ intrins=intrins,
58
+
59
+ dataset_name='hypersim',
60
+ scene_name=scene_name,
61
+ img_name=img_name,
62
+ info=info
63
+ )
64
+
65
+ return sample
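The default intrinsics built above follow the usual pinhole layout (fx, fy on the diagonal, principal point in the last column). For reference, projecting a camera-space point with such a matrix looks like the hypothetical example below (values chosen to match fx = fy = 0.8 * W for a 1024 x 768 image):

import numpy as np

K = np.array([[819.2, 0.0, 512.0],
              [0.0, 819.2, 384.0],
              [0.0, 0.0, 1.0]], dtype=np.float32)

point_cam = np.array([0.1, -0.2, 2.0], dtype=np.float32)  # (X, Y, Z) in camera space
u, v, w = K @ point_cam
pixel = (u / w, v / w)  # image coordinates, origin at the top-left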
FE2E/infer/dataset_normal/hypersim/split/hypersim.txt ADDED
@@ -0,0 +1,98 @@
1
+ ai_001_001/rgb_cam_00_fr0000.png
2
+ ai_001_001/rgb_cam_00_fr0001.png
3
+ ai_001_001/rgb_cam_00_fr0002.png
4
+ ai_001_001/rgb_cam_00_fr0003.png
5
+ ai_001_001/rgb_cam_00_fr0004.png
6
+ ai_001_001/rgb_cam_00_fr0005.png
7
+ ai_001_001/rgb_cam_00_fr0006.png
8
+ ai_001_001/rgb_cam_00_fr0007.png
9
+ ai_001_001/rgb_cam_00_fr0008.png
10
+ ai_001_001/rgb_cam_00_fr0009.png
11
+ ai_001_001/rgb_cam_00_fr0010.png
12
+ ai_001_001/rgb_cam_00_fr0011.png
13
+ ai_001_001/rgb_cam_00_fr0012.png
14
+ ai_001_001/rgb_cam_00_fr0013.png
15
+ ai_001_001/rgb_cam_00_fr0014.png
16
+ ai_001_001/rgb_cam_00_fr0015.png
17
+ ai_001_001/rgb_cam_00_fr0016.png
18
+ ai_001_001/rgb_cam_00_fr0017.png
19
+ ai_001_001/rgb_cam_00_fr0018.png
20
+ ai_001_001/rgb_cam_00_fr0019.png
21
+ ai_001_001/rgb_cam_00_fr0020.png
22
+ ai_001_001/rgb_cam_00_fr0021.png
23
+ ai_001_001/rgb_cam_00_fr0022.png
24
+ ai_001_001/rgb_cam_00_fr0023.png
25
+ ai_001_001/rgb_cam_00_fr0024.png
26
+ ai_001_001/rgb_cam_00_fr0025.png
27
+ ai_001_001/rgb_cam_00_fr0026.png
28
+ ai_001_001/rgb_cam_00_fr0027.png
29
+ ai_001_001/rgb_cam_00_fr0028.png
30
+ ai_001_001/rgb_cam_00_fr0029.png
31
+ ai_001_001/rgb_cam_00_fr0030.png
32
+ ai_001_001/rgb_cam_00_fr0031.png
33
+ ai_001_001/rgb_cam_00_fr0032.png
34
+ ai_001_001/rgb_cam_00_fr0033.png
35
+ ai_001_001/rgb_cam_00_fr0034.png
36
+ ai_001_001/rgb_cam_00_fr0035.png
37
+ ai_001_001/rgb_cam_00_fr0037.png
38
+ ai_001_001/rgb_cam_00_fr0038.png
39
+ ai_001_001/rgb_cam_00_fr0039.png
40
+ ai_001_001/rgb_cam_00_fr0040.png
41
+ ai_001_001/rgb_cam_00_fr0041.png
42
+ ai_001_001/rgb_cam_00_fr0042.png
43
+ ai_001_001/rgb_cam_00_fr0043.png
44
+ ai_001_001/rgb_cam_00_fr0044.png
45
+ ai_001_001/rgb_cam_00_fr0045.png
46
+ ai_001_001/rgb_cam_00_fr0046.png
47
+ ai_001_001/rgb_cam_00_fr0047.png
48
+ ai_001_001/rgb_cam_00_fr0048.png
49
+ ai_001_001/rgb_cam_00_fr0049.png
50
+ ai_001_001/rgb_cam_00_fr0050.png
51
+ ai_001_001/rgb_cam_00_fr0051.png
52
+ ai_001_001/rgb_cam_00_fr0052.png
53
+ ai_001_001/rgb_cam_00_fr0053.png
54
+ ai_001_001/rgb_cam_00_fr0054.png
55
+ ai_001_001/rgb_cam_00_fr0055.png
56
+ ai_001_001/rgb_cam_00_fr0056.png
57
+ ai_001_001/rgb_cam_00_fr0057.png
58
+ ai_001_001/rgb_cam_00_fr0058.png
59
+ ai_001_001/rgb_cam_00_fr0059.png
60
+ ai_001_001/rgb_cam_00_fr0060.png
61
+ ai_001_001/rgb_cam_00_fr0062.png
62
+ ai_001_001/rgb_cam_00_fr0063.png
63
+ ai_001_001/rgb_cam_00_fr0064.png
64
+ ai_001_001/rgb_cam_00_fr0065.png
65
+ ai_001_001/rgb_cam_00_fr0066.png
66
+ ai_001_001/rgb_cam_00_fr0067.png
67
+ ai_001_001/rgb_cam_00_fr0068.png
68
+ ai_001_001/rgb_cam_00_fr0069.png
69
+ ai_001_001/rgb_cam_00_fr0070.png
70
+ ai_001_001/rgb_cam_00_fr0071.png
71
+ ai_001_001/rgb_cam_00_fr0072.png
72
+ ai_001_001/rgb_cam_00_fr0073.png
73
+ ai_001_001/rgb_cam_00_fr0074.png
74
+ ai_001_001/rgb_cam_00_fr0075.png
75
+ ai_001_001/rgb_cam_00_fr0076.png
76
+ ai_001_001/rgb_cam_00_fr0077.png
77
+ ai_001_001/rgb_cam_00_fr0078.png
78
+ ai_001_001/rgb_cam_00_fr0079.png
79
+ ai_001_001/rgb_cam_00_fr0080.png
80
+ ai_001_001/rgb_cam_00_fr0081.png
81
+ ai_001_001/rgb_cam_00_fr0082.png
82
+ ai_001_001/rgb_cam_00_fr0083.png
83
+ ai_001_001/rgb_cam_00_fr0084.png
84
+ ai_001_001/rgb_cam_00_fr0085.png
85
+ ai_001_001/rgb_cam_00_fr0086.png
86
+ ai_001_001/rgb_cam_00_fr0087.png
87
+ ai_001_001/rgb_cam_00_fr0088.png
88
+ ai_001_001/rgb_cam_00_fr0089.png
89
+ ai_001_001/rgb_cam_00_fr0090.png
90
+ ai_001_001/rgb_cam_00_fr0091.png
91
+ ai_001_001/rgb_cam_00_fr0092.png
92
+ ai_001_001/rgb_cam_00_fr0093.png
93
+ ai_001_001/rgb_cam_00_fr0094.png
94
+ ai_001_001/rgb_cam_00_fr0095.png
95
+ ai_001_001/rgb_cam_00_fr0096.png
96
+ ai_001_001/rgb_cam_00_fr0097.png
97
+ ai_001_001/rgb_cam_00_fr0098.png
98
+ ai_001_001/rgb_cam_00_fr0099.png
FE2E/infer/dataset_normal/ibims/__init__.py ADDED
@@ -0,0 +1,47 @@
1
+ """ Get samples from iBims-1 (https://paperswithcode.com/dataset/ibims-1)
2
+ NOTE: We computed the GT surface normals by doing discontinuity-aware plane fitting
3
+ """
4
+ import os
5
+ os.environ["OPENCV_IO_ENABLE_OPENEXR"] = "1"  # enable OpenEXR I/O (set before importing cv2)
6
+ import cv2
7
+ import numpy as np
8
+
9
+ from infer.dataset_normal import Sample
10
+
11
+ def get_sample(base_data_dir, sample_path, info):
12
+ # e.g. sample_path = "ibims/corridor_01_img.png"
13
+ scene_name = sample_path.split('/')[0]
14
+ img_name, img_ext = sample_path.split('/')[1].split('_img')
15
+
16
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'ibims')
17
+ img_path = '%s/%s' % (dataset_path, sample_path)
18
+ normal_path = img_path.replace('_img'+img_ext, '_normal.exr')
19
+ intrins_path = img_path.replace('_img'+img_ext, '_intrins.npy')
20
+ assert os.path.exists(img_path)
21
+ assert os.path.exists(normal_path)
22
+ assert os.path.exists(intrins_path)
23
+
24
+ # read image (H, W, 3)
25
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
26
+ img = img.astype(np.float32) / 255.0
27
+
28
+ # read normal (H, W, 3)
29
+ normal = cv2.cvtColor(cv2.imread(normal_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
30
+ normal_mask = np.linalg.norm(normal, axis=2, keepdims=True) > 0.5
31
+
32
+ # read intrins (3, 3)
33
+ intrins = np.load(intrins_path)
34
+
35
+ sample = Sample(
36
+ img=img,
37
+ normal=normal,
38
+ normal_mask=normal_mask,
39
+ intrins=intrins,
40
+
41
+ dataset_name='ibims',
42
+ scene_name=scene_name,
43
+ img_name=img_name,
44
+ info=info
45
+ )
46
+
47
+ return sample
FE2E/infer/dataset_normal/ibims/split/ibims.txt ADDED
@@ -0,0 +1,100 @@
1
+ ibims/corridor_01_img.png
2
+ ibims/corridor_02_img.png
3
+ ibims/corridor_03_img.png
4
+ ibims/corridor_04_img.png
5
+ ibims/corridor_05_img.png
6
+ ibims/corridor_06_img.png
7
+ ibims/corridor_07_img.png
8
+ ibims/corridor_08_img.png
9
+ ibims/corridor_09_img.png
10
+ ibims/corridor_10_img.png
11
+ ibims/factory_01_img.png
12
+ ibims/factory_02_img.png
13
+ ibims/factory_03_img.png
14
+ ibims/factory_04_img.png
15
+ ibims/factory_05_img.png
16
+ ibims/factory_06_img.png
17
+ ibims/factory_07_img.png
18
+ ibims/factory_08_img.png
19
+ ibims/kitchen_01_img.png
20
+ ibims/kitchen_02_img.png
21
+ ibims/kitchen_03_img.png
22
+ ibims/kitchen_04_img.png
23
+ ibims/kitchen_05_img.png
24
+ ibims/kitchen_06_img.png
25
+ ibims/kitchen_07_img.png
26
+ ibims/kitchen_08_img.png
27
+ ibims/lab_01_img.png
28
+ ibims/lab_02_img.png
29
+ ibims/lab_03_img.png
30
+ ibims/lab_04_img.png
31
+ ibims/lab_05_img.png
32
+ ibims/lab_06_img.png
33
+ ibims/lab_07_img.png
34
+ ibims/lab_08_img.png
35
+ ibims/lab_09_img.png
36
+ ibims/lab_10_img.png
37
+ ibims/lab_11_img.png
38
+ ibims/lectureroom_01_img.png
39
+ ibims/lectureroom_02_img.png
40
+ ibims/lectureroom_03_img.png
41
+ ibims/lectureroom_04_img.png
42
+ ibims/lectureroom_05_img.png
43
+ ibims/lectureroom_06_img.png
44
+ ibims/lectureroom_07_img.png
45
+ ibims/lectureroom_08_img.png
46
+ ibims/lectureroom_09_img.png
47
+ ibims/lectureroom_10_img.png
48
+ ibims/livingroom_01_img.png
49
+ ibims/livingroom_02_img.png
50
+ ibims/livingroom_03_img.png
51
+ ibims/livingroom_04_img.png
52
+ ibims/livingroom_05_img.png
53
+ ibims/livingroom_06_img.png
54
+ ibims/livingroom_07_img.png
55
+ ibims/livingroom_08_img.png
56
+ ibims/livingroom_09_img.png
57
+ ibims/livingroom_10_img.png
58
+ ibims/livingroom_11_img.png
59
+ ibims/livingroom_12_img.png
60
+ ibims/livingroom_13_img.png
61
+ ibims/livingroom_14_img.png
62
+ ibims/livingroom_15_img.png
63
+ ibims/meetingroom_01_img.png
64
+ ibims/meetingroom_02_img.png
65
+ ibims/meetingroom_03_img.png
66
+ ibims/meetingroom_04_img.png
67
+ ibims/meetingroom_05_img.png
68
+ ibims/meetingroom_06_img.png
69
+ ibims/meetingroom_07_img.png
70
+ ibims/meetingroom_08_img.png
71
+ ibims/office_01_img.png
72
+ ibims/office_02_img.png
73
+ ibims/office_03_img.png
74
+ ibims/office_04_img.png
75
+ ibims/office_05_img.png
76
+ ibims/office_06_img.png
77
+ ibims/office_07_img.png
78
+ ibims/office_08_img.png
79
+ ibims/restaurant_01_img.png
80
+ ibims/restaurant_02_img.png
81
+ ibims/restaurant_03_img.png
82
+ ibims/restaurant_04_img.png
83
+ ibims/restaurant_05_img.png
84
+ ibims/restaurant_06_img.png
85
+ ibims/restaurant_07_img.png
86
+ ibims/restaurant_08_img.png
87
+ ibims/restaurant_09_img.png
88
+ ibims/restaurant_10_img.png
89
+ ibims/restaurant_11_img.png
90
+ ibims/restaurant_12_img.png
91
+ ibims/restroom_01_img.png
92
+ ibims/restroom_02_img.png
93
+ ibims/storageroom_01_img.png
94
+ ibims/storageroom_02_img.png
95
+ ibims/storageroom_03_img.png
96
+ ibims/storageroom_04_img.png
97
+ ibims/storageroom_05_img.png
98
+ ibims/storageroom_06_img.png
99
+ ibims/storageroom_07_img.png
100
+ ibims/storageroom_08_img.png
FE2E/infer/dataset_normal/normal_dataloader.py ADDED
@@ -0,0 +1,83 @@
1
+ import os
2
+ import random
3
+
4
+ import torch
5
+ from torch.utils.data import Dataset
6
+ from torch.utils.data import DataLoader
7
+ from torchvision import transforms
8
+
9
+ from . import aug_basic
10
+
11
+ import logging
12
+ logger = logging.getLogger('root')
13
+
14
+
15
+ def get_transform(dataset_name='hypersim', mode='test'):
16
+ assert mode in ['test']
17
+ logger.info('Defining %s transform for %s dataset' % (mode, dataset_name))
18
+ tf_list = [
19
+ aug_basic.ToTensor(),
20
+ ]
21
+ tf_list += [
22
+ # Option 1: use standard normalization (if enabled, adjust the visualization code accordingly)
23
+ # aug_basic.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
24
+ # Option 2: no normalization (current scheme, paired with the modified unnormalize function)
25
+ aug_basic.ToDict(),
26
+ ]
27
+ logger.info('Defining %s transform for %s dataset ... DONE' % (mode, dataset_name))
28
+ return transforms.Compose(tf_list)
29
+
30
+
31
+ class NormalDataset(Dataset):
32
+ def __init__(self, base_data_dir, dataset_split_path, dataset_name='nyuv2', split='test', mode='test', epoch=0):
33
+ self.split = split
34
+ self.mode = mode
35
+ self.base_data_dir = base_data_dir
36
+ assert mode in ['test']
37
+
38
+ # data split
39
+ split_path = os.path.join(dataset_split_path, dataset_name, 'split', split+'.txt') # dataset_split_path: eval/dataset_normal/
40
+ assert os.path.exists(split_path)
41
+ with open(split_path, 'r') as f:
42
+ self.filenames = [i.strip() for i in f.readlines()]
43
+ self.split_path = split_path
44
+
45
+ # get_sample function
46
+ if dataset_name == 'nyuv2':
47
+ from infer.dataset_normal.nyuv2 import get_sample
48
+ elif dataset_name == 'scannet':
49
+ from infer.dataset_normal.scannet import get_sample
50
+ elif dataset_name == 'ibims':
51
+ from infer.dataset_normal.ibims import get_sample
52
+ elif dataset_name == 'sintel':
53
+ from infer.dataset_normal.sintel import get_sample
54
+ elif dataset_name == 'oasis':
55
+ from infer.dataset_normal.oasis import get_sample
56
+ elif dataset_name == 'hypersim':
57
+ from infer.dataset_normal.hypersim import get_sample
58
+ else:
59
+ raise NotImplementedError(f"Unsupported normal dataset: {dataset_name}")
60
+ self.get_sample = get_sample
61
+
62
+ # data preprocessing/augmentation
63
+ self.transform = get_transform(dataset_name=dataset_name, mode=mode)
64
+
65
+ def __len__(self):
66
+ return len(self.filenames)
67
+
68
+ def __getitem__(self, index):
69
+ info = {}
70
+
71
+ sample = self.transform(self.get_sample(
72
+ base_data_dir = self.base_data_dir,
73
+ sample_path=self.filenames[index],
74
+ info=info)
75
+ )
76
+
77
+ return sample
78
+
79
+ class TestLoader(object):
80
+ def __init__(self, base_data_dir, dataset_split_path, dataset_name_test, test_split):
81
+ self.test_samples = NormalDataset(base_data_dir, dataset_split_path, dataset_name=dataset_name_test,
82
+ split=test_split, mode='test', epoch=0)
83
+ self.data = DataLoader(self.test_samples, 1, shuffle=False, num_workers=4, pin_memory=True)
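Evaluation scripts can then iterate over a split via TestLoader. The paths in the sketch below are assumptions inferred from the repo layout and the comments above, not values taken from the code:

from infer.dataset_normal.normal_dataloader import TestLoader

loader = TestLoader(
    base_data_dir='infer',                      # parent of the dsine_eval/ data folder (assumed)
    dataset_split_path='infer/dataset_normal',  # folder holding <dataset>/split/<split>.txt (assumed)
    dataset_name_test='nyuv2',
    test_split='test',
)
for batch in loader.data:
    img = batch['img']           # (1, 3, H, W) float tensor in [0, 1]
    gt_normal = batch['normal']  # (1, 3, H, W) ground-truth normals in [-1, 1]
    break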
FE2E/infer/dataset_normal/nyuv2/__init__.py ADDED
@@ -0,0 +1,66 @@
1
+ """ Get samples from NYUv2 (https://cs.nyu.edu/~fergus/datasets/nyu_depth_v2.html)
2
+ NOTE: GT surface normals are from GeoNet (CVPR 2018) - https://github.com/xjqi/GeoNet
3
+ """
4
+ import os
5
+ import cv2
6
+ import numpy as np
7
+
8
+ from infer.dataset_normal import Sample
9
+
10
+
11
+ def get_sample(base_data_dir, sample_path, info):
12
+ # e.g. sample_path = "test/000000_img.png"
13
+ scene_name = sample_path.split('/')[0]
14
+ img_name, img_ext = sample_path.split('/')[1].split('_img')
15
+
16
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'nyuv2')
17
+ img_path = '%s/%s' % (dataset_path, sample_path)
18
+ normal_png_path = img_path.replace('_img'+img_ext, '_normal.png')
19
+ normal_npy_path = img_path.replace('_img'+img_ext, '_normal.npy')
20
+ intrins_path = img_path.replace('_img'+img_ext, '_intrins.npy')
21
+ assert os.path.exists(img_path)
22
+
23
+ # read image (H, W, 3)
24
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
25
+ img = img.astype(np.float32) / 255.0
26
+ #保存图像
27
+ # cv2.imwrite(os.path.join(base_data_dir, img_name+'_img.png'), img*255)
28
+
29
+ # read normal (H, W, 3)
30
+ if os.path.exists(normal_png_path):
31
+ normal = cv2.cvtColor(cv2.imread(normal_png_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
32
+ normal_mask = np.sum(normal, axis=2, keepdims=True) > 0
33
+ normal = (normal.astype(np.float32) / 255.0) * 2.0 - 1.0
34
+ elif os.path.exists(normal_npy_path):
35
+ normal = np.load(normal_npy_path).astype(np.float32)
36
+ assert normal.ndim == 3 and normal.shape[2] == 3, f"Unexpected normal shape: {normal.shape}"
37
+ # GeoNet npy normals use opposite x-axis convention for this evaluation codepath.
38
+ normal[:, :, 0] *= -1.0
39
+ normal_mask = np.linalg.norm(normal, axis=2, keepdims=True) > 1e-6
40
+ else:
41
+ raise FileNotFoundError(f"Missing NYUv2 normal file: {normal_png_path} or {normal_npy_path}")
42
+
43
+ # read intrins (3, 3)
44
+ if os.path.exists(intrins_path):
45
+ intrins = np.load(intrins_path)
46
+ else:
47
+ # Fallback to NYUv2 default intrinsics used by many benchmarks.
48
+ intrins = np.array([
49
+ [518.8579, 0.0, 325.5824],
50
+ [0.0, 519.4696, 253.7362],
51
+ [0.0, 0.0, 1.0],
52
+ ], dtype=np.float32)
53
+
54
+ sample = Sample(
55
+ img=img,
56
+ normal=normal,
57
+ normal_mask=normal_mask,
58
+ intrins=intrins,
59
+
60
+ dataset_name='nyuv2',
61
+ scene_name=scene_name,
62
+ img_name=img_name,
63
+ info=info
64
+ )
65
+
66
+ return sample
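The PNG branch above maps 8-bit normal images to vectors in [-1, 1]; the round trip can be written as a small helper pair (the function names are ours, for illustration only):

import numpy as np

def decode_normal_png(rgb_uint8: np.ndarray) -> np.ndarray:
    """(H, W, 3) uint8 normal image -> float normals in [-1, 1]."""
    return (rgb_uint8.astype(np.float32) / 255.0) * 2.0 - 1.0

def encode_normal_png(normal: np.ndarray) -> np.ndarray:
    """Inverse mapping, clipping before quantisation back to uint8."""
    return np.clip((normal + 1.0) / 2.0 * 255.0, 0, 255).astype(np.uint8)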
FE2E/infer/dataset_normal/nyuv2/split/test.txt ADDED
@@ -0,0 +1,654 @@
1
+ test/000000_img.png
2
+ test/000001_img.png
3
+ test/000008_img.png
4
+ test/000013_img.png
5
+ test/000014_img.png
6
+ test/000015_img.png
7
+ test/000016_img.png
8
+ test/000017_img.png
9
+ test/000020_img.png
10
+ test/000027_img.png
11
+ test/000028_img.png
12
+ test/000029_img.png
13
+ test/000030_img.png
14
+ test/000031_img.png
15
+ test/000032_img.png
16
+ test/000033_img.png
17
+ test/000034_img.png
18
+ test/000035_img.png
19
+ test/000036_img.png
20
+ test/000037_img.png
21
+ test/000038_img.png
22
+ test/000039_img.png
23
+ test/000040_img.png
24
+ test/000041_img.png
25
+ test/000042_img.png
26
+ test/000045_img.png
27
+ test/000046_img.png
28
+ test/000055_img.png
29
+ test/000056_img.png
30
+ test/000058_img.png
31
+ test/000059_img.png
32
+ test/000060_img.png
33
+ test/000061_img.png
34
+ test/000062_img.png
35
+ test/000075_img.png
36
+ test/000076_img.png
37
+ test/000077_img.png
38
+ test/000078_img.png
39
+ test/000083_img.png
40
+ test/000084_img.png
41
+ test/000085_img.png
42
+ test/000086_img.png
43
+ test/000087_img.png
44
+ test/000088_img.png
45
+ test/000089_img.png
46
+ test/000090_img.png
47
+ test/000116_img.png
48
+ test/000117_img.png
49
+ test/000118_img.png
50
+ test/000124_img.png
51
+ test/000125_img.png
52
+ test/000126_img.png
53
+ test/000127_img.png
54
+ test/000128_img.png
55
+ test/000130_img.png
56
+ test/000131_img.png
57
+ test/000132_img.png
58
+ test/000133_img.png
59
+ test/000136_img.png
60
+ test/000152_img.png
61
+ test/000153_img.png
62
+ test/000154_img.png
63
+ test/000166_img.png
64
+ test/000167_img.png
65
+ test/000168_img.png
66
+ test/000170_img.png
67
+ test/000171_img.png
68
+ test/000172_img.png
69
+ test/000173_img.png
70
+ test/000174_img.png
71
+ test/000175_img.png
72
+ test/000179_img.png
73
+ test/000180_img.png
74
+ test/000181_img.png
75
+ test/000182_img.png
76
+ test/000183_img.png
77
+ test/000184_img.png
78
+ test/000185_img.png
79
+ test/000186_img.png
80
+ test/000187_img.png
81
+ test/000188_img.png
82
+ test/000189_img.png
83
+ test/000190_img.png
84
+ test/000191_img.png
85
+ test/000192_img.png
86
+ test/000193_img.png
87
+ test/000194_img.png
88
+ test/000195_img.png
89
+ test/000196_img.png
90
+ test/000197_img.png
91
+ test/000198_img.png
92
+ test/000199_img.png
93
+ test/000200_img.png
94
+ test/000201_img.png
95
+ test/000206_img.png
96
+ test/000207_img.png
97
+ test/000208_img.png
98
+ test/000209_img.png
99
+ test/000210_img.png
100
+ test/000211_img.png
101
+ test/000219_img.png
102
+ test/000220_img.png
103
+ test/000221_img.png
104
+ test/000249_img.png
105
+ test/000263_img.png
106
+ test/000270_img.png
107
+ test/000271_img.png
108
+ test/000272_img.png
109
+ test/000278_img.png
110
+ test/000279_img.png
111
+ test/000280_img.png
112
+ test/000281_img.png
113
+ test/000282_img.png
114
+ test/000283_img.png
115
+ test/000284_img.png
116
+ test/000295_img.png
117
+ test/000296_img.png
118
+ test/000297_img.png
119
+ test/000298_img.png
120
+ test/000299_img.png
121
+ test/000300_img.png
122
+ test/000301_img.png
123
+ test/000309_img.png
124
+ test/000310_img.png
125
+ test/000311_img.png
126
+ test/000314_img.png
127
+ test/000315_img.png
128
+ test/000316_img.png
129
+ test/000324_img.png
130
+ test/000325_img.png
131
+ test/000326_img.png
132
+ test/000327_img.png
133
+ test/000328_img.png
134
+ test/000329_img.png
135
+ test/000330_img.png
136
+ test/000331_img.png
137
+ test/000332_img.png
138
+ test/000333_img.png
139
+ test/000334_img.png
140
+ test/000350_img.png
141
+ test/000351_img.png
142
+ test/000354_img.png
143
+ test/000355_img.png
144
+ test/000356_img.png
145
+ test/000357_img.png
146
+ test/000358_img.png
147
+ test/000359_img.png
148
+ test/000360_img.png
149
+ test/000361_img.png
150
+ test/000362_img.png
151
+ test/000363_img.png
152
+ test/000383_img.png
153
+ test/000384_img.png
154
+ test/000385_img.png
155
+ test/000386_img.png
156
+ test/000387_img.png
157
+ test/000388_img.png
158
+ test/000389_img.png
159
+ test/000394_img.png
160
+ test/000395_img.png
161
+ test/000396_img.png
162
+ test/000410_img.png
163
+ test/000411_img.png
164
+ test/000412_img.png
165
+ test/000413_img.png
166
+ test/000429_img.png
167
+ test/000430_img.png
168
+ test/000431_img.png
169
+ test/000432_img.png
170
+ test/000433_img.png
171
+ test/000434_img.png
172
+ test/000440_img.png
173
+ test/000441_img.png
174
+ test/000442_img.png
175
+ test/000443_img.png
176
+ test/000444_img.png
177
+ test/000445_img.png
178
+ test/000446_img.png
179
+ test/000447_img.png
180
+ test/000461_img.png
181
+ test/000462_img.png
182
+ test/000463_img.png
183
+ test/000464_img.png
184
+ test/000465_img.png
185
+ test/000468_img.png
186
+ test/000469_img.png
187
+ test/000470_img.png
188
+ test/000471_img.png
189
+ test/000472_img.png
190
+ test/000473_img.png
191
+ test/000474_img.png
192
+ test/000475_img.png
193
+ test/000476_img.png
194
+ test/000507_img.png
195
+ test/000508_img.png
196
+ test/000509_img.png
197
+ test/000510_img.png
198
+ test/000511_img.png
199
+ test/000512_img.png
200
+ test/000514_img.png
201
+ test/000515_img.png
202
+ test/000516_img.png
203
+ test/000517_img.png
204
+ test/000518_img.png
205
+ test/000519_img.png
206
+ test/000520_img.png
207
+ test/000521_img.png
208
+ test/000522_img.png
209
+ test/000523_img.png
210
+ test/000524_img.png
211
+ test/000525_img.png
212
+ test/000530_img.png
213
+ test/000531_img.png
214
+ test/000532_img.png
215
+ test/000536_img.png
216
+ test/000537_img.png
217
+ test/000538_img.png
218
+ test/000548_img.png
219
+ test/000549_img.png
220
+ test/000550_img.png
221
+ test/000554_img.png
222
+ test/000555_img.png
223
+ test/000556_img.png
224
+ test/000557_img.png
225
+ test/000558_img.png
226
+ test/000559_img.png
227
+ test/000560_img.png
228
+ test/000561_img.png
229
+ test/000562_img.png
230
+ test/000563_img.png
231
+ test/000564_img.png
232
+ test/000565_img.png
233
+ test/000566_img.png
234
+ test/000567_img.png
235
+ test/000568_img.png
236
+ test/000569_img.png
237
+ test/000570_img.png
238
+ test/000578_img.png
239
+ test/000579_img.png
240
+ test/000580_img.png
241
+ test/000581_img.png
242
+ test/000582_img.png
243
+ test/000590_img.png
244
+ test/000591_img.png
245
+ test/000592_img.png
246
+ test/000593_img.png
247
+ test/000602_img.png
248
+ test/000603_img.png
249
+ test/000604_img.png
250
+ test/000605_img.png
251
+ test/000606_img.png
252
+ test/000611_img.png
253
+ test/000612_img.png
254
+ test/000616_img.png
255
+ test/000617_img.png
256
+ test/000618_img.png
257
+ test/000619_img.png
258
+ test/000620_img.png
259
+ test/000632_img.png
260
+ test/000633_img.png
261
+ test/000634_img.png
262
+ test/000635_img.png
263
+ test/000636_img.png
264
+ test/000637_img.png
265
+ test/000643_img.png
266
+ test/000644_img.png
267
+ test/000649_img.png
268
+ test/000650_img.png
269
+ test/000655_img.png
270
+ test/000656_img.png
271
+ test/000657_img.png
272
+ test/000662_img.png
273
+ test/000663_img.png
274
+ test/000667_img.png
275
+ test/000668_img.png
276
+ test/000669_img.png
277
+ test/000670_img.png
278
+ test/000671_img.png
279
+ test/000672_img.png
280
+ test/000675_img.png
281
+ test/000676_img.png
282
+ test/000677_img.png
283
+ test/000678_img.png
284
+ test/000679_img.png
285
+ test/000680_img.png
286
+ test/000685_img.png
287
+ test/000686_img.png
288
+ test/000687_img.png
289
+ test/000688_img.png
290
+ test/000689_img.png
291
+ test/000692_img.png
292
+ test/000693_img.png
293
+ test/000696_img.png
294
+ test/000697_img.png
295
+ test/000698_img.png
296
+ test/000705_img.png
297
+ test/000706_img.png
298
+ test/000707_img.png
299
+ test/000708_img.png
300
+ test/000709_img.png
301
+ test/000710_img.png
302
+ test/000711_img.png
303
+ test/000712_img.png
304
+ test/000716_img.png
305
+ test/000717_img.png
306
+ test/000723_img.png
307
+ test/000724_img.png
308
+ test/000725_img.png
309
+ test/000726_img.png
310
+ test/000727_img.png
311
+ test/000730_img.png
312
+ test/000731_img.png
313
+ test/000732_img.png
314
+ test/000733_img.png
315
+ test/000742_img.png
316
+ test/000743_img.png
317
+ test/000758_img.png
318
+ test/000759_img.png
319
+ test/000760_img.png
320
+ test/000761_img.png
321
+ test/000762_img.png
322
+ test/000763_img.png
323
+ test/000764_img.png
324
+ test/000765_img.png
325
+ test/000766_img.png
326
+ test/000767_img.png
327
+ test/000768_img.png
328
+ test/000769_img.png
329
+ test/000770_img.png
330
+ test/000771_img.png
331
+ test/000772_img.png
332
+ test/000773_img.png
333
+ test/000774_img.png
334
+ test/000775_img.png
335
+ test/000776_img.png
336
+ test/000777_img.png
337
+ test/000778_img.png
338
+ test/000779_img.png
339
+ test/000780_img.png
340
+ test/000781_img.png
341
+ test/000782_img.png
342
+ test/000783_img.png
343
+ test/000784_img.png
344
+ test/000785_img.png
345
+ test/000786_img.png
346
+ test/000799_img.png
347
+ test/000800_img.png
348
+ test/000801_img.png
349
+ test/000802_img.png
350
+ test/000803_img.png
351
+ test/000809_img.png
352
+ test/000810_img.png
353
+ test/000811_img.png
354
+ test/000812_img.png
355
+ test/000813_img.png
356
+ test/000820_img.png
357
+ test/000821_img.png
358
+ test/000822_img.png
359
+ test/000832_img.png
360
+ test/000833_img.png
361
+ test/000834_img.png
362
+ test/000835_img.png
363
+ test/000836_img.png
364
+ test/000837_img.png
365
+ test/000838_img.png
366
+ test/000839_img.png
367
+ test/000840_img.png
368
+ test/000841_img.png
369
+ test/000842_img.png
370
+ test/000843_img.png
371
+ test/000844_img.png
372
+ test/000845_img.png
373
+ test/000849_img.png
374
+ test/000850_img.png
375
+ test/000851_img.png
376
+ test/000856_img.png
377
+ test/000857_img.png
378
+ test/000858_img.png
379
+ test/000859_img.png
380
+ test/000860_img.png
381
+ test/000861_img.png
382
+ test/000868_img.png
383
+ test/000869_img.png
384
+ test/000870_img.png
385
+ test/000905_img.png
386
+ test/000906_img.png
387
+ test/000907_img.png
388
+ test/000916_img.png
389
+ test/000917_img.png
390
+ test/000918_img.png
391
+ test/000925_img.png
392
+ test/000926_img.png
393
+ test/000927_img.png
394
+ test/000931_img.png
395
+ test/000932_img.png
396
+ test/000933_img.png
397
+ test/000934_img.png
398
+ test/000944_img.png
399
+ test/000945_img.png
400
+ test/000946_img.png
401
+ test/000958_img.png
402
+ test/000959_img.png
403
+ test/000960_img.png
404
+ test/000961_img.png
405
+ test/000964_img.png
406
+ test/000965_img.png
407
+ test/000966_img.png
408
+ test/000969_img.png
409
+ test/000970_img.png
410
+ test/000971_img.png
411
+ test/000972_img.png
412
+ test/000973_img.png
413
+ test/000974_img.png
414
+ test/000975_img.png
415
+ test/000976_img.png
416
+ test/000990_img.png
417
+ test/000991_img.png
418
+ test/000992_img.png
419
+ test/000993_img.png
420
+ test/000994_img.png
421
+ test/001000_img.png
422
+ test/001001_img.png
423
+ test/001002_img.png
424
+ test/001003_img.png
425
+ test/001009_img.png
426
+ test/001010_img.png
427
+ test/001011_img.png
428
+ test/001020_img.png
429
+ test/001021_img.png
430
+ test/001022_img.png
431
+ test/001031_img.png
432
+ test/001032_img.png
433
+ test/001033_img.png
434
+ test/001037_img.png
435
+ test/001038_img.png
436
+ test/001047_img.png
437
+ test/001048_img.png
438
+ test/001051_img.png
439
+ test/001052_img.png
440
+ test/001056_img.png
441
+ test/001057_img.png
442
+ test/001074_img.png
443
+ test/001075_img.png
444
+ test/001076_img.png
445
+ test/001077_img.png
446
+ test/001078_img.png
447
+ test/001079_img.png
448
+ test/001080_img.png
449
+ test/001081_img.png
450
+ test/001082_img.png
451
+ test/001083_img.png
452
+ test/001087_img.png
453
+ test/001088_img.png
454
+ test/001089_img.png
455
+ test/001090_img.png
456
+ test/001091_img.png
457
+ test/001092_img.png
458
+ test/001093_img.png
459
+ test/001094_img.png
460
+ test/001095_img.png
461
+ test/001097_img.png
462
+ test/001098_img.png
463
+ test/001099_img.png
464
+ test/001100_img.png
465
+ test/001101_img.png
466
+ test/001102_img.png
467
+ test/001103_img.png
468
+ test/001105_img.png
469
+ test/001106_img.png
470
+ test/001107_img.png
471
+ test/001108_img.png
472
+ test/001116_img.png
473
+ test/001117_img.png
474
+ test/001118_img.png
475
+ test/001122_img.png
476
+ test/001123_img.png
477
+ test/001124_img.png
478
+ test/001125_img.png
479
+ test/001126_img.png
480
+ test/001127_img.png
481
+ test/001128_img.png
482
+ test/001129_img.png
483
+ test/001130_img.png
484
+ test/001134_img.png
485
+ test/001135_img.png
486
+ test/001143_img.png
487
+ test/001144_img.png
488
+ test/001145_img.png
489
+ test/001146_img.png
490
+ test/001147_img.png
491
+ test/001148_img.png
492
+ test/001149_img.png
493
+ test/001150_img.png
494
+ test/001151_img.png
495
+ test/001152_img.png
496
+ test/001153_img.png
497
+ test/001154_img.png
498
+ test/001155_img.png
499
+ test/001156_img.png
500
+ test/001157_img.png
501
+ test/001161_img.png
502
+ test/001162_img.png
503
+ test/001163_img.png
504
+ test/001164_img.png
505
+ test/001165_img.png
506
+ test/001166_img.png
507
+ test/001169_img.png
508
+ test/001170_img.png
509
+ test/001173_img.png
510
+ test/001174_img.png
511
+ test/001175_img.png
512
+ test/001178_img.png
513
+ test/001179_img.png
514
+ test/001180_img.png
515
+ test/001181_img.png
516
+ test/001182_img.png
517
+ test/001183_img.png
518
+ test/001191_img.png
519
+ test/001192_img.png
520
+ test/001193_img.png
521
+ test/001194_img.png
522
+ test/001195_img.png
523
+ test/001200_img.png
524
+ test/001201_img.png
525
+ test/001202_img.png
526
+ test/001203_img.png
527
+ test/001204_img.png
528
+ test/001205_img.png
529
+ test/001206_img.png
530
+ test/001207_img.png
531
+ test/001208_img.png
532
+ test/001209_img.png
533
+ test/001210_img.png
534
+ test/001211_img.png
535
+ test/001215_img.png
536
+ test/001216_img.png
537
+ test/001217_img.png
538
+ test/001218_img.png
539
+ test/001219_img.png
540
+ test/001225_img.png
541
+ test/001226_img.png
542
+ test/001227_img.png
543
+ test/001228_img.png
544
+ test/001229_img.png
545
+ test/001232_img.png
546
+ test/001233_img.png
547
+ test/001234_img.png
548
+ test/001246_img.png
549
+ test/001247_img.png
550
+ test/001248_img.png
551
+ test/001249_img.png
552
+ test/001253_img.png
553
+ test/001254_img.png
554
+ test/001255_img.png
555
+ test/001256_img.png
556
+ test/001257_img.png
557
+ test/001258_img.png
558
+ test/001259_img.png
559
+ test/001260_img.png
560
+ test/001261_img.png
561
+ test/001262_img.png
562
+ test/001263_img.png
563
+ test/001264_img.png
564
+ test/001274_img.png
565
+ test/001275_img.png
566
+ test/001276_img.png
567
+ test/001277_img.png
568
+ test/001278_img.png
569
+ test/001279_img.png
570
+ test/001284_img.png
571
+ test/001285_img.png
572
+ test/001286_img.png
573
+ test/001287_img.png
574
+ test/001288_img.png
575
+ test/001289_img.png
576
+ test/001290_img.png
577
+ test/001291_img.png
578
+ test/001292_img.png
579
+ test/001293_img.png
580
+ test/001294_img.png
581
+ test/001296_img.png
582
+ test/001297_img.png
583
+ test/001298_img.png
584
+ test/001301_img.png
585
+ test/001302_img.png
586
+ test/001303_img.png
587
+ test/001304_img.png
588
+ test/001305_img.png
589
+ test/001306_img.png
590
+ test/001307_img.png
591
+ test/001313_img.png
592
+ test/001314_img.png
593
+ test/001328_img.png
594
+ test/001329_img.png
595
+ test/001330_img.png
596
+ test/001331_img.png
597
+ test/001334_img.png
598
+ test/001335_img.png
599
+ test/001336_img.png
600
+ test/001337_img.png
601
+ test/001338_img.png
602
+ test/001339_img.png
603
+ test/001346_img.png
604
+ test/001347_img.png
605
+ test/001348_img.png
606
+ test/001352_img.png
607
+ test/001353_img.png
608
+ test/001354_img.png
609
+ test/001355_img.png
610
+ test/001363_img.png
611
+ test/001364_img.png
612
+ test/001367_img.png
613
+ test/001368_img.png
614
+ test/001383_img.png
615
+ test/001384_img.png
616
+ test/001385_img.png
617
+ test/001386_img.png
618
+ test/001387_img.png
619
+ test/001388_img.png
620
+ test/001389_img.png
621
+ test/001390_img.png
622
+ test/001393_img.png
623
+ test/001394_img.png
624
+ test/001395_img.png
625
+ test/001396_img.png
626
+ test/001397_img.png
627
+ test/001398_img.png
628
+ test/001399_img.png
629
+ test/001400_img.png
630
+ test/001406_img.png
631
+ test/001407_img.png
632
+ test/001408_img.png
633
+ test/001409_img.png
634
+ test/001410_img.png
635
+ test/001411_img.png
636
+ test/001412_img.png
637
+ test/001413_img.png
638
+ test/001420_img.png
639
+ test/001421_img.png
640
+ test/001422_img.png
641
+ test/001423_img.png
642
+ test/001429_img.png
643
+ test/001430_img.png
644
+ test/001431_img.png
645
+ test/001432_img.png
646
+ test/001440_img.png
647
+ test/001441_img.png
648
+ test/001442_img.png
649
+ test/001443_img.png
650
+ test/001444_img.png
651
+ test/001445_img.png
652
+ test/001446_img.png
653
+ test/001447_img.png
654
+ test/001448_img.png
FE2E/infer/dataset_normal/nyuv2/split/train.txt ADDED
@@ -0,0 +1,795 @@
1
+ train/000002_img.png
2
+ train/000003_img.png
3
+ train/000004_img.png
4
+ train/000005_img.png
5
+ train/000006_img.png
6
+ train/000007_img.png
7
+ train/000009_img.png
8
+ train/000010_img.png
9
+ train/000011_img.png
10
+ train/000012_img.png
11
+ train/000018_img.png
12
+ train/000019_img.png
13
+ train/000021_img.png
14
+ train/000022_img.png
15
+ train/000023_img.png
16
+ train/000024_img.png
17
+ train/000025_img.png
18
+ train/000026_img.png
19
+ train/000043_img.png
20
+ train/000044_img.png
21
+ train/000047_img.png
22
+ train/000048_img.png
23
+ train/000049_img.png
24
+ train/000050_img.png
25
+ train/000051_img.png
26
+ train/000052_img.png
27
+ train/000053_img.png
28
+ train/000054_img.png
29
+ train/000057_img.png
30
+ train/000063_img.png
31
+ train/000064_img.png
32
+ train/000065_img.png
33
+ train/000066_img.png
34
+ train/000067_img.png
35
+ train/000068_img.png
36
+ train/000069_img.png
37
+ train/000070_img.png
38
+ train/000071_img.png
39
+ train/000072_img.png
40
+ train/000073_img.png
41
+ train/000074_img.png
42
+ train/000079_img.png
43
+ train/000080_img.png
44
+ train/000081_img.png
45
+ train/000082_img.png
46
+ train/000091_img.png
47
+ train/000092_img.png
48
+ train/000093_img.png
49
+ train/000094_img.png
50
+ train/000095_img.png
51
+ train/000096_img.png
52
+ train/000097_img.png
53
+ train/000098_img.png
54
+ train/000099_img.png
55
+ train/000100_img.png
56
+ train/000101_img.png
57
+ train/000102_img.png
58
+ train/000103_img.png
59
+ train/000104_img.png
60
+ train/000105_img.png
61
+ train/000106_img.png
62
+ train/000107_img.png
63
+ train/000108_img.png
64
+ train/000109_img.png
65
+ train/000110_img.png
66
+ train/000111_img.png
67
+ train/000112_img.png
68
+ train/000113_img.png
69
+ train/000114_img.png
70
+ train/000115_img.png
71
+ train/000119_img.png
72
+ train/000120_img.png
73
+ train/000121_img.png
74
+ train/000122_img.png
75
+ train/000123_img.png
76
+ train/000129_img.png
77
+ train/000134_img.png
78
+ train/000135_img.png
79
+ train/000137_img.png
80
+ train/000138_img.png
81
+ train/000139_img.png
82
+ train/000140_img.png
83
+ train/000141_img.png
84
+ train/000142_img.png
85
+ train/000143_img.png
86
+ train/000144_img.png
87
+ train/000145_img.png
88
+ train/000146_img.png
89
+ train/000147_img.png
90
+ train/000148_img.png
91
+ train/000149_img.png
92
+ train/000150_img.png
93
+ train/000151_img.png
94
+ train/000155_img.png
95
+ train/000156_img.png
96
+ train/000157_img.png
97
+ train/000158_img.png
98
+ train/000159_img.png
99
+ train/000160_img.png
100
+ train/000161_img.png
101
+ train/000162_img.png
102
+ train/000163_img.png
103
+ train/000164_img.png
104
+ train/000165_img.png
105
+ train/000169_img.png
106
+ train/000176_img.png
107
+ train/000177_img.png
108
+ train/000178_img.png
109
+ train/000202_img.png
110
+ train/000203_img.png
111
+ train/000204_img.png
112
+ train/000205_img.png
113
+ train/000212_img.png
114
+ train/000213_img.png
115
+ train/000214_img.png
116
+ train/000215_img.png
117
+ train/000216_img.png
118
+ train/000217_img.png
119
+ train/000218_img.png
120
+ train/000222_img.png
121
+ train/000223_img.png
122
+ train/000224_img.png
123
+ train/000225_img.png
124
+ train/000226_img.png
125
+ train/000227_img.png
126
+ train/000228_img.png
127
+ train/000229_img.png
128
+ train/000230_img.png
129
+ train/000231_img.png
130
+ train/000232_img.png
131
+ train/000233_img.png
132
+ train/000234_img.png
133
+ train/000235_img.png
134
+ train/000236_img.png
135
+ train/000237_img.png
136
+ train/000238_img.png
137
+ train/000239_img.png
138
+ train/000240_img.png
139
+ train/000241_img.png
140
+ train/000242_img.png
141
+ train/000243_img.png
142
+ train/000244_img.png
143
+ train/000245_img.png
144
+ train/000246_img.png
145
+ train/000247_img.png
146
+ train/000248_img.png
147
+ train/000250_img.png
148
+ train/000251_img.png
149
+ train/000252_img.png
150
+ train/000253_img.png
151
+ train/000254_img.png
152
+ train/000255_img.png
153
+ train/000256_img.png
154
+ train/000257_img.png
155
+ train/000258_img.png
156
+ train/000259_img.png
157
+ train/000260_img.png
158
+ train/000261_img.png
159
+ train/000262_img.png
160
+ train/000264_img.png
161
+ train/000265_img.png
162
+ train/000266_img.png
163
+ train/000267_img.png
164
+ train/000268_img.png
165
+ train/000269_img.png
166
+ train/000273_img.png
167
+ train/000274_img.png
168
+ train/000275_img.png
169
+ train/000276_img.png
170
+ train/000277_img.png
171
+ train/000285_img.png
172
+ train/000286_img.png
173
+ train/000287_img.png
174
+ train/000288_img.png
175
+ train/000289_img.png
176
+ train/000290_img.png
177
+ train/000291_img.png
178
+ train/000292_img.png
179
+ train/000293_img.png
180
+ train/000294_img.png
181
+ train/000302_img.png
182
+ train/000303_img.png
183
+ train/000304_img.png
184
+ train/000305_img.png
185
+ train/000306_img.png
186
+ train/000307_img.png
187
+ train/000308_img.png
188
+ train/000312_img.png
189
+ train/000313_img.png
190
+ train/000317_img.png
191
+ train/000318_img.png
192
+ train/000319_img.png
193
+ train/000320_img.png
194
+ train/000321_img.png
195
+ train/000322_img.png
196
+ train/000323_img.png
197
+ train/000335_img.png
198
+ train/000336_img.png
199
+ train/000337_img.png
200
+ train/000338_img.png
201
+ train/000339_img.png
202
+ train/000340_img.png
203
+ train/000341_img.png
204
+ train/000342_img.png
205
+ train/000343_img.png
206
+ train/000344_img.png
207
+ train/000345_img.png
208
+ train/000346_img.png
209
+ train/000347_img.png
210
+ train/000348_img.png
211
+ train/000349_img.png
212
+ train/000352_img.png
213
+ train/000353_img.png
214
+ train/000364_img.png
215
+ train/000365_img.png
216
+ train/000366_img.png
217
+ train/000367_img.png
218
+ train/000368_img.png
219
+ train/000369_img.png
220
+ train/000370_img.png
221
+ train/000371_img.png
222
+ train/000372_img.png
223
+ train/000373_img.png
224
+ train/000374_img.png
225
+ train/000375_img.png
226
+ train/000376_img.png
227
+ train/000377_img.png
228
+ train/000378_img.png
229
+ train/000379_img.png
230
+ train/000380_img.png
231
+ train/000381_img.png
232
+ train/000382_img.png
233
+ train/000390_img.png
234
+ train/000391_img.png
235
+ train/000392_img.png
236
+ train/000393_img.png
237
+ train/000397_img.png
238
+ train/000398_img.png
239
+ train/000399_img.png
240
+ train/000400_img.png
241
+ train/000401_img.png
242
+ train/000402_img.png
243
+ train/000403_img.png
244
+ train/000404_img.png
245
+ train/000405_img.png
246
+ train/000406_img.png
247
+ train/000407_img.png
248
+ train/000408_img.png
249
+ train/000409_img.png
250
+ train/000414_img.png
251
+ train/000415_img.png
252
+ train/000416_img.png
253
+ train/000417_img.png
254
+ train/000418_img.png
255
+ train/000419_img.png
256
+ train/000420_img.png
257
+ train/000421_img.png
258
+ train/000422_img.png
259
+ train/000423_img.png
260
+ train/000424_img.png
261
+ train/000425_img.png
262
+ train/000426_img.png
263
+ train/000427_img.png
264
+ train/000428_img.png
265
+ train/000435_img.png
266
+ train/000436_img.png
267
+ train/000437_img.png
268
+ train/000438_img.png
269
+ train/000439_img.png
270
+ train/000448_img.png
271
+ train/000449_img.png
272
+ train/000450_img.png
273
+ train/000451_img.png
274
+ train/000452_img.png
275
+ train/000453_img.png
276
+ train/000454_img.png
277
+ train/000455_img.png
278
+ train/000456_img.png
279
+ train/000457_img.png
280
+ train/000458_img.png
281
+ train/000459_img.png
282
+ train/000460_img.png
283
+ train/000466_img.png
284
+ train/000467_img.png
285
+ train/000477_img.png
286
+ train/000478_img.png
287
+ train/000479_img.png
288
+ train/000480_img.png
289
+ train/000481_img.png
290
+ train/000482_img.png
291
+ train/000483_img.png
292
+ train/000484_img.png
293
+ train/000485_img.png
294
+ train/000486_img.png
295
+ train/000487_img.png
296
+ train/000488_img.png
297
+ train/000489_img.png
298
+ train/000490_img.png
299
+ train/000491_img.png
300
+ train/000492_img.png
301
+ train/000493_img.png
302
+ train/000494_img.png
303
+ train/000495_img.png
304
+ train/000496_img.png
305
+ train/000497_img.png
306
+ train/000498_img.png
307
+ train/000499_img.png
308
+ train/000500_img.png
309
+ train/000501_img.png
310
+ train/000502_img.png
311
+ train/000503_img.png
312
+ train/000504_img.png
313
+ train/000505_img.png
314
+ train/000506_img.png
315
+ train/000513_img.png
316
+ train/000526_img.png
317
+ train/000527_img.png
318
+ train/000528_img.png
319
+ train/000529_img.png
320
+ train/000533_img.png
321
+ train/000534_img.png
322
+ train/000535_img.png
323
+ train/000539_img.png
324
+ train/000540_img.png
325
+ train/000541_img.png
326
+ train/000542_img.png
327
+ train/000543_img.png
328
+ train/000544_img.png
329
+ train/000545_img.png
330
+ train/000546_img.png
331
+ train/000547_img.png
332
+ train/000551_img.png
333
+ train/000552_img.png
334
+ train/000553_img.png
335
+ train/000571_img.png
336
+ train/000572_img.png
337
+ train/000573_img.png
338
+ train/000574_img.png
339
+ train/000575_img.png
340
+ train/000576_img.png
341
+ train/000577_img.png
342
+ train/000583_img.png
343
+ train/000584_img.png
344
+ train/000585_img.png
345
+ train/000586_img.png
346
+ train/000587_img.png
347
+ train/000588_img.png
348
+ train/000589_img.png
349
+ train/000594_img.png
350
+ train/000595_img.png
351
+ train/000596_img.png
352
+ train/000597_img.png
353
+ train/000598_img.png
354
+ train/000599_img.png
355
+ train/000600_img.png
356
+ train/000601_img.png
357
+ train/000607_img.png
358
+ train/000608_img.png
359
+ train/000609_img.png
360
+ train/000610_img.png
361
+ train/000613_img.png
362
+ train/000614_img.png
363
+ train/000615_img.png
364
+ train/000621_img.png
365
+ train/000622_img.png
366
+ train/000623_img.png
367
+ train/000624_img.png
368
+ train/000625_img.png
369
+ train/000626_img.png
370
+ train/000627_img.png
371
+ train/000628_img.png
372
+ train/000629_img.png
373
+ train/000630_img.png
374
+ train/000631_img.png
375
+ train/000638_img.png
376
+ train/000639_img.png
377
+ train/000640_img.png
378
+ train/000641_img.png
379
+ train/000642_img.png
380
+ train/000645_img.png
381
+ train/000646_img.png
382
+ train/000647_img.png
383
+ train/000648_img.png
384
+ train/000651_img.png
385
+ train/000652_img.png
386
+ train/000653_img.png
387
+ train/000654_img.png
388
+ train/000658_img.png
389
+ train/000659_img.png
390
+ train/000660_img.png
391
+ train/000661_img.png
392
+ train/000664_img.png
393
+ train/000665_img.png
394
+ train/000666_img.png
395
+ train/000673_img.png
396
+ train/000674_img.png
397
+ train/000681_img.png
398
+ train/000682_img.png
399
+ train/000683_img.png
400
+ train/000684_img.png
401
+ train/000690_img.png
402
+ train/000691_img.png
403
+ train/000694_img.png
404
+ train/000695_img.png
405
+ train/000699_img.png
406
+ train/000700_img.png
407
+ train/000701_img.png
408
+ train/000702_img.png
409
+ train/000703_img.png
410
+ train/000704_img.png
411
+ train/000713_img.png
412
+ train/000714_img.png
413
+ train/000715_img.png
414
+ train/000718_img.png
415
+ train/000719_img.png
416
+ train/000720_img.png
417
+ train/000721_img.png
418
+ train/000722_img.png
419
+ train/000728_img.png
420
+ train/000729_img.png
421
+ train/000734_img.png
422
+ train/000735_img.png
423
+ train/000736_img.png
424
+ train/000737_img.png
425
+ train/000738_img.png
426
+ train/000739_img.png
427
+ train/000740_img.png
428
+ train/000741_img.png
429
+ train/000744_img.png
430
+ train/000745_img.png
431
+ train/000746_img.png
432
+ train/000747_img.png
433
+ train/000748_img.png
434
+ train/000749_img.png
435
+ train/000750_img.png
436
+ train/000751_img.png
437
+ train/000752_img.png
438
+ train/000753_img.png
439
+ train/000754_img.png
440
+ train/000755_img.png
441
+ train/000756_img.png
442
+ train/000757_img.png
443
+ train/000787_img.png
444
+ train/000788_img.png
445
+ train/000789_img.png
446
+ train/000790_img.png
447
+ train/000791_img.png
448
+ train/000792_img.png
449
+ train/000793_img.png
450
+ train/000794_img.png
451
+ train/000795_img.png
452
+ train/000796_img.png
453
+ train/000797_img.png
454
+ train/000798_img.png
455
+ train/000804_img.png
456
+ train/000805_img.png
457
+ train/000806_img.png
458
+ train/000807_img.png
459
+ train/000808_img.png
460
+ train/000814_img.png
461
+ train/000815_img.png
462
+ train/000816_img.png
463
+ train/000817_img.png
464
+ train/000818_img.png
465
+ train/000819_img.png
466
+ train/000823_img.png
467
+ train/000824_img.png
468
+ train/000825_img.png
469
+ train/000826_img.png
470
+ train/000827_img.png
471
+ train/000828_img.png
472
+ train/000829_img.png
473
+ train/000830_img.png
474
+ train/000831_img.png
475
+ train/000846_img.png
476
+ train/000847_img.png
477
+ train/000848_img.png
478
+ train/000852_img.png
479
+ train/000853_img.png
480
+ train/000854_img.png
481
+ train/000855_img.png
482
+ train/000862_img.png
483
+ train/000863_img.png
484
+ train/000864_img.png
485
+ train/000865_img.png
486
+ train/000866_img.png
487
+ train/000867_img.png
488
+ train/000871_img.png
489
+ train/000872_img.png
490
+ train/000873_img.png
491
+ train/000874_img.png
492
+ train/000875_img.png
493
+ train/000876_img.png
494
+ train/000877_img.png
495
+ train/000878_img.png
496
+ train/000879_img.png
497
+ train/000880_img.png
498
+ train/000881_img.png
499
+ train/000882_img.png
500
+ train/000883_img.png
501
+ train/000884_img.png
502
+ train/000885_img.png
503
+ train/000886_img.png
504
+ train/000887_img.png
505
+ train/000888_img.png
506
+ train/000889_img.png
507
+ train/000890_img.png
508
+ train/000891_img.png
509
+ train/000892_img.png
510
+ train/000893_img.png
511
+ train/000894_img.png
512
+ train/000895_img.png
513
+ train/000896_img.png
514
+ train/000897_img.png
515
+ train/000898_img.png
516
+ train/000899_img.png
517
+ train/000900_img.png
518
+ train/000901_img.png
519
+ train/000902_img.png
520
+ train/000903_img.png
521
+ train/000904_img.png
522
+ train/000908_img.png
523
+ train/000909_img.png
524
+ train/000910_img.png
525
+ train/000911_img.png
526
+ train/000912_img.png
527
+ train/000913_img.png
528
+ train/000914_img.png
529
+ train/000915_img.png
530
+ train/000919_img.png
531
+ train/000920_img.png
532
+ train/000921_img.png
533
+ train/000922_img.png
534
+ train/000923_img.png
535
+ train/000924_img.png
536
+ train/000928_img.png
537
+ train/000929_img.png
538
+ train/000930_img.png
539
+ train/000935_img.png
540
+ train/000936_img.png
541
+ train/000937_img.png
542
+ train/000938_img.png
543
+ train/000939_img.png
544
+ train/000940_img.png
545
+ train/000941_img.png
546
+ train/000942_img.png
547
+ train/000943_img.png
548
+ train/000947_img.png
549
+ train/000948_img.png
550
+ train/000949_img.png
551
+ train/000950_img.png
552
+ train/000951_img.png
553
+ train/000952_img.png
554
+ train/000953_img.png
555
+ train/000954_img.png
556
+ train/000955_img.png
557
+ train/000956_img.png
558
+ train/000957_img.png
559
+ train/000962_img.png
560
+ train/000963_img.png
561
+ train/000967_img.png
562
+ train/000968_img.png
563
+ train/000977_img.png
564
+ train/000978_img.png
565
+ train/000979_img.png
566
+ train/000980_img.png
567
+ train/000981_img.png
568
+ train/000982_img.png
569
+ train/000983_img.png
570
+ train/000984_img.png
571
+ train/000985_img.png
572
+ train/000986_img.png
573
+ train/000987_img.png
574
+ train/000988_img.png
575
+ train/000989_img.png
576
+ train/000995_img.png
577
+ train/000996_img.png
578
+ train/000997_img.png
579
+ train/000998_img.png
580
+ train/000999_img.png
581
+ train/001004_img.png
582
+ train/001005_img.png
583
+ train/001006_img.png
584
+ train/001007_img.png
585
+ train/001008_img.png
586
+ train/001012_img.png
587
+ train/001013_img.png
588
+ train/001014_img.png
589
+ train/001015_img.png
590
+ train/001016_img.png
591
+ train/001017_img.png
592
+ train/001018_img.png
593
+ train/001019_img.png
594
+ train/001023_img.png
595
+ train/001024_img.png
596
+ train/001025_img.png
597
+ train/001026_img.png
598
+ train/001027_img.png
599
+ train/001028_img.png
600
+ train/001029_img.png
601
+ train/001030_img.png
602
+ train/001034_img.png
603
+ train/001035_img.png
604
+ train/001036_img.png
605
+ train/001039_img.png
606
+ train/001040_img.png
607
+ train/001041_img.png
608
+ train/001042_img.png
609
+ train/001043_img.png
610
+ train/001044_img.png
611
+ train/001045_img.png
612
+ train/001046_img.png
613
+ train/001049_img.png
614
+ train/001050_img.png
615
+ train/001053_img.png
616
+ train/001054_img.png
617
+ train/001055_img.png
618
+ train/001058_img.png
619
+ train/001059_img.png
620
+ train/001060_img.png
621
+ train/001061_img.png
622
+ train/001062_img.png
623
+ train/001063_img.png
624
+ train/001064_img.png
625
+ train/001065_img.png
626
+ train/001066_img.png
627
+ train/001067_img.png
628
+ train/001068_img.png
629
+ train/001069_img.png
630
+ train/001070_img.png
631
+ train/001071_img.png
632
+ train/001072_img.png
633
+ train/001073_img.png
634
+ train/001084_img.png
635
+ train/001085_img.png
636
+ train/001086_img.png
637
+ train/001096_img.png
638
+ train/001104_img.png
639
+ train/001109_img.png
640
+ train/001110_img.png
641
+ train/001111_img.png
642
+ train/001112_img.png
643
+ train/001113_img.png
644
+ train/001114_img.png
645
+ train/001115_img.png
646
+ train/001119_img.png
647
+ train/001120_img.png
648
+ train/001121_img.png
649
+ train/001131_img.png
650
+ train/001132_img.png
651
+ train/001133_img.png
652
+ train/001136_img.png
653
+ train/001137_img.png
654
+ train/001138_img.png
655
+ train/001139_img.png
656
+ train/001140_img.png
657
+ train/001141_img.png
658
+ train/001142_img.png
659
+ train/001158_img.png
660
+ train/001159_img.png
661
+ train/001160_img.png
662
+ train/001167_img.png
663
+ train/001168_img.png
664
+ train/001171_img.png
665
+ train/001172_img.png
666
+ train/001176_img.png
667
+ train/001177_img.png
668
+ train/001184_img.png
669
+ train/001185_img.png
670
+ train/001186_img.png
671
+ train/001187_img.png
672
+ train/001188_img.png
673
+ train/001189_img.png
674
+ train/001190_img.png
675
+ train/001196_img.png
676
+ train/001197_img.png
677
+ train/001198_img.png
678
+ train/001199_img.png
679
+ train/001212_img.png
680
+ train/001213_img.png
681
+ train/001214_img.png
682
+ train/001220_img.png
683
+ train/001221_img.png
684
+ train/001222_img.png
685
+ train/001223_img.png
686
+ train/001224_img.png
687
+ train/001230_img.png
688
+ train/001231_img.png
689
+ train/001235_img.png
690
+ train/001236_img.png
691
+ train/001237_img.png
692
+ train/001238_img.png
693
+ train/001239_img.png
694
+ train/001240_img.png
695
+ train/001241_img.png
696
+ train/001242_img.png
697
+ train/001243_img.png
698
+ train/001244_img.png
699
+ train/001245_img.png
700
+ train/001250_img.png
701
+ train/001251_img.png
702
+ train/001252_img.png
703
+ train/001265_img.png
704
+ train/001266_img.png
705
+ train/001267_img.png
706
+ train/001268_img.png
707
+ train/001269_img.png
708
+ train/001270_img.png
709
+ train/001271_img.png
710
+ train/001272_img.png
711
+ train/001273_img.png
712
+ train/001280_img.png
713
+ train/001281_img.png
714
+ train/001282_img.png
715
+ train/001283_img.png
716
+ train/001295_img.png
717
+ train/001299_img.png
718
+ train/001300_img.png
719
+ train/001308_img.png
720
+ train/001309_img.png
721
+ train/001310_img.png
722
+ train/001311_img.png
723
+ train/001312_img.png
724
+ train/001315_img.png
725
+ train/001316_img.png
726
+ train/001317_img.png
727
+ train/001318_img.png
728
+ train/001319_img.png
729
+ train/001320_img.png
730
+ train/001321_img.png
731
+ train/001322_img.png
732
+ train/001323_img.png
733
+ train/001324_img.png
734
+ train/001325_img.png
735
+ train/001326_img.png
736
+ train/001327_img.png
737
+ train/001332_img.png
738
+ train/001333_img.png
739
+ train/001340_img.png
740
+ train/001341_img.png
741
+ train/001342_img.png
742
+ train/001343_img.png
743
+ train/001344_img.png
744
+ train/001345_img.png
745
+ train/001349_img.png
746
+ train/001350_img.png
747
+ train/001351_img.png
748
+ train/001356_img.png
749
+ train/001357_img.png
750
+ train/001358_img.png
751
+ train/001359_img.png
752
+ train/001360_img.png
753
+ train/001361_img.png
754
+ train/001362_img.png
755
+ train/001365_img.png
756
+ train/001366_img.png
757
+ train/001369_img.png
758
+ train/001370_img.png
759
+ train/001371_img.png
760
+ train/001372_img.png
761
+ train/001373_img.png
762
+ train/001374_img.png
763
+ train/001375_img.png
764
+ train/001376_img.png
765
+ train/001377_img.png
766
+ train/001378_img.png
767
+ train/001379_img.png
768
+ train/001380_img.png
769
+ train/001381_img.png
770
+ train/001382_img.png
771
+ train/001391_img.png
772
+ train/001392_img.png
773
+ train/001401_img.png
774
+ train/001402_img.png
775
+ train/001403_img.png
776
+ train/001404_img.png
777
+ train/001405_img.png
778
+ train/001414_img.png
779
+ train/001415_img.png
780
+ train/001416_img.png
781
+ train/001417_img.png
782
+ train/001418_img.png
783
+ train/001419_img.png
784
+ train/001424_img.png
785
+ train/001425_img.png
786
+ train/001426_img.png
787
+ train/001427_img.png
788
+ train/001428_img.png
789
+ train/001433_img.png
790
+ train/001434_img.png
791
+ train/001435_img.png
792
+ train/001436_img.png
793
+ train/001437_img.png
794
+ train/001438_img.png
795
+ train/001439_img.png
FE2E/infer/dataset_normal/oasis/__init__.py ADDED
@@ -0,0 +1,76 @@
1
+ """ Get samples from OASIS validation set (https://pvl.cs.princeton.edu/OASIS/)
2
+ """
3
+ import os
4
+ import cv2
5
+ import numpy as np
6
+ import pickle
7
+
8
+ from infer.dataset_normal import Sample
9
+
10
+
11
+ def read_normal(path, h, w):
12
+ normal_dict = pickle.load(open(path, 'rb'))
13
+
14
+ mask = np.zeros((h,w))
15
+ normal = np.zeros((h,w,3))
16
+
17
+ # Stuff ROI normal into bounding box
18
+ min_y = normal_dict['min_y']
19
+ max_y = normal_dict['max_y']
20
+ min_x = normal_dict['min_x']
21
+ max_x = normal_dict['max_x']
22
+ roi_normal = normal_dict['normal']
23
+
24
+ # to LUB
25
+ normal[min_y:max_y+1, min_x:max_x+1, :] = roi_normal
26
+ normal = normal.astype(np.float32)
27
+ normal[:,:,0] *= -1
28
+ normal[:,:,1] *= -1
29
+
30
+ # Make mask
31
+ roi_mask = np.logical_or(np.logical_or(roi_normal[:,:,0] != 0, roi_normal[:,:,1] != 0), roi_normal[:,:,2] != 0).astype(np.float32)
32
+ mask[min_y:max_y+1, min_x:max_x+1] = roi_mask
33
+ mask = mask[:, :, None]
34
+ mask = mask > 0.5
35
+
36
+ return normal, mask
37
+
38
+
39
+ def get_sample(base_data_dir, sample_path, info):
40
+ # e.g. sample_path = "val/100277_DT_img.png"
41
+ scene_name = sample_path.split('/')[0]
42
+ img_name, img_ext = sample_path.split('/')[-1].split('_img')
43
+
44
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'oasis')
45
+ img_path = '%s/%s' % (dataset_path, sample_path)
46
+ normal_path = img_path.replace('_img'+img_ext, '_normal.pkl')
47
+ intrins_path = img_path.replace('_img'+img_ext, '_intrins.npy')
48
+ assert os.path.exists(img_path)
49
+ assert os.path.exists(normal_path)
50
+ assert os.path.exists(intrins_path)
51
+
52
+ # read image (H, W, 3)
53
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
54
+ img = img.astype(np.float32) / 255.0
55
+
56
+ # read normal (H, W, 3)
57
+ h = img.shape[0]
58
+ w = img.shape[1]
59
+ normal, normal_mask = read_normal(normal_path, h, w)
60
+
61
+ # read intrins (3, 3)
62
+ intrins = np.load(intrins_path)
63
+
64
+ sample = Sample(
65
+ img=img,
66
+ normal=normal,
67
+ normal_mask=normal_mask,
68
+ intrins=intrins,
69
+
70
+ dataset_name='oasis',
71
+ scene_name=scene_name,
72
+ img_name=img_name,
73
+ info=info
74
+ )
75
+
76
+ return sample
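A minimal usage sketch for the loader above, assuming it is run from the FE2E directory so the `infer` package is importable, that the OASIS data is unpacked under `<base_data_dir>/dsine_eval/oasis`, and that each line of `split/val.txt` is a sample path of the form shown in the code comment (e.g. `val/100277_DT_img.png`). `base_data_dir` is a placeholder, and `info` is only passed through, so an empty dict is used here:

from infer.dataset_normal.oasis import get_sample

base_data_dir = "/path/to/eval_data"  # placeholder: must contain dsine_eval/oasis

# the split file lists one sample path per line
with open("infer/dataset_normal/oasis/split/val.txt") as f:
    sample_paths = [line.strip() for line in f if line.strip()]

for sample_path in sample_paths[:3]:
    s = get_sample(base_data_dir, sample_path, info={})
    # s.img: (H, W, 3) float32 RGB in [0, 1]
    # s.normal: (H, W, 3) float32, zero outside the annotated ROI
    # s.normal_mask: (H, W, 1) bool, True where GT normals are valid
    print(sample_path, s.img.shape, float(s.normal_mask.mean()))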
FE2E/infer/dataset_normal/oasis/split/val.txt ADDED
The diff for this file is too large to render. See raw diff
 
FE2E/infer/dataset_normal/scannet/__init__.py ADDED
@@ -0,0 +1,64 @@
1
+ """ Get samples from ScanNet (https://github.com/ScanNet/ScanNet)
2
+ NOTE: GT surface normals and data split are from FrameNet (ICCV 2019) - https://github.com/hjwdzh/FrameNet
3
+ """
4
+ import os
5
+ import cv2
6
+ import numpy as np
7
+
8
+ from infer.dataset_normal import Sample
9
+
10
+
11
+ def get_sample(base_data_dir, sample_path, info):
12
+ # e.g. sample_path = "scene0532_00/000000_img.png"
13
+ scene_name = sample_path.split('/')[0]
14
+ img_name, img_ext = sample_path.split('/')[1].split('_img')
15
+
16
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'scannet')
17
+ img_path = '%s/%s' % (dataset_path, sample_path)
18
+ normal_png_path = img_path.replace('_img'+img_ext, '_normal.png')
19
+ normal_npy_path = img_path.replace('_img'+img_ext, '_normal.npy')
20
+ intrins_path = img_path.replace('_img'+img_ext, '_intrins.npy')
21
+ assert os.path.exists(img_path)
22
+
23
+ # read image (H, W, 3)
24
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
25
+ img = img.astype(np.float32) / 255.0
26
+
27
+ # read normal (H, W, 3)
28
+ if os.path.exists(normal_png_path):
29
+ normal = cv2.cvtColor(cv2.imread(normal_png_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
30
+ normal_mask = np.sum(normal, axis=2, keepdims=True) > 0
31
+ normal = (normal.astype(np.float32) / 255.0) * 2.0 - 1.0
32
+ elif os.path.exists(normal_npy_path):
33
+ normal = np.load(normal_npy_path).astype(np.float32)
34
+ assert normal.ndim == 3 and normal.shape[2] == 3, f"Unexpected normal shape: {normal.shape}"
35
+ # FrameNet npy normals use opposite x-axis convention for this evaluation codepath.
36
+ normal[:, :, 0] *= -1.0
37
+ normal_mask = np.linalg.norm(normal, axis=2, keepdims=True) > 1e-6
38
+ else:
39
+ raise FileNotFoundError(f"Missing ScanNet normal file: {normal_png_path} or {normal_npy_path}")
40
+
41
+ # read intrins (3, 3)
42
+ if os.path.exists(intrins_path):
43
+ intrins = np.load(intrins_path)
44
+ else:
45
+ # Fallback intrinsics for ScanNet benchmark-sized frames.
46
+ intrins = np.array([
47
+ [577.870605, 0.0, 319.5],
48
+ [0.0, 577.870605, 239.5],
49
+ [0.0, 0.0, 1.0],
50
+ ], dtype=np.float32)
51
+
52
+ sample = Sample(
53
+ img=img,
54
+ normal=normal,
55
+ normal_mask=normal_mask,
56
+ intrins=intrins,
57
+
58
+ dataset_name='scannet',
59
+ scene_name=scene_name,
60
+ img_name=img_name,
61
+ info=info
62
+ )
63
+
64
+ return sample
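These loaders feed a surface-normal evaluation, so a short sketch of how a returned sample could be scored against a prediction follows. The angular-error formula below is the generic one and is not taken from this repository's metric code; `pred` stands for any (H, W, 3) predicted normal map aligned with the loaded image:

import numpy as np

def mean_angular_error_deg(pred, gt, mask):
    """Mean angle in degrees between predicted and GT normals over valid pixels."""
    # normalize both maps so the per-pixel dot product is a cosine
    pred = pred / (np.linalg.norm(pred, axis=2, keepdims=True) + 1e-12)
    gt = gt / (np.linalg.norm(gt, axis=2, keepdims=True) + 1e-12)
    cos = np.clip(np.sum(pred * gt, axis=2), -1.0, 1.0)
    err = np.degrees(np.arccos(cos))
    valid = np.squeeze(mask)  # (H, W, 1) bool -> (H, W)
    return float(err[valid].mean())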
FE2E/infer/dataset_normal/scannet/split/test.txt ADDED
@@ -0,0 +1,300 @@
1
+ scene0532_00/000000_img.png
2
+ scene0220_02/000450_img.png
3
+ scene0061_00/000420_img.png
4
+ scene0656_03/000490_img.png
5
+ scene0525_01/000960_img.png
6
+ scene0310_01/000630_img.png
7
+ scene0628_00/000100_img.png
8
+ scene0481_01/000900_img.png
9
+ scene0608_02/001690_img.png
10
+ scene0415_00/000750_img.png
11
+ scene0603_00/001590_img.png
12
+ scene0740_00/000510_img.png
13
+ scene0740_00/002510_img.png
14
+ scene0177_00/000550_img.png
15
+ scene0395_01/000160_img.png
16
+ scene0395_01/002160_img.png
17
+ scene0343_00/000210_img.png
18
+ scene0241_00/000970_img.png
19
+ scene0717_00/000010_img.png
20
+ scene0362_00/000890_img.png
21
+ scene0457_02/000070_img.png
22
+ scene0296_00/000040_img.png
23
+ scene0157_00/000300_img.png
24
+ scene0502_02/001130_img.png
25
+ scene0101_01/000410_img.png
26
+ scene0610_02/000730_img.png
27
+ scene0610_02/002780_img.png
28
+ scene0416_00/000950_img.png
29
+ scene0685_02/000840_img.png
30
+ scene0685_02/003050_img.png
31
+ scene0662_01/001810_img.png
32
+ scene0670_01/000840_img.png
33
+ scene0670_01/002840_img.png
34
+ scene0012_01/000160_img.png
35
+ scene0012_01/002160_img.png
36
+ scene0320_03/000790_img.png
37
+ scene0780_00/000450_img.png
38
+ scene0575_01/000510_img.png
39
+ scene0575_01/002510_img.png
40
+ scene0168_01/000100_img.png
41
+ scene0335_02/000980_img.png
42
+ scene0691_00/000400_img.png
43
+ scene0778_00/001230_img.png
44
+ scene0019_01/000490_img.png
45
+ scene0554_00/001960_img.png
46
+ scene0114_00/000260_img.png
47
+ scene0457_01/000350_img.png
48
+ scene0548_01/000150_img.png
49
+ scene0548_01/002150_img.png
50
+ scene0378_00/000610_img.png
51
+ scene0505_00/000710_img.png
52
+ scene0505_00/002710_img.png
53
+ scene0695_01/001200_img.png
54
+ scene0424_02/001060_img.png
55
+ scene0627_00/001470_img.png
56
+ scene0279_00/001200_img.png
57
+ scene0291_01/001280_img.png
58
+ scene0768_00/000330_img.png
59
+ scene0768_00/002330_img.png
60
+ scene0696_01/000340_img.png
61
+ scene0145_00/000420_img.png
62
+ scene0185_00/000370_img.png
63
+ scene0185_00/002370_img.png
64
+ scene0806_00/001070_img.png
65
+ scene0261_03/000410_img.png
66
+ scene0068_00/000570_img.png
67
+ scene0804_00/000510_img.png
68
+ scene0221_01/000490_img.png
69
+ scene0630_03/000880_img.png
70
+ scene0617_00/001270_img.png
71
+ scene0206_02/001640_img.png
72
+ scene0582_02/000590_img.png
73
+ scene0031_01/000100_img.png
74
+ scene0031_01/002100_img.png
75
+ scene0505_04/001100_img.png
76
+ scene0092_01/000600_img.png
77
+ scene0542_00/000570_img.png
78
+ scene0525_02/000480_img.png
79
+ scene0756_00/000870_img.png
80
+ scene0756_00/002870_img.png
81
+ scene0061_01/001360_img.png
82
+ scene0588_01/000940_img.png
83
+ scene0256_02/000610_img.png
84
+ scene0300_00/001430_img.png
85
+ scene0451_05/001110_img.png
86
+ scene0397_01/001390_img.png
87
+ scene0372_00/000250_img.png
88
+ scene0372_00/002280_img.png
89
+ scene0647_01/000790_img.png
90
+ scene0474_01/001600_img.png
91
+ scene0404_00/000360_img.png
92
+ scene0404_00/002360_img.png
93
+ scene0702_01/000010_img.png
94
+ scene0114_02/001420_img.png
95
+ scene0460_00/000820_img.png
96
+ scene0151_01/000390_img.png
97
+ scene0151_01/002390_img.png
98
+ scene0078_02/000520_img.png
99
+ scene0772_00/001530_img.png
100
+ scene0705_01/000490_img.png
101
+ scene0408_01/000590_img.png
102
+ scene0538_00/001430_img.png
103
+ scene0651_00/000080_img.png
104
+ scene0210_01/001040_img.png
105
+ scene0002_00/000240_img.png
106
+ scene0002_00/002240_img.png
107
+ scene0002_00/004240_img.png
108
+ scene0347_00/001040_img.png
109
+ scene0072_02/000210_img.png
110
+ scene0371_00/000410_img.png
111
+ scene0211_00/001120_img.png
112
+ scene0514_01/000050_img.png
113
+ scene0473_01/000910_img.png
114
+ scene0362_02/001970_img.png
115
+ scene0024_02/001180_img.png
116
+ scene0673_00/000280_img.png
117
+ scene0176_00/000030_img.png
118
+ scene0073_00/000400_img.png
119
+ scene0186_01/000230_img.png
120
+ scene0236_00/000490_img.png
121
+ scene0210_00/000780_img.png
122
+ scene0325_01/000920_img.png
123
+ scene0044_01/000170_img.png
124
+ scene0197_00/001300_img.png
125
+ scene0472_02/001190_img.png
126
+ scene0611_01/000010_img.png
127
+ scene0758_00/000710_img.png
128
+ scene0650_00/000870_img.png
129
+ scene0728_00/000160_img.png
130
+ scene0217_00/000450_img.png
131
+ scene0335_01/000070_img.png
132
+ scene0335_01/002070_img.png
133
+ scene0504_00/001230_img.png
134
+ scene0669_01/000780_img.png
135
+ scene0143_01/000760_img.png
136
+ scene0320_01/000460_img.png
137
+ scene0645_01/000340_img.png
138
+ scene0645_01/002340_img.png
139
+ scene0645_01/004340_img.png
140
+ scene0248_01/000300_img.png
141
+ scene0416_04/000580_img.png
142
+ scene0416_04/002580_img.png
143
+ scene0534_00/000780_img.png
144
+ scene0489_02/001010_img.png
145
+ scene0379_00/000500_img.png
146
+ scene0500_01/000480_img.png
147
+ scene0561_01/000850_img.png
148
+ scene0580_00/001200_img.png
149
+ scene0580_00/003200_img.png
150
+ scene0552_01/000430_img.png
151
+ scene0744_00/000020_img.png
152
+ scene0744_00/002020_img.png
153
+ scene0044_00/001030_img.png
154
+ scene0301_02/000140_img.png
155
+ scene0422_00/000940_img.png
156
+ scene0655_01/000700_img.png
157
+ scene0803_00/001770_img.png
158
+ scene0629_02/000530_img.png
159
+ scene0609_01/000030_img.png
160
+ scene0623_01/000930_img.png
161
+ scene0596_00/000660_img.png
162
+ scene0150_01/000150_img.png
163
+ scene0501_02/001770_img.png
164
+ scene0560_00/000420_img.png
165
+ scene0406_02/000220_img.png
166
+ scene0587_02/001010_img.png
167
+ scene0540_00/000730_img.png
168
+ scene0074_01/001390_img.png
169
+ scene0620_01/000200_img.png
170
+ scene0486_00/000320_img.png
171
+ scene0486_00/002320_img.png
172
+ scene0600_02/001350_img.png
173
+ scene0085_00/001800_img.png
174
+ scene0084_00/000560_img.png
175
+ scene0319_00/000580_img.png
176
+ scene0400_01/000890_img.png
177
+ scene0548_02/000020_img.png
178
+ scene0548_02/002110_img.png
179
+ scene0244_01/000360_img.png
180
+ scene0785_00/000630_img.png
181
+ scene0785_00/002630_img.png
182
+ scene0479_02/000530_img.png
183
+ scene0121_00/000310_img.png
184
+ scene0107_00/000410_img.png
185
+ scene0328_00/000040_img.png
186
+ scene0196_00/000350_img.png
187
+ scene0404_01/000720_img.png
188
+ scene0404_01/002720_img.png
189
+ scene0081_02/000710_img.png
190
+ scene0666_00/000360_img.png
191
+ scene0367_00/000330_img.png
192
+ scene0340_01/000460_img.png
193
+ scene0300_01/000980_img.png
194
+ scene0275_00/001230_img.png
195
+ scene0036_00/000440_img.png
196
+ scene0520_01/001350_img.png
197
+ scene0113_01/000000_img.png
198
+ scene0541_01/000020_img.png
199
+ scene0034_01/001070_img.png
200
+ scene0030_01/000160_img.png
201
+ scene0438_00/000510_img.png
202
+ scene0679_00/001210_img.png
203
+ scene0546_00/000400_img.png
204
+ scene0223_00/000600_img.png
205
+ scene0403_00/001570_img.png
206
+ scene0001_01/001170_img.png
207
+ scene0014_00/001880_img.png
208
+ scene0673_02/001480_img.png
209
+ scene0794_00/000000_img.png
210
+ scene0209_02/000210_img.png
211
+ scene0801_00/000180_img.png
212
+ scene0086_00/000800_img.png
213
+ scene0501_00/001370_img.png
214
+ scene0412_01/000070_img.png
215
+ scene0339_00/000730_img.png
216
+ scene0724_00/000500_img.png
217
+ scene0654_01/001080_img.png
218
+ scene0081_01/000460_img.png
219
+ scene0576_02/001030_img.png
220
+ scene0589_01/001010_img.png
221
+ scene0428_00/001470_img.png
222
+ scene0199_00/000110_img.png
223
+ scene0513_00/000440_img.png
224
+ scene0512_00/000110_img.png
225
+ scene0508_00/000840_img.png
226
+ scene0009_02/000360_img.png
227
+ scene0303_00/001250_img.png
228
+ scene0533_01/001240_img.png
229
+ scene0445_01/000640_img.png
230
+ scene0392_00/001870_img.png
231
+ scene0111_01/001560_img.png
232
+ scene0192_02/000620_img.png
233
+ scene0396_00/000170_img.png
234
+ scene0376_02/000320_img.png
235
+ scene0100_01/000880_img.png
236
+ scene0578_01/000920_img.png
237
+ scene0765_00/000400_img.png
238
+ scene0784_00/000420_img.png
239
+ scene0784_00/002430_img.png
240
+ scene0784_00/004430_img.png
241
+ scene0065_02/000740_img.png
242
+ scene0207_02/000040_img.png
243
+ scene0207_02/002040_img.png
244
+ scene0440_01/000500_img.png
245
+ scene0220_01/000450_img.png
246
+ scene0713_00/000000_img.png
247
+ scene0713_00/002000_img.png
248
+ scene0697_02/000070_img.png
249
+ scene0697_02/002070_img.png
250
+ scene0286_03/001170_img.png
251
+ scene0393_02/000810_img.png
252
+ scene0370_02/000080_img.png
253
+ scene0370_02/002080_img.png
254
+ scene0452_02/000950_img.png
255
+ scene0226_00/001270_img.png
256
+ scene0474_03/001610_img.png
257
+ scene0304_00/000600_img.png
258
+ scene0250_01/000820_img.png
259
+ scene0250_01/002820_img.png
260
+ scene0419_02/001480_img.png
261
+ scene0548_00/000000_img.png
262
+ scene0548_00/002050_img.png
263
+ scene0568_00/000150_img.png
264
+ scene0324_00/000080_img.png
265
+ scene0567_00/000070_img.png
266
+ scene0567_00/002070_img.png
267
+ scene0659_01/001460_img.png
268
+ scene0540_02/000120_img.png
269
+ scene0488_01/000680_img.png
270
+ scene0146_01/000260_img.png
271
+ scene0580_01/001130_img.png
272
+ scene0580_01/003130_img.png
273
+ scene0421_02/001320_img.png
274
+ scene0657_00/000090_img.png
275
+ scene0588_02/001380_img.png
276
+ scene0471_02/000570_img.png
277
+ scene0186_00/000260_img.png
278
+ scene0186_00/002260_img.png
279
+ scene0065_01/000800_img.png
280
+ scene0456_01/000710_img.png
281
+ scene0524_01/001680_img.png
282
+ scene0062_00/000690_img.png
283
+ scene0134_01/000780_img.png
284
+ scene0294_00/000280_img.png
285
+ scene0294_00/002280_img.png
286
+ scene0452_01/001090_img.png
287
+ scene0142_00/000770_img.png
288
+ scene0135_00/000330_img.png
289
+ scene0279_02/000780_img.png
290
+ scene0698_01/000390_img.png
291
+ scene0520_00/000820_img.png
292
+ scene0448_01/000850_img.png
293
+ scene0276_00/000320_img.png
294
+ scene0731_00/000220_img.png
295
+ scene0599_01/001090_img.png
296
+ scene0631_02/000210_img.png
297
+ scene0748_00/000890_img.png
298
+ scene0170_02/000190_img.png
299
+ scene0215_01/000400_img.png
300
+ scene0178_00/001330_img.png
FE2E/infer/dataset_normal/sintel/__init__.py ADDED
@@ -0,0 +1,49 @@
1
+ """ Get samples from Sintel (http://sintel.is.tue.mpg.de/)
2
+ NOTE: We computed the GT surface normals by doing discontinuity-aware plane fitting
3
+ """
4
+ import os
5
+ import cv2
6
+ import numpy as np
7
+ os.environ["OPENCV_IO_ENABLE_OPENEXR"]="1"
8
+
9
+ from infer.dataset_normal import Sample
10
+
11
+
12
+
13
+ def get_sample(base_data_dir, sample_path, info):
14
+ # e.g. sample_path = "alley_1/frame_0001_img.png"
15
+ scene_name = sample_path.split('/')[0]
16
+ img_name, img_ext = sample_path.split('/')[1].split('_img')
17
+
18
+ dataset_path = os.path.join(base_data_dir, 'dsine_eval', 'sintel')
19
+ img_path = '%s/%s' % (dataset_path, sample_path)
20
+ normal_path = img_path.replace('_img'+img_ext, '_normal.exr')
21
+ intrins_path = img_path.replace('_img'+img_ext, '_intrins.npy')
22
+ assert os.path.exists(img_path)
23
+ assert os.path.exists(normal_path)
24
+ assert os.path.exists(intrins_path)
25
+
26
+ # read image (H, W, 3)
27
+ img = cv2.cvtColor(cv2.imread(img_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
28
+ img = img.astype(np.float32) / 255.0
29
+
30
+ # read normal (H, W, 3)
31
+ normal = cv2.cvtColor(cv2.imread(normal_path, cv2.IMREAD_UNCHANGED), cv2.COLOR_BGR2RGB)
32
+ normal_mask = np.linalg.norm(normal, axis=2, keepdims=True) > 0.5
33
+
34
+ # read intrins (3, 3)
35
+ intrins = np.load(intrins_path)
36
+
37
+ sample = Sample(
38
+ img=img,
39
+ normal=normal,
40
+ normal_mask=normal_mask,
41
+ intrins=intrins,
42
+
43
+ dataset_name='sintel',
44
+ scene_name=scene_name,
45
+ img_name=img_name,
46
+ info=info
47
+ )
48
+
49
+ return sample
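The Sintel ground truth above is stored as raw EXR vectors, and the loader only masks out near-zero pixels (norm > 0.5); as a defensive step one might renormalize the kept normals to unit length before comparing against predictions. The helper below is an assumption made here for illustration, not something this repository is confirmed to do:

import numpy as np

def unit_normals(normal, mask, eps=1e-6):
    """Scale normals inside the mask to unit length; leave masked-out pixels at zero."""
    length = np.linalg.norm(normal, axis=2, keepdims=True)
    unit = np.where(length > eps, normal / np.maximum(length, eps), 0.0)
    return (unit * mask).astype(np.float32)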
FE2E/infer/dataset_normal/sintel/split/sintel.txt ADDED
@@ -0,0 +1,1064 @@
1
+ alley_1/frame_0001_img.png
2
+ alley_1/frame_0002_img.png
3
+ alley_1/frame_0003_img.png
4
+ alley_1/frame_0004_img.png
5
+ alley_1/frame_0005_img.png
6
+ alley_1/frame_0006_img.png
7
+ alley_1/frame_0007_img.png
8
+ alley_1/frame_0008_img.png
9
+ alley_1/frame_0009_img.png
10
+ alley_1/frame_0010_img.png
11
+ alley_1/frame_0011_img.png
12
+ alley_1/frame_0012_img.png
13
+ alley_1/frame_0013_img.png
14
+ alley_1/frame_0014_img.png
15
+ alley_1/frame_0015_img.png
16
+ alley_1/frame_0016_img.png
17
+ alley_1/frame_0017_img.png
18
+ alley_1/frame_0018_img.png
19
+ alley_1/frame_0019_img.png
20
+ alley_1/frame_0020_img.png
21
+ alley_1/frame_0021_img.png
22
+ alley_1/frame_0022_img.png
23
+ alley_1/frame_0023_img.png
24
+ alley_1/frame_0024_img.png
25
+ alley_1/frame_0025_img.png
26
+ alley_1/frame_0026_img.png
27
+ alley_1/frame_0027_img.png
28
+ alley_1/frame_0028_img.png
29
+ alley_1/frame_0029_img.png
30
+ alley_1/frame_0030_img.png
31
+ alley_1/frame_0031_img.png
32
+ alley_1/frame_0032_img.png
33
+ alley_1/frame_0033_img.png
34
+ alley_1/frame_0034_img.png
35
+ alley_1/frame_0035_img.png
36
+ alley_1/frame_0036_img.png
37
+ alley_1/frame_0037_img.png
38
+ alley_1/frame_0038_img.png
39
+ alley_1/frame_0039_img.png
40
+ alley_1/frame_0040_img.png
41
+ alley_1/frame_0041_img.png
42
+ alley_1/frame_0042_img.png
43
+ alley_1/frame_0043_img.png
44
+ alley_1/frame_0044_img.png
45
+ alley_1/frame_0045_img.png
46
+ alley_1/frame_0046_img.png
47
+ alley_1/frame_0047_img.png
48
+ alley_1/frame_0048_img.png
49
+ alley_1/frame_0049_img.png
50
+ alley_1/frame_0050_img.png
51
+ alley_2/frame_0001_img.png
52
+ alley_2/frame_0002_img.png
53
+ alley_2/frame_0003_img.png
54
+ alley_2/frame_0004_img.png
55
+ alley_2/frame_0005_img.png
56
+ alley_2/frame_0006_img.png
57
+ alley_2/frame_0007_img.png
58
+ alley_2/frame_0008_img.png
59
+ alley_2/frame_0009_img.png
60
+ alley_2/frame_0010_img.png
61
+ alley_2/frame_0011_img.png
62
+ alley_2/frame_0012_img.png
63
+ alley_2/frame_0013_img.png
64
+ alley_2/frame_0014_img.png
65
+ alley_2/frame_0015_img.png
66
+ alley_2/frame_0016_img.png
67
+ alley_2/frame_0017_img.png
68
+ alley_2/frame_0018_img.png
69
+ alley_2/frame_0019_img.png
70
+ alley_2/frame_0020_img.png
71
+ alley_2/frame_0021_img.png
72
+ alley_2/frame_0022_img.png
73
+ alley_2/frame_0023_img.png
74
+ alley_2/frame_0024_img.png
75
+ alley_2/frame_0025_img.png
76
+ alley_2/frame_0026_img.png
77
+ alley_2/frame_0027_img.png
78
+ alley_2/frame_0028_img.png
79
+ alley_2/frame_0029_img.png
80
+ alley_2/frame_0030_img.png
81
+ alley_2/frame_0031_img.png
82
+ alley_2/frame_0032_img.png
83
+ alley_2/frame_0033_img.png
84
+ alley_2/frame_0034_img.png
85
+ alley_2/frame_0035_img.png
86
+ alley_2/frame_0036_img.png
87
+ alley_2/frame_0037_img.png
88
+ alley_2/frame_0038_img.png
89
+ alley_2/frame_0039_img.png
90
+ alley_2/frame_0040_img.png
91
+ alley_2/frame_0041_img.png
92
+ alley_2/frame_0042_img.png
93
+ alley_2/frame_0043_img.png
94
+ alley_2/frame_0044_img.png
95
+ alley_2/frame_0045_img.png
96
+ alley_2/frame_0046_img.png
97
+ alley_2/frame_0047_img.png
98
+ alley_2/frame_0048_img.png
99
+ alley_2/frame_0049_img.png
100
+ alley_2/frame_0050_img.png
101
+ ambush_2/frame_0001_img.png
102
+ ambush_2/frame_0002_img.png
103
+ ambush_2/frame_0003_img.png
104
+ ambush_2/frame_0004_img.png
105
+ ambush_2/frame_0005_img.png
106
+ ambush_2/frame_0006_img.png
107
+ ambush_2/frame_0007_img.png
108
+ ambush_2/frame_0008_img.png
109
+ ambush_2/frame_0009_img.png
110
+ ambush_2/frame_0010_img.png
111
+ ambush_2/frame_0011_img.png
112
+ ambush_2/frame_0012_img.png
113
+ ambush_2/frame_0013_img.png
114
+ ambush_2/frame_0014_img.png
115
+ ambush_2/frame_0015_img.png
116
+ ambush_2/frame_0016_img.png
117
+ ambush_2/frame_0017_img.png
118
+ ambush_2/frame_0018_img.png
119
+ ambush_2/frame_0019_img.png
120
+ ambush_2/frame_0020_img.png
121
+ ambush_2/frame_0021_img.png
122
+ ambush_4/frame_0001_img.png
123
+ ambush_4/frame_0002_img.png
124
+ ambush_4/frame_0003_img.png
125
+ ambush_4/frame_0004_img.png
126
+ ambush_4/frame_0005_img.png
127
+ ambush_4/frame_0006_img.png
128
+ ambush_4/frame_0007_img.png
129
+ ambush_4/frame_0008_img.png
130
+ ambush_4/frame_0009_img.png
131
+ ambush_4/frame_0010_img.png
132
+ ambush_4/frame_0011_img.png
133
+ ambush_4/frame_0012_img.png
134
+ ambush_4/frame_0013_img.png
135
+ ambush_4/frame_0014_img.png
136
+ ambush_4/frame_0015_img.png
137
+ ambush_4/frame_0016_img.png
138
+ ambush_4/frame_0017_img.png
139
+ ambush_4/frame_0018_img.png
140
+ ambush_4/frame_0019_img.png
141
+ ambush_4/frame_0020_img.png
142
+ ambush_4/frame_0021_img.png
143
+ ambush_4/frame_0022_img.png
144
+ ambush_4/frame_0023_img.png
145
+ ambush_4/frame_0024_img.png
146
+ ambush_4/frame_0025_img.png
147
+ ambush_4/frame_0026_img.png
148
+ ambush_4/frame_0027_img.png
149
+ ambush_4/frame_0028_img.png
150
+ ambush_4/frame_0029_img.png
151
+ ambush_4/frame_0030_img.png
152
+ ambush_4/frame_0031_img.png
153
+ ambush_4/frame_0032_img.png
154
+ ambush_4/frame_0033_img.png
155
+ ambush_5/frame_0001_img.png
156
+ ambush_5/frame_0002_img.png
157
+ ambush_5/frame_0003_img.png
158
+ ambush_5/frame_0004_img.png
159
+ ambush_5/frame_0005_img.png
160
+ ambush_5/frame_0006_img.png
161
+ ambush_5/frame_0007_img.png
162
+ ambush_5/frame_0008_img.png
163
+ ambush_5/frame_0009_img.png
164
+ ambush_5/frame_0010_img.png
165
+ ambush_5/frame_0011_img.png
166
+ ambush_5/frame_0012_img.png
167
+ ambush_5/frame_0013_img.png
168
+ ambush_5/frame_0014_img.png
169
+ ambush_5/frame_0015_img.png
170
+ ambush_5/frame_0016_img.png
171
+ ambush_5/frame_0017_img.png
172
+ ambush_5/frame_0018_img.png
173
+ ambush_5/frame_0019_img.png
174
+ ambush_5/frame_0020_img.png
175
+ ambush_5/frame_0021_img.png
176
+ ambush_5/frame_0022_img.png
177
+ ambush_5/frame_0023_img.png
178
+ ambush_5/frame_0024_img.png
179
+ ambush_5/frame_0025_img.png
180
+ ambush_5/frame_0026_img.png
181
+ ambush_5/frame_0027_img.png
182
+ ambush_5/frame_0028_img.png
183
+ ambush_5/frame_0029_img.png
184
+ ambush_5/frame_0030_img.png
185
+ ambush_5/frame_0031_img.png
186
+ ambush_5/frame_0032_img.png
187
+ ambush_5/frame_0033_img.png
188
+ ambush_5/frame_0034_img.png
189
+ ambush_5/frame_0035_img.png
190
+ ambush_5/frame_0036_img.png
191
+ ambush_5/frame_0037_img.png
192
+ ambush_5/frame_0038_img.png
193
+ ambush_5/frame_0039_img.png
194
+ ambush_5/frame_0040_img.png
195
+ ambush_5/frame_0041_img.png
196
+ ambush_5/frame_0042_img.png
197
+ ambush_5/frame_0043_img.png
198
+ ambush_5/frame_0044_img.png
199
+ ambush_5/frame_0045_img.png
200
+ ambush_5/frame_0046_img.png
201
+ ambush_5/frame_0047_img.png
202
+ ambush_5/frame_0048_img.png
203
+ ambush_5/frame_0049_img.png
204
+ ambush_5/frame_0050_img.png
205
+ ambush_6/frame_0001_img.png
206
+ ambush_6/frame_0002_img.png
207
+ ambush_6/frame_0003_img.png
208
+ ambush_6/frame_0004_img.png
209
+ ambush_6/frame_0005_img.png
210
+ ambush_6/frame_0006_img.png
211
+ ambush_6/frame_0007_img.png
212
+ ambush_6/frame_0008_img.png
213
+ ambush_6/frame_0009_img.png
214
+ ambush_6/frame_0010_img.png
215
+ ambush_6/frame_0011_img.png
216
+ ambush_6/frame_0012_img.png
217
+ ambush_6/frame_0013_img.png
218
+ ambush_6/frame_0014_img.png
219
+ ambush_6/frame_0015_img.png
220
+ ambush_6/frame_0016_img.png
221
+ ambush_6/frame_0017_img.png
222
+ ambush_6/frame_0018_img.png
223
+ ambush_6/frame_0019_img.png
224
+ ambush_6/frame_0020_img.png
225
+ ambush_7/frame_0001_img.png
226
+ ambush_7/frame_0002_img.png
227
+ ambush_7/frame_0003_img.png
228
+ ambush_7/frame_0004_img.png
229
+ ambush_7/frame_0005_img.png
230
+ ambush_7/frame_0006_img.png
231
+ ambush_7/frame_0007_img.png
232
+ ambush_7/frame_0008_img.png
233
+ ambush_7/frame_0009_img.png
234
+ ambush_7/frame_0010_img.png
235
+ ambush_7/frame_0011_img.png
236
+ ambush_7/frame_0012_img.png
237
+ ambush_7/frame_0013_img.png
238
+ ambush_7/frame_0014_img.png
239
+ ambush_7/frame_0015_img.png
240
+ ambush_7/frame_0016_img.png
241
+ ambush_7/frame_0017_img.png
242
+ ambush_7/frame_0018_img.png
243
+ ambush_7/frame_0019_img.png
244
+ ambush_7/frame_0020_img.png
245
+ ambush_7/frame_0021_img.png
246
+ ambush_7/frame_0022_img.png
247
+ ambush_7/frame_0023_img.png
248
+ ambush_7/frame_0024_img.png
249
+ ambush_7/frame_0025_img.png
250
+ ambush_7/frame_0026_img.png
251
+ ambush_7/frame_0027_img.png
252
+ ambush_7/frame_0028_img.png
253
+ ambush_7/frame_0029_img.png
254
+ ambush_7/frame_0030_img.png
255
+ ambush_7/frame_0031_img.png
256
+ ambush_7/frame_0032_img.png
257
+ ambush_7/frame_0033_img.png
258
+ ambush_7/frame_0034_img.png
259
+ ambush_7/frame_0035_img.png
260
+ ambush_7/frame_0036_img.png
261
+ ambush_7/frame_0037_img.png
262
+ ambush_7/frame_0038_img.png
263
+ ambush_7/frame_0039_img.png
264
+ ambush_7/frame_0040_img.png
265
+ ambush_7/frame_0041_img.png
266
+ ambush_7/frame_0042_img.png
267
+ ambush_7/frame_0043_img.png
268
+ ambush_7/frame_0044_img.png
269
+ ambush_7/frame_0045_img.png
270
+ ambush_7/frame_0046_img.png
271
+ ambush_7/frame_0047_img.png
272
+ ambush_7/frame_0048_img.png
273
+ ambush_7/frame_0049_img.png
274
+ ambush_7/frame_0050_img.png
275
+ bamboo_1/frame_0001_img.png
276
+ bamboo_1/frame_0002_img.png
277
+ bamboo_1/frame_0003_img.png
278
+ bamboo_1/frame_0004_img.png
279
+ bamboo_1/frame_0005_img.png
280
+ bamboo_1/frame_0006_img.png
281
+ bamboo_1/frame_0007_img.png
282
+ bamboo_1/frame_0008_img.png
283
+ bamboo_1/frame_0009_img.png
284
+ bamboo_1/frame_0010_img.png
285
+ bamboo_1/frame_0011_img.png
286
+ bamboo_1/frame_0012_img.png
287
+ bamboo_1/frame_0013_img.png
288
+ bamboo_1/frame_0014_img.png
289
+ bamboo_1/frame_0015_img.png
290
+ bamboo_1/frame_0016_img.png
291
+ bamboo_1/frame_0017_img.png
292
+ bamboo_1/frame_0018_img.png
293
+ bamboo_1/frame_0019_img.png
294
+ bamboo_1/frame_0020_img.png
295
+ bamboo_1/frame_0021_img.png
296
+ bamboo_1/frame_0022_img.png
297
+ bamboo_1/frame_0023_img.png
298
+ bamboo_1/frame_0024_img.png
299
+ bamboo_1/frame_0025_img.png
300
+ bamboo_1/frame_0026_img.png
301
+ bamboo_1/frame_0027_img.png
302
+ bamboo_1/frame_0028_img.png
303
+ bamboo_1/frame_0029_img.png
304
+ bamboo_1/frame_0030_img.png
305
+ bamboo_1/frame_0031_img.png
306
+ bamboo_1/frame_0032_img.png
307
+ bamboo_1/frame_0033_img.png
308
+ bamboo_1/frame_0034_img.png
309
+ bamboo_1/frame_0035_img.png
310
+ bamboo_1/frame_0036_img.png
311
+ bamboo_1/frame_0037_img.png
312
+ bamboo_1/frame_0038_img.png
313
+ bamboo_1/frame_0039_img.png
314
+ bamboo_1/frame_0040_img.png
315
+ bamboo_1/frame_0041_img.png
316
+ bamboo_1/frame_0042_img.png
317
+ bamboo_1/frame_0043_img.png
318
+ bamboo_1/frame_0044_img.png
319
+ bamboo_1/frame_0045_img.png
320
+ bamboo_1/frame_0046_img.png
321
+ bamboo_1/frame_0047_img.png
322
+ bamboo_1/frame_0048_img.png
323
+ bamboo_1/frame_0049_img.png
324
+ bamboo_1/frame_0050_img.png
325
+ bamboo_2/frame_0001_img.png
326
+ bamboo_2/frame_0002_img.png
327
+ bamboo_2/frame_0003_img.png
328
+ bamboo_2/frame_0004_img.png
329
+ bamboo_2/frame_0005_img.png
330
+ bamboo_2/frame_0006_img.png
331
+ bamboo_2/frame_0007_img.png
332
+ bamboo_2/frame_0008_img.png
333
+ bamboo_2/frame_0009_img.png
334
+ bamboo_2/frame_0010_img.png
335
+ bamboo_2/frame_0011_img.png
336
+ bamboo_2/frame_0012_img.png
337
+ bamboo_2/frame_0013_img.png
338
+ bamboo_2/frame_0014_img.png
339
+ bamboo_2/frame_0015_img.png
340
+ bamboo_2/frame_0016_img.png
341
+ bamboo_2/frame_0017_img.png
342
+ bamboo_2/frame_0018_img.png
343
+ bamboo_2/frame_0019_img.png
344
+ bamboo_2/frame_0020_img.png
345
+ bamboo_2/frame_0021_img.png
346
+ bamboo_2/frame_0022_img.png
347
+ bamboo_2/frame_0023_img.png
348
+ bamboo_2/frame_0024_img.png
349
+ bamboo_2/frame_0025_img.png
350
+ bamboo_2/frame_0026_img.png
351
+ bamboo_2/frame_0027_img.png
352
+ bamboo_2/frame_0028_img.png
353
+ bamboo_2/frame_0029_img.png
354
+ bamboo_2/frame_0030_img.png
355
+ bamboo_2/frame_0031_img.png
356
+ bamboo_2/frame_0032_img.png
357
+ bamboo_2/frame_0033_img.png
358
+ bamboo_2/frame_0034_img.png
359
+ bamboo_2/frame_0035_img.png
360
+ bamboo_2/frame_0036_img.png
361
+ bamboo_2/frame_0037_img.png
362
+ bamboo_2/frame_0038_img.png
363
+ bamboo_2/frame_0039_img.png
364
+ bamboo_2/frame_0040_img.png
365
+ bamboo_2/frame_0041_img.png
366
+ bamboo_2/frame_0042_img.png
367
+ bamboo_2/frame_0043_img.png
368
+ bamboo_2/frame_0044_img.png
369
+ bamboo_2/frame_0045_img.png
370
+ bamboo_2/frame_0046_img.png
371
+ bamboo_2/frame_0047_img.png
372
+ bamboo_2/frame_0048_img.png
373
+ bamboo_2/frame_0049_img.png
374
+ bamboo_2/frame_0050_img.png
375
+ bandage_1/frame_0001_img.png
376
+ bandage_1/frame_0002_img.png
377
+ bandage_1/frame_0003_img.png
378
+ bandage_1/frame_0004_img.png
379
+ bandage_1/frame_0005_img.png
380
+ bandage_1/frame_0006_img.png
381
+ bandage_1/frame_0007_img.png
382
+ bandage_1/frame_0008_img.png
383
+ bandage_1/frame_0009_img.png
384
+ bandage_1/frame_0010_img.png
385
+ bandage_1/frame_0011_img.png
386
+ bandage_1/frame_0012_img.png
387
+ bandage_1/frame_0013_img.png
388
+ bandage_1/frame_0014_img.png
389
+ bandage_1/frame_0015_img.png
390
+ bandage_1/frame_0016_img.png
391
+ bandage_1/frame_0017_img.png
392
+ bandage_1/frame_0018_img.png
393
+ bandage_1/frame_0019_img.png
394
+ bandage_1/frame_0020_img.png
395
+ bandage_1/frame_0021_img.png
396
+ bandage_1/frame_0022_img.png
397
+ bandage_1/frame_0023_img.png
398
+ bandage_1/frame_0024_img.png
399
+ bandage_1/frame_0025_img.png
400
+ bandage_1/frame_0026_img.png
401
+ bandage_1/frame_0027_img.png
402
+ bandage_1/frame_0028_img.png
403
+ bandage_1/frame_0029_img.png
404
+ bandage_1/frame_0030_img.png
405
+ bandage_1/frame_0031_img.png
406
+ bandage_1/frame_0032_img.png
407
+ bandage_1/frame_0033_img.png
408
+ bandage_1/frame_0034_img.png
409
+ bandage_1/frame_0035_img.png
410
+ bandage_1/frame_0036_img.png
411
+ bandage_1/frame_0037_img.png
412
+ bandage_1/frame_0038_img.png
413
+ bandage_1/frame_0039_img.png
414
+ bandage_1/frame_0040_img.png
415
+ bandage_1/frame_0041_img.png
416
+ bandage_1/frame_0042_img.png
417
+ bandage_1/frame_0043_img.png
418
+ bandage_1/frame_0044_img.png
419
+ bandage_1/frame_0045_img.png
420
+ bandage_1/frame_0046_img.png
421
+ bandage_1/frame_0047_img.png
422
+ bandage_1/frame_0048_img.png
423
+ bandage_1/frame_0049_img.png
424
+ bandage_1/frame_0050_img.png
425
+ bandage_2/frame_0001_img.png
426
+ bandage_2/frame_0002_img.png
427
+ bandage_2/frame_0003_img.png
428
+ bandage_2/frame_0004_img.png
429
+ bandage_2/frame_0005_img.png
430
+ bandage_2/frame_0006_img.png
431
+ bandage_2/frame_0007_img.png
432
+ bandage_2/frame_0008_img.png
433
+ bandage_2/frame_0009_img.png
434
+ bandage_2/frame_0010_img.png
435
+ bandage_2/frame_0011_img.png
436
+ bandage_2/frame_0012_img.png
437
+ bandage_2/frame_0013_img.png
438
+ bandage_2/frame_0014_img.png
439
+ bandage_2/frame_0015_img.png
440
+ bandage_2/frame_0016_img.png
441
+ bandage_2/frame_0017_img.png
442
+ bandage_2/frame_0018_img.png
443
+ bandage_2/frame_0019_img.png
444
+ bandage_2/frame_0020_img.png
445
+ bandage_2/frame_0021_img.png
446
+ bandage_2/frame_0022_img.png
447
+ bandage_2/frame_0023_img.png
448
+ bandage_2/frame_0024_img.png
449
+ bandage_2/frame_0025_img.png
450
+ bandage_2/frame_0026_img.png
451
+ bandage_2/frame_0027_img.png
452
+ bandage_2/frame_0028_img.png
453
+ bandage_2/frame_0029_img.png
454
+ bandage_2/frame_0030_img.png
455
+ bandage_2/frame_0031_img.png
456
+ bandage_2/frame_0032_img.png
457
+ bandage_2/frame_0033_img.png
458
+ bandage_2/frame_0034_img.png
459
+ bandage_2/frame_0035_img.png
460
+ bandage_2/frame_0036_img.png
461
+ bandage_2/frame_0037_img.png
462
+ bandage_2/frame_0038_img.png
463
+ bandage_2/frame_0039_img.png
464
+ bandage_2/frame_0040_img.png
465
+ bandage_2/frame_0041_img.png
466
+ bandage_2/frame_0042_img.png
467
+ bandage_2/frame_0043_img.png
468
+ bandage_2/frame_0044_img.png
469
+ bandage_2/frame_0045_img.png
470
+ bandage_2/frame_0046_img.png
471
+ bandage_2/frame_0047_img.png
472
+ bandage_2/frame_0048_img.png
473
+ bandage_2/frame_0049_img.png
474
+ bandage_2/frame_0050_img.png
475
+ cave_2/frame_0001_img.png
476
+ cave_2/frame_0002_img.png
477
+ cave_2/frame_0003_img.png
478
+ cave_2/frame_0004_img.png
479
+ cave_2/frame_0005_img.png
480
+ cave_2/frame_0006_img.png
481
+ cave_2/frame_0007_img.png
482
+ cave_2/frame_0008_img.png
483
+ cave_2/frame_0009_img.png
484
+ cave_2/frame_0010_img.png
485
+ cave_2/frame_0011_img.png
486
+ cave_2/frame_0012_img.png
487
+ cave_2/frame_0013_img.png
488
+ cave_2/frame_0014_img.png
489
+ cave_2/frame_0015_img.png
490
+ cave_2/frame_0016_img.png
491
+ cave_2/frame_0017_img.png
492
+ cave_2/frame_0018_img.png
493
+ cave_2/frame_0019_img.png
494
+ cave_2/frame_0020_img.png
495
+ cave_2/frame_0021_img.png
496
+ cave_2/frame_0022_img.png
497
+ cave_2/frame_0023_img.png
498
+ cave_2/frame_0024_img.png
499
+ cave_2/frame_0025_img.png
500
+ cave_2/frame_0026_img.png
501
+ cave_2/frame_0027_img.png
502
+ cave_2/frame_0028_img.png
503
+ cave_2/frame_0029_img.png
504
+ cave_2/frame_0030_img.png
505
+ cave_2/frame_0031_img.png
506
+ cave_2/frame_0032_img.png
507
+ cave_2/frame_0033_img.png
508
+ cave_2/frame_0034_img.png
509
+ cave_2/frame_0035_img.png
510
+ cave_2/frame_0036_img.png
511
+ cave_2/frame_0037_img.png
512
+ cave_2/frame_0038_img.png
513
+ cave_2/frame_0039_img.png
514
+ cave_2/frame_0040_img.png
515
+ cave_2/frame_0041_img.png
516
+ cave_2/frame_0042_img.png
517
+ cave_2/frame_0043_img.png
518
+ cave_2/frame_0044_img.png
519
+ cave_2/frame_0045_img.png
520
+ cave_2/frame_0046_img.png
521
+ cave_2/frame_0047_img.png
522
+ cave_2/frame_0048_img.png
523
+ cave_2/frame_0049_img.png
524
+ cave_2/frame_0050_img.png
525
+ cave_4/frame_0001_img.png
526
+ cave_4/frame_0002_img.png
527
+ cave_4/frame_0003_img.png
528
+ cave_4/frame_0004_img.png
529
+ cave_4/frame_0005_img.png
530
+ cave_4/frame_0006_img.png
531
+ cave_4/frame_0007_img.png
532
+ cave_4/frame_0008_img.png
533
+ cave_4/frame_0009_img.png
534
+ cave_4/frame_0010_img.png
535
+ cave_4/frame_0011_img.png
536
+ cave_4/frame_0012_img.png
537
+ cave_4/frame_0013_img.png
538
+ cave_4/frame_0014_img.png
539
+ cave_4/frame_0015_img.png
540
+ cave_4/frame_0016_img.png
541
+ cave_4/frame_0017_img.png
542
+ cave_4/frame_0018_img.png
543
+ cave_4/frame_0019_img.png
544
+ cave_4/frame_0020_img.png
545
+ cave_4/frame_0021_img.png
546
+ cave_4/frame_0022_img.png
547
+ cave_4/frame_0023_img.png
548
+ cave_4/frame_0024_img.png
549
+ cave_4/frame_0025_img.png
550
+ cave_4/frame_0026_img.png
551
+ cave_4/frame_0027_img.png
552
+ cave_4/frame_0028_img.png
553
+ cave_4/frame_0029_img.png
554
+ cave_4/frame_0030_img.png
555
+ cave_4/frame_0031_img.png
556
+ cave_4/frame_0032_img.png
557
+ cave_4/frame_0033_img.png
558
+ cave_4/frame_0034_img.png
559
+ cave_4/frame_0035_img.png
560
+ cave_4/frame_0036_img.png
561
+ cave_4/frame_0037_img.png
562
+ cave_4/frame_0038_img.png
563
+ cave_4/frame_0039_img.png
564
+ cave_4/frame_0040_img.png
565
+ cave_4/frame_0041_img.png
566
+ cave_4/frame_0042_img.png
567
+ cave_4/frame_0043_img.png
568
+ cave_4/frame_0044_img.png
569
+ cave_4/frame_0045_img.png
570
+ cave_4/frame_0046_img.png
571
+ cave_4/frame_0047_img.png
572
+ cave_4/frame_0048_img.png
573
+ cave_4/frame_0049_img.png
574
+ cave_4/frame_0050_img.png
575
+ market_2/frame_0001_img.png
576
+ market_2/frame_0002_img.png
577
+ market_2/frame_0003_img.png
578
+ market_2/frame_0004_img.png
579
+ market_2/frame_0005_img.png
580
+ market_2/frame_0006_img.png
581
+ market_2/frame_0007_img.png
582
+ market_2/frame_0008_img.png
583
+ market_2/frame_0009_img.png
584
+ market_2/frame_0010_img.png
585
+ market_2/frame_0011_img.png
586
+ market_2/frame_0012_img.png
587
+ market_2/frame_0013_img.png
588
+ market_2/frame_0014_img.png
589
+ market_2/frame_0015_img.png
590
+ market_2/frame_0016_img.png
591
+ market_2/frame_0017_img.png
592
+ market_2/frame_0018_img.png
593
+ market_2/frame_0019_img.png
594
+ market_2/frame_0020_img.png
595
+ market_2/frame_0021_img.png
596
+ market_2/frame_0022_img.png
597
+ market_2/frame_0023_img.png
598
+ market_2/frame_0024_img.png
599
+ market_2/frame_0025_img.png
600
+ market_2/frame_0026_img.png
601
+ market_2/frame_0027_img.png
602
+ market_2/frame_0028_img.png
603
+ market_2/frame_0029_img.png
604
+ market_2/frame_0030_img.png
605
+ market_2/frame_0031_img.png
606
+ market_2/frame_0032_img.png
607
+ market_2/frame_0033_img.png
608
+ market_2/frame_0034_img.png
609
+ market_2/frame_0035_img.png
610
+ market_2/frame_0036_img.png
611
+ market_2/frame_0037_img.png
612
+ market_2/frame_0038_img.png
613
+ market_2/frame_0039_img.png
614
+ market_2/frame_0040_img.png
615
+ market_2/frame_0041_img.png
616
+ market_2/frame_0042_img.png
617
+ market_2/frame_0043_img.png
618
+ market_2/frame_0044_img.png
619
+ market_2/frame_0045_img.png
620
+ market_2/frame_0046_img.png
621
+ market_2/frame_0047_img.png
622
+ market_2/frame_0048_img.png
623
+ market_2/frame_0049_img.png
624
+ market_2/frame_0050_img.png
625
+ market_5/frame_0001_img.png
626
+ market_5/frame_0002_img.png
627
+ market_5/frame_0003_img.png
628
+ market_5/frame_0004_img.png
629
+ market_5/frame_0005_img.png
630
+ market_5/frame_0006_img.png
631
+ market_5/frame_0007_img.png
632
+ market_5/frame_0008_img.png
633
+ market_5/frame_0009_img.png
634
+ market_5/frame_0010_img.png
635
+ market_5/frame_0011_img.png
636
+ market_5/frame_0012_img.png
637
+ market_5/frame_0013_img.png
638
+ market_5/frame_0014_img.png
639
+ market_5/frame_0015_img.png
640
+ market_5/frame_0016_img.png
641
+ market_5/frame_0017_img.png
642
+ market_5/frame_0018_img.png
643
+ market_5/frame_0019_img.png
644
+ market_5/frame_0020_img.png
645
+ market_5/frame_0021_img.png
646
+ market_5/frame_0022_img.png
647
+ market_5/frame_0023_img.png
648
+ market_5/frame_0024_img.png
649
+ market_5/frame_0025_img.png
650
+ market_5/frame_0026_img.png
651
+ market_5/frame_0027_img.png
652
+ market_5/frame_0028_img.png
653
+ market_5/frame_0029_img.png
654
+ market_5/frame_0030_img.png
655
+ market_5/frame_0031_img.png
656
+ market_5/frame_0032_img.png
657
+ market_5/frame_0033_img.png
658
+ market_5/frame_0034_img.png
659
+ market_5/frame_0035_img.png
660
+ market_5/frame_0036_img.png
661
+ market_5/frame_0037_img.png
662
+ market_5/frame_0038_img.png
663
+ market_5/frame_0039_img.png
664
+ market_5/frame_0040_img.png
665
+ market_5/frame_0041_img.png
666
+ market_5/frame_0042_img.png
667
+ market_5/frame_0043_img.png
668
+ market_5/frame_0044_img.png
669
+ market_5/frame_0045_img.png
670
+ market_5/frame_0046_img.png
671
+ market_5/frame_0047_img.png
672
+ market_5/frame_0048_img.png
673
+ market_5/frame_0049_img.png
674
+ market_5/frame_0050_img.png
675
+ market_6/frame_0001_img.png
676
+ market_6/frame_0002_img.png
677
+ market_6/frame_0003_img.png
678
+ market_6/frame_0004_img.png
679
+ market_6/frame_0005_img.png
680
+ market_6/frame_0006_img.png
681
+ market_6/frame_0007_img.png
682
+ market_6/frame_0008_img.png
683
+ market_6/frame_0009_img.png
684
+ market_6/frame_0010_img.png
685
+ market_6/frame_0011_img.png
686
+ market_6/frame_0012_img.png
687
+ market_6/frame_0013_img.png
688
+ market_6/frame_0014_img.png
689
+ market_6/frame_0015_img.png
690
+ market_6/frame_0016_img.png
691
+ market_6/frame_0017_img.png
692
+ market_6/frame_0018_img.png
693
+ market_6/frame_0019_img.png
694
+ market_6/frame_0020_img.png
695
+ market_6/frame_0021_img.png
696
+ market_6/frame_0022_img.png
697
+ market_6/frame_0023_img.png
698
+ market_6/frame_0024_img.png
699
+ market_6/frame_0025_img.png
700
+ market_6/frame_0026_img.png
701
+ market_6/frame_0027_img.png
702
+ market_6/frame_0028_img.png
703
+ market_6/frame_0029_img.png
704
+ market_6/frame_0030_img.png
705
+ market_6/frame_0031_img.png
706
+ market_6/frame_0032_img.png
707
+ market_6/frame_0033_img.png
708
+ market_6/frame_0034_img.png
709
+ market_6/frame_0035_img.png
710
+ market_6/frame_0036_img.png
711
+ market_6/frame_0037_img.png
712
+ market_6/frame_0038_img.png
713
+ market_6/frame_0039_img.png
714
+ market_6/frame_0040_img.png
715
+ mountain_1/frame_0001_img.png
716
+ mountain_1/frame_0002_img.png
717
+ mountain_1/frame_0003_img.png
718
+ mountain_1/frame_0004_img.png
719
+ mountain_1/frame_0005_img.png
720
+ mountain_1/frame_0006_img.png
721
+ mountain_1/frame_0007_img.png
722
+ mountain_1/frame_0008_img.png
723
+ mountain_1/frame_0009_img.png
724
+ mountain_1/frame_0010_img.png
725
+ mountain_1/frame_0011_img.png
726
+ mountain_1/frame_0012_img.png
727
+ mountain_1/frame_0013_img.png
728
+ mountain_1/frame_0014_img.png
729
+ mountain_1/frame_0015_img.png
730
+ mountain_1/frame_0016_img.png
731
+ mountain_1/frame_0017_img.png
732
+ mountain_1/frame_0018_img.png
733
+ mountain_1/frame_0019_img.png
734
+ mountain_1/frame_0020_img.png
735
+ mountain_1/frame_0021_img.png
736
+ mountain_1/frame_0022_img.png
737
+ mountain_1/frame_0023_img.png
738
+ mountain_1/frame_0024_img.png
739
+ mountain_1/frame_0025_img.png
740
+ mountain_1/frame_0026_img.png
741
+ mountain_1/frame_0027_img.png
742
+ mountain_1/frame_0028_img.png
743
+ mountain_1/frame_0029_img.png
744
+ mountain_1/frame_0030_img.png
745
+ mountain_1/frame_0031_img.png
746
+ mountain_1/frame_0032_img.png
747
+ mountain_1/frame_0033_img.png
748
+ mountain_1/frame_0034_img.png
749
+ mountain_1/frame_0035_img.png
750
+ mountain_1/frame_0036_img.png
751
+ mountain_1/frame_0037_img.png
752
+ mountain_1/frame_0038_img.png
753
+ mountain_1/frame_0039_img.png
754
+ mountain_1/frame_0040_img.png
755
+ mountain_1/frame_0041_img.png
756
+ mountain_1/frame_0042_img.png
757
+ mountain_1/frame_0043_img.png
758
+ mountain_1/frame_0044_img.png
759
+ mountain_1/frame_0045_img.png
760
+ mountain_1/frame_0046_img.png
761
+ mountain_1/frame_0047_img.png
762
+ mountain_1/frame_0048_img.png
763
+ mountain_1/frame_0049_img.png
764
+ mountain_1/frame_0050_img.png
765
+ shaman_2/frame_0001_img.png
766
+ shaman_2/frame_0002_img.png
767
+ shaman_2/frame_0003_img.png
768
+ shaman_2/frame_0004_img.png
769
+ shaman_2/frame_0005_img.png
770
+ shaman_2/frame_0006_img.png
771
+ shaman_2/frame_0007_img.png
772
+ shaman_2/frame_0008_img.png
773
+ shaman_2/frame_0009_img.png
774
+ shaman_2/frame_0010_img.png
775
+ shaman_2/frame_0011_img.png
776
+ shaman_2/frame_0012_img.png
777
+ shaman_2/frame_0013_img.png
778
+ shaman_2/frame_0014_img.png
779
+ shaman_2/frame_0015_img.png
780
+ shaman_2/frame_0016_img.png
781
+ shaman_2/frame_0017_img.png
782
+ shaman_2/frame_0018_img.png
783
+ shaman_2/frame_0019_img.png
784
+ shaman_2/frame_0020_img.png
785
+ shaman_2/frame_0021_img.png
786
+ shaman_2/frame_0022_img.png
787
+ shaman_2/frame_0023_img.png
788
+ shaman_2/frame_0024_img.png
789
+ shaman_2/frame_0025_img.png
790
+ shaman_2/frame_0026_img.png
791
+ shaman_2/frame_0027_img.png
792
+ shaman_2/frame_0028_img.png
793
+ shaman_2/frame_0029_img.png
794
+ shaman_2/frame_0030_img.png
795
+ shaman_2/frame_0031_img.png
796
+ shaman_2/frame_0032_img.png
797
+ shaman_2/frame_0033_img.png
798
+ shaman_2/frame_0034_img.png
799
+ shaman_2/frame_0035_img.png
800
+ shaman_2/frame_0036_img.png
801
+ shaman_2/frame_0037_img.png
802
+ shaman_2/frame_0038_img.png
803
+ shaman_2/frame_0039_img.png
804
+ shaman_2/frame_0040_img.png
805
+ shaman_2/frame_0041_img.png
806
+ shaman_2/frame_0042_img.png
807
+ shaman_2/frame_0043_img.png
808
+ shaman_2/frame_0044_img.png
809
+ shaman_2/frame_0045_img.png
810
+ shaman_2/frame_0046_img.png
811
+ shaman_2/frame_0047_img.png
812
+ shaman_2/frame_0048_img.png
813
+ shaman_2/frame_0049_img.png
814
+ shaman_2/frame_0050_img.png
815
+ shaman_3/frame_0001_img.png
816
+ shaman_3/frame_0002_img.png
817
+ shaman_3/frame_0003_img.png
818
+ shaman_3/frame_0004_img.png
819
+ shaman_3/frame_0005_img.png
820
+ shaman_3/frame_0006_img.png
821
+ shaman_3/frame_0007_img.png
822
+ shaman_3/frame_0008_img.png
823
+ shaman_3/frame_0009_img.png
824
+ shaman_3/frame_0010_img.png
825
+ shaman_3/frame_0011_img.png
826
+ shaman_3/frame_0012_img.png
827
+ shaman_3/frame_0013_img.png
828
+ shaman_3/frame_0014_img.png
829
+ shaman_3/frame_0015_img.png
830
+ shaman_3/frame_0016_img.png
831
+ shaman_3/frame_0017_img.png
832
+ shaman_3/frame_0018_img.png
833
+ shaman_3/frame_0019_img.png
834
+ shaman_3/frame_0020_img.png
835
+ shaman_3/frame_0021_img.png
836
+ shaman_3/frame_0022_img.png
837
+ shaman_3/frame_0023_img.png
838
+ shaman_3/frame_0024_img.png
839
+ shaman_3/frame_0025_img.png
840
+ shaman_3/frame_0026_img.png
841
+ shaman_3/frame_0027_img.png
842
+ shaman_3/frame_0028_img.png
843
+ shaman_3/frame_0029_img.png
844
+ shaman_3/frame_0030_img.png
845
+ shaman_3/frame_0031_img.png
846
+ shaman_3/frame_0032_img.png
847
+ shaman_3/frame_0033_img.png
848
+ shaman_3/frame_0034_img.png
849
+ shaman_3/frame_0035_img.png
850
+ shaman_3/frame_0036_img.png
851
+ shaman_3/frame_0037_img.png
852
+ shaman_3/frame_0038_img.png
853
+ shaman_3/frame_0039_img.png
854
+ shaman_3/frame_0040_img.png
855
+ shaman_3/frame_0041_img.png
856
+ shaman_3/frame_0042_img.png
857
+ shaman_3/frame_0043_img.png
858
+ shaman_3/frame_0044_img.png
859
+ shaman_3/frame_0045_img.png
860
+ shaman_3/frame_0046_img.png
861
+ shaman_3/frame_0047_img.png
862
+ shaman_3/frame_0048_img.png
863
+ shaman_3/frame_0049_img.png
864
+ shaman_3/frame_0050_img.png
865
+ sleeping_1/frame_0001_img.png
866
+ sleeping_1/frame_0002_img.png
867
+ sleeping_1/frame_0003_img.png
868
+ sleeping_1/frame_0004_img.png
869
+ sleeping_1/frame_0005_img.png
870
+ sleeping_1/frame_0006_img.png
871
+ sleeping_1/frame_0007_img.png
872
+ sleeping_1/frame_0008_img.png
873
+ sleeping_1/frame_0009_img.png
874
+ sleeping_1/frame_0010_img.png
875
+ sleeping_1/frame_0011_img.png
876
+ sleeping_1/frame_0012_img.png
877
+ sleeping_1/frame_0013_img.png
878
+ sleeping_1/frame_0014_img.png
879
+ sleeping_1/frame_0015_img.png
880
+ sleeping_1/frame_0016_img.png
881
+ sleeping_1/frame_0017_img.png
882
+ sleeping_1/frame_0018_img.png
883
+ sleeping_1/frame_0019_img.png
884
+ sleeping_1/frame_0020_img.png
885
+ sleeping_1/frame_0021_img.png
886
+ sleeping_1/frame_0022_img.png
887
+ sleeping_1/frame_0023_img.png
888
+ sleeping_1/frame_0024_img.png
889
+ sleeping_1/frame_0025_img.png
890
+ sleeping_1/frame_0026_img.png
891
+ sleeping_1/frame_0027_img.png
892
+ sleeping_1/frame_0028_img.png
893
+ sleeping_1/frame_0029_img.png
894
+ sleeping_1/frame_0030_img.png
895
+ sleeping_1/frame_0031_img.png
896
+ sleeping_1/frame_0032_img.png
897
+ sleeping_1/frame_0033_img.png
898
+ sleeping_1/frame_0034_img.png
899
+ sleeping_1/frame_0035_img.png
900
+ sleeping_1/frame_0036_img.png
901
+ sleeping_1/frame_0037_img.png
902
+ sleeping_1/frame_0038_img.png
903
+ sleeping_1/frame_0039_img.png
904
+ sleeping_1/frame_0040_img.png
905
+ sleeping_1/frame_0041_img.png
906
+ sleeping_1/frame_0042_img.png
907
+ sleeping_1/frame_0043_img.png
908
+ sleeping_1/frame_0044_img.png
909
+ sleeping_1/frame_0045_img.png
910
+ sleeping_1/frame_0046_img.png
911
+ sleeping_1/frame_0047_img.png
912
+ sleeping_1/frame_0048_img.png
913
+ sleeping_1/frame_0049_img.png
914
+ sleeping_1/frame_0050_img.png
915
+ sleeping_2/frame_0001_img.png
916
+ sleeping_2/frame_0002_img.png
917
+ sleeping_2/frame_0003_img.png
918
+ sleeping_2/frame_0004_img.png
919
+ sleeping_2/frame_0005_img.png
920
+ sleeping_2/frame_0006_img.png
921
+ sleeping_2/frame_0007_img.png
922
+ sleeping_2/frame_0008_img.png
923
+ sleeping_2/frame_0009_img.png
924
+ sleeping_2/frame_0010_img.png
925
+ sleeping_2/frame_0011_img.png
926
+ sleeping_2/frame_0012_img.png
927
+ sleeping_2/frame_0013_img.png
928
+ sleeping_2/frame_0014_img.png
929
+ sleeping_2/frame_0015_img.png
930
+ sleeping_2/frame_0016_img.png
931
+ sleeping_2/frame_0017_img.png
932
+ sleeping_2/frame_0018_img.png
933
+ sleeping_2/frame_0019_img.png
934
+ sleeping_2/frame_0020_img.png
935
+ sleeping_2/frame_0021_img.png
936
+ sleeping_2/frame_0022_img.png
937
+ sleeping_2/frame_0023_img.png
938
+ sleeping_2/frame_0024_img.png
939
+ sleeping_2/frame_0025_img.png
940
+ sleeping_2/frame_0026_img.png
941
+ sleeping_2/frame_0027_img.png
942
+ sleeping_2/frame_0028_img.png
943
+ sleeping_2/frame_0029_img.png
944
+ sleeping_2/frame_0030_img.png
945
+ sleeping_2/frame_0031_img.png
946
+ sleeping_2/frame_0032_img.png
947
+ sleeping_2/frame_0033_img.png
948
+ sleeping_2/frame_0034_img.png
949
+ sleeping_2/frame_0035_img.png
950
+ sleeping_2/frame_0036_img.png
951
+ sleeping_2/frame_0037_img.png
952
+ sleeping_2/frame_0038_img.png
953
+ sleeping_2/frame_0039_img.png
954
+ sleeping_2/frame_0040_img.png
955
+ sleeping_2/frame_0041_img.png
956
+ sleeping_2/frame_0042_img.png
957
+ sleeping_2/frame_0043_img.png
958
+ sleeping_2/frame_0044_img.png
959
+ sleeping_2/frame_0045_img.png
960
+ sleeping_2/frame_0046_img.png
961
+ sleeping_2/frame_0047_img.png
962
+ sleeping_2/frame_0048_img.png
963
+ sleeping_2/frame_0049_img.png
964
+ sleeping_2/frame_0050_img.png
965
+ temple_2/frame_0001_img.png
966
+ temple_2/frame_0002_img.png
967
+ temple_2/frame_0003_img.png
968
+ temple_2/frame_0004_img.png
969
+ temple_2/frame_0005_img.png
970
+ temple_2/frame_0006_img.png
971
+ temple_2/frame_0007_img.png
972
+ temple_2/frame_0008_img.png
973
+ temple_2/frame_0009_img.png
974
+ temple_2/frame_0010_img.png
975
+ temple_2/frame_0011_img.png
976
+ temple_2/frame_0012_img.png
977
+ temple_2/frame_0013_img.png
978
+ temple_2/frame_0014_img.png
979
+ temple_2/frame_0015_img.png
980
+ temple_2/frame_0016_img.png
981
+ temple_2/frame_0017_img.png
982
+ temple_2/frame_0018_img.png
983
+ temple_2/frame_0019_img.png
984
+ temple_2/frame_0020_img.png
985
+ temple_2/frame_0021_img.png
986
+ temple_2/frame_0022_img.png
987
+ temple_2/frame_0023_img.png
988
+ temple_2/frame_0024_img.png
989
+ temple_2/frame_0025_img.png
990
+ temple_2/frame_0026_img.png
991
+ temple_2/frame_0027_img.png
992
+ temple_2/frame_0028_img.png
993
+ temple_2/frame_0029_img.png
994
+ temple_2/frame_0030_img.png
995
+ temple_2/frame_0031_img.png
996
+ temple_2/frame_0032_img.png
997
+ temple_2/frame_0033_img.png
998
+ temple_2/frame_0034_img.png
999
+ temple_2/frame_0035_img.png
1000
+ temple_2/frame_0036_img.png
1001
+ temple_2/frame_0037_img.png
1002
+ temple_2/frame_0038_img.png
1003
+ temple_2/frame_0039_img.png
1004
+ temple_2/frame_0040_img.png
1005
+ temple_2/frame_0041_img.png
1006
+ temple_2/frame_0042_img.png
1007
+ temple_2/frame_0043_img.png
1008
+ temple_2/frame_0044_img.png
1009
+ temple_2/frame_0045_img.png
1010
+ temple_2/frame_0046_img.png
1011
+ temple_2/frame_0047_img.png
1012
+ temple_2/frame_0048_img.png
1013
+ temple_2/frame_0049_img.png
1014
+ temple_2/frame_0050_img.png
1015
+ temple_3/frame_0001_img.png
1016
+ temple_3/frame_0002_img.png
1017
+ temple_3/frame_0003_img.png
1018
+ temple_3/frame_0004_img.png
1019
+ temple_3/frame_0005_img.png
1020
+ temple_3/frame_0006_img.png
1021
+ temple_3/frame_0007_img.png
1022
+ temple_3/frame_0008_img.png
1023
+ temple_3/frame_0009_img.png
1024
+ temple_3/frame_0010_img.png
1025
+ temple_3/frame_0011_img.png
1026
+ temple_3/frame_0012_img.png
1027
+ temple_3/frame_0013_img.png
1028
+ temple_3/frame_0014_img.png
1029
+ temple_3/frame_0015_img.png
1030
+ temple_3/frame_0016_img.png
1031
+ temple_3/frame_0017_img.png
1032
+ temple_3/frame_0018_img.png
1033
+ temple_3/frame_0019_img.png
1034
+ temple_3/frame_0020_img.png
1035
+ temple_3/frame_0021_img.png
1036
+ temple_3/frame_0022_img.png
1037
+ temple_3/frame_0023_img.png
1038
+ temple_3/frame_0024_img.png
1039
+ temple_3/frame_0025_img.png
1040
+ temple_3/frame_0026_img.png
1041
+ temple_3/frame_0027_img.png
1042
+ temple_3/frame_0028_img.png
1043
+ temple_3/frame_0029_img.png
1044
+ temple_3/frame_0030_img.png
1045
+ temple_3/frame_0031_img.png
1046
+ temple_3/frame_0032_img.png
1047
+ temple_3/frame_0033_img.png
1048
+ temple_3/frame_0034_img.png
1049
+ temple_3/frame_0035_img.png
1050
+ temple_3/frame_0036_img.png
1051
+ temple_3/frame_0037_img.png
1052
+ temple_3/frame_0038_img.png
1053
+ temple_3/frame_0039_img.png
1054
+ temple_3/frame_0040_img.png
1055
+ temple_3/frame_0041_img.png
1056
+ temple_3/frame_0042_img.png
1057
+ temple_3/frame_0043_img.png
1058
+ temple_3/frame_0044_img.png
1059
+ temple_3/frame_0045_img.png
1060
+ temple_3/frame_0046_img.png
1061
+ temple_3/frame_0047_img.png
1062
+ temple_3/frame_0048_img.png
1063
+ temple_3/frame_0049_img.png
1064
+ temple_3/frame_0050_img.png
FE2E/infer/image_utils.py ADDED
@@ -0,0 +1,158 @@
1
+ from PIL import Image
2
+ import matplotlib
3
+ import numpy as np
4
+
5
+ from typing import List, Union
6
+ import PIL.Image
7
+
8
+ import torch
9
+ from torchvision.transforms import InterpolationMode
10
+ from torchvision.transforms.functional import resize
11
+
12
+ def concatenate_images(*image_lists):
13
+ # Ensure at least one image list is provided
14
+ if not image_lists or not image_lists[0]:
15
+ raise ValueError("At least one non-empty image list must be provided")
16
+
17
+ # Determine the maximum width of any single row and the total height
18
+ max_width = 0
19
+ total_height = 0
20
+ row_widths = []
21
+ row_heights = []
22
+
23
+ # Compute dimensions for each row
24
+ for image_list in image_lists:
25
+ if image_list: # Ensure the list is not empty
26
+ width = sum(img.width for img in image_list)
27
+ height = image_list[0].height # Assuming all images in the list have the same height
28
+ max_width = max(max_width, width)
29
+ total_height += height
30
+ row_widths.append(width)
31
+ row_heights.append(height)
32
+
33
+ # Create a new image to concatenate everything into
34
+ new_image = Image.new('RGB', (max_width, total_height))
35
+
36
+ # Concatenate each row of images
37
+ y_offset = 0
38
+ for i, image_list in enumerate(image_lists):
39
+ x_offset = 0
40
+ for img in image_list:
41
+ new_image.paste(img, (x_offset, y_offset))
42
+ x_offset += img.width
43
+ y_offset += row_heights[i] # Move the offset down to the next row
44
+
45
+ return new_image
46
+
47
+
48
+ def colorize_depth_map(depth, mask=None, reverse_color=False):
49
+ cm = matplotlib.colormaps["Spectral"]
50
+ # normalize
51
+ depth = ((depth - depth.min()) / (depth.max() - depth.min()))
52
+ # colorize
53
+ if reverse_color:
54
+ img_colored_np = cm(1 - depth, bytes=False)[:, :, 0:3] # Invert the depth values before applying colormap
55
+ else:
56
+ img_colored_np = cm(depth, bytes=False)[:, :, 0:3] # (h,w,3)
57
+
58
+ depth_colored = (img_colored_np * 255).astype(np.uint8)
59
+ if mask is not None:
60
+ masked_image = np.zeros_like(depth_colored)
61
+ masked_image[mask.numpy()] = depth_colored[mask.numpy()]
62
+ depth_colored_img = Image.fromarray(masked_image)
63
+ else:
64
+ depth_colored_img = Image.fromarray(depth_colored)
65
+ return depth_colored_img
66
+
67
+
68
+ def resize_max_res(
69
+ img: torch.Tensor,
70
+ max_edge_resolution: int,
71
+ resample_method: InterpolationMode = InterpolationMode.BILINEAR,
72
+ ) -> torch.Tensor:
73
+ """
74
+ Resize image to limit maximum edge length while keeping aspect ratio.
75
+
76
+ Args:
77
+ img (`torch.Tensor`):
78
+ Image tensor to be resized. Expected shape: [B, C, H, W]
79
+ max_edge_resolution (`int`):
80
+ Maximum edge length (pixel).
81
+ resample_method (`InterpolationMode`):
82
+ Resampling method used to resize images.
83
+
84
+ Returns:
85
+ `torch.Tensor`: Resized image.
86
+ """
87
+ assert 4 == img.dim(), f"Invalid input shape {img.shape}"
88
+
89
+ original_height, original_width = img.shape[-2:]
90
+ downscale_factor = min(
91
+ max_edge_resolution / original_width, max_edge_resolution / original_height
92
+ )
93
+
94
+ new_width = int(original_width * downscale_factor)
95
+ new_height = int(original_height * downscale_factor)
96
+
97
+ resized_img = resize(img, (new_height, new_width), resample_method, antialias=True)
98
+ return resized_img
99
+
100
+ def resize_back(
101
+ img: Union[torch.Tensor, np.ndarray, PIL.Image.Image, List[PIL.Image.Image]],
102
+ target_size: Union[int, tuple[int, int]],
103
+ resample_method: Union[InterpolationMode, int] = InterpolationMode.BILINEAR,
104
+ ) -> Union[torch.Tensor, np.ndarray, PIL.Image.Image, List[PIL.Image.Image]]:
105
+ """
106
+ Resize image to target size.
107
+
108
+ Args:
109
+ img (`Union[torch.Tensor, np.ndarray, PIL.Image.Image, List[PIL.Image.Image]]`):
110
+ Image to be resized.
111
+ target_size (`Union[int, tuple[int, int]]`):
112
+ Target size of the resized image.
113
+ resample_method (`Union[InterpolationMode, int]`):
114
+ Resampling method used to resize images.
115
+
116
+ Returns:
117
+ `Union[torch.Tensor, np.ndarray, PIL.Image.Image, List[PIL.Image.Image]]`: Resized image.
118
+ """
119
+ if isinstance(img, torch.Tensor): # [B, C, H, W]
120
+ resized_img = resize(img, target_size, resample_method, antialias=True)
121
+ elif isinstance(img, np.ndarray): # [B, H, W, C]
122
+ # Convert to torch.Tensor
123
+ img = torch.tensor(img).permute(0, 3, 1, 2)
124
+ resized_img = resize(img, target_size, resample_method, antialias=True)
125
+ # Convert back to np.ndarray
126
+ resized_img = resized_img.permute(0, 2, 3, 1).numpy()
127
+ elif isinstance(img, PIL.Image.Image):
128
+ target_size = (target_size[1], target_size[0]) # PIL uses (width, height)
129
+ resized_img = img.resize(target_size, resample_method)
130
+ elif isinstance(img, list) and all(isinstance(i, PIL.Image.Image) for i in img):
131
+ target_size = (target_size[1], target_size[0]) # PIL uses (width, height)
132
+ resized_img = [i.resize(target_size, resample_method) for i in img]
133
+ return resized_img
134
+
135
+ def get_pil_resample_method(method_str: str) -> int:
136
+ resample_method_dict = {
137
+ "bilinear": Image.BILINEAR,
138
+ "bicubic": Image.BICUBIC,
139
+ "nearest": Image.NEAREST,
140
+ }
141
+ resample_method = resample_method_dict.get(method_str, None)
142
+ if resample_method is None:
143
+ raise ValueError(f"Unknown resampling method: {method_str}")
144
+ else:
145
+ return resample_method
146
+
147
+ def get_tv_resample_method(method_str: str) -> InterpolationMode:
148
+ resample_method_dict = {
149
+ "bilinear": InterpolationMode.BILINEAR,
150
+ "bicubic": InterpolationMode.BICUBIC,
151
+ "nearest": InterpolationMode.NEAREST_EXACT,
152
+ "nearest-exact": InterpolationMode.NEAREST_EXACT,
153
+ }
154
+ resample_method = resample_method_dict.get(method_str, None)
155
+ if resample_method is None:
156
+ raise ValueError(f"Unknown resampling method: {method_str}")
157
+ else:
158
+ return resample_method
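A minimal usage sketch for the helpers defined above (illustrative only; the tensor shapes and output file name are arbitrary and not values used elsewhere in the repo):

import torch
from infer.image_utils import resize_max_res, colorize_depth_map, get_tv_resample_method

rgb = torch.rand(1, 3, 480, 640)                       # [B, C, H, W] in [0, 1]
resized = resize_max_res(rgb, max_edge_resolution=512,
                         resample_method=get_tv_resample_method("bilinear"))
depth = torch.rand(480, 640).numpy()                   # toy relative depth map
colorize_depth_map(depth).save("depth_vis.png")        # Spectral-colored PNG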
FE2E/infer/inference.py ADDED
@@ -0,0 +1,569 @@
1
+ import argparse
2
+ import json
3
+ import itertools
4
+ import math
5
+ import os
6
+ import sys
7
+ import time
8
+ from pathlib import Path
9
+
10
+ # Add the parent directory to the system path
11
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
12
+
13
+ import numpy as np
14
+ import torch
15
+ from einops import rearrange, repeat
16
+ from PIL import Image
17
+ from safetensors.torch import load_file
18
+ from torchvision.transforms import functional as F
19
+ from tqdm import tqdm
20
+ import torch.nn.functional as Func
21
+
22
+ import infer.sampling as sampling
23
+ from modules.autoencoder import AutoEncoder
24
+ from modules.model_edit import Step1XParams, Step1XEdit
25
+
26
+ REPO_ROOT = Path(__file__).resolve().parents[1]
27
+ DEFAULT_QWEN_DIR = REPO_ROOT / "Qwen"
28
+ EMPTY_PROMPT_LATENT_PATH = REPO_ROOT / "latent" / "no_info.npz"
29
+
30
+
31
+ def cudagc():
32
+ torch.cuda.empty_cache()
33
+ torch.cuda.ipc_collect()
34
+
35
+
36
+ def load_state_dict(model, ckpt_path, device="cuda", strict=False, assign=True):
37
+ if Path(ckpt_path).suffix == ".safetensors":
38
+ state_dict = load_file(ckpt_path, device)
39
+ else:
40
+ state_dict = torch.load(ckpt_path, map_location="cpu")
41
+
42
+ missing, unexpected = model.load_state_dict(state_dict, strict=strict, assign=assign)
43
+ if len(missing) > 0 and len(unexpected) > 0:
44
+ print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
45
+ print("\n" + "-" * 79 + "\n")
46
+ print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))
47
+ elif len(missing) > 0:
48
+ print(f"Got {len(missing)} missing keys:\n\t" + "\n\t".join(missing))
49
+ elif len(unexpected) > 0:
50
+ print(f"Got {len(unexpected)} unexpected keys:\n\t" + "\n\t".join(unexpected))
51
+ return model
52
+
53
+
54
+ def load_models(dit_path=None, ae_path=None, qwen2vl_model_path=None, device="cuda", max_length=256, dtype=torch.bfloat16, args=None):
55
+ empty_llm = args is not None and hasattr(args, 'prompt_type') and args.prompt_type == 'empty'
56
+ if empty_llm:
57
+ print("[INFO] prompt_type=empty, skipping Qwen model loading")
58
+ qwen2vl_encoder = None
59
+ else:
60
+ # Lazy import to avoid pulling transformers/vision stack during evaluation with prompt_type=empty.
61
+ from modules.conditioner import Qwen25VL_7b_Embedder as Qwen2VLEmbedder
62
+ qwen2vl_encoder = Qwen2VLEmbedder(
63
+ qwen2vl_model_path,
64
+ device=device,
65
+ max_length=max_length,
66
+ dtype=dtype,
67
+ args=args,
68
+ )
69
+
70
+ with torch.device("meta"):
71
+ ae = AutoEncoder(
72
+ resolution=256,
73
+ in_channels=3,
74
+ ch=128,
75
+ out_ch=3,
76
+ ch_mult=[1, 2, 4, 4],
77
+ num_res_blocks=2,
78
+ z_channels=16,
79
+ scale_factor=0.3611,
80
+ shift_factor=0.1159,
81
+ )
82
+
83
+ step1x_params = Step1XParams(
84
+ in_channels=64,
85
+ out_channels=64,
86
+ vec_in_dim=768,
87
+ context_in_dim=4096,
88
+ hidden_size=3072,
89
+ mlp_ratio=4.0,
90
+ num_heads=24,
91
+ depth=19,
92
+ depth_single_blocks=38,
93
+ axes_dim=[16, 56, 56],
94
+ theta=10_000,
95
+ qkv_bias=True,
96
+ )
97
+ dit = Step1XEdit(step1x_params)
98
+
99
+ ae = load_state_dict(ae, ae_path, 'cpu')
100
+ dit = load_state_dict(dit, dit_path, 'cpu')
101
+
102
+ ae = ae.to(dtype=torch.float32)
103
+
104
+ return ae, dit, qwen2vl_encoder
105
+
106
+
107
+ def equip_dit_with_lora_sd_scripts(ae, text_encoders, dit, lora, device='cuda'):
108
+ from safetensors.torch import load_file
109
+ weights_sd = load_file(lora)
110
+ is_lora = True
111
+ from library import lora_module
112
+ module = lora_module
113
+ lora_model, _ = module.create_network_from_weights(1.0, None, ae, text_encoders, dit, weights_sd, True)
114
+ lora_model.merge_to(text_encoders, dit, weights_sd)
115
+
116
+ lora_model.set_multiplier(1.0)
117
+ return lora_model
118
+
119
+ class ImageGenerator:
120
+
121
+ def __init__(
122
+ self,
123
+ dit_path=None,
124
+ ae_path=None,
125
+ qwen2vl_model_path=None,
126
+ device="cuda",
127
+ max_length=640,
128
+ dtype=torch.bfloat16,
129
+ quantized=False,
130
+ offload=False,
131
+ lora=None,
132
+ args=None,
133
+ ) -> None:
134
+ self.device = torch.device(device)
135
+ self.args = args
136
+ self.ae, self.dit, self.llm_encoder = load_models(
137
+ dit_path=dit_path,
138
+ ae_path=ae_path,
139
+ qwen2vl_model_path=qwen2vl_model_path,
140
+ max_length=max_length,
141
+ dtype=dtype,
142
+ device=device,
143
+ args=args,
144
+ )
145
+ if not quantized:
146
+ self.dit = self.dit.to(dtype=torch.bfloat16)
147
+ else:
148
+ self.dit = self.dit.to(dtype=torch.float8_e4m3fn)
149
+ if not offload:
150
+ self.dit = self.dit.to(device=self.device)
151
+ self.ae = self.ae.to(device=self.device)
152
+ self.quantized = quantized
153
+ self.offload = offload
154
+ if lora is not None:
155
+ self.lora_module = equip_dit_with_lora_sd_scripts(
156
+ self.ae,
157
+ [self.llm_encoder],
158
+ self.dit,
159
+ lora,
160
+ device=self.dit.device,
161
+ )
162
+ else:
163
+ self.lora_module = None
164
+
165
+ def prepare(self, prompt, img, ref_image, ref_image_raw, empty_llm=False):
166
+ bs, _, h, w = img.shape
167
+ bs, _, ref_h, ref_w = ref_image.shape
168
+
169
+ assert h == ref_h and w == ref_w
170
+
171
+ if bs == 1 and not isinstance(prompt, str):
172
+ bs = len(prompt)
173
+ elif bs >= 1 and isinstance(prompt, str):
174
+ prompt = [prompt] * bs
175
+
176
+ img = rearrange(img, "b c (h ph) (w pw) -> b (h w) (c ph pw)", ph=2, pw=2) #2,16,82,110->2,2255,64
177
+ ref_img = rearrange(ref_image, "b c (ref_h ph) (ref_w pw) -> b (ref_h ref_w) (c ph pw)", ph=2, pw=2) # "flatten" the 2D image into a 1D sequence of patches; this prepares the data for the Transformer, which operates on sequences
178
+ if img.shape[0] == 1 and bs > 1:
179
+ img = repeat(img, "1 ... -> bs ...", bs=bs)
180
+ ref_img = repeat(ref_img, "1 ... -> bs ...", bs=bs)
181
+ # img and ref_img are no longer 2D images but sequences of 64-dimensional patches; the Transformer does not know which of the 2255 patches was in the top-left or bottom-right corner, hence the positional ids below
182
+ img_ids = torch.zeros(h // 2, w // 2, 3) # 41,55,3; h and w are the latent height/width, but rearrange merged 2x2 patches, so the actual grid is h/2 x w/2; the trailing 3 means each coordinate has 3 components (one reserved, Y, X)
183
+ img_ids[..., 1] = img_ids[..., 1] + torch.arange(h // 2)[:, None]
184
+ img_ids[..., 2] = img_ids[..., 2] + torch.arange(w // 2)[None, :] # via broadcasting, the second component of every point in row i is assigned the value i
185
+ img_ids = repeat(img_ids, "h w c -> b (h w) c", b=bs) # "flatten" the 2D coordinate grid into a 1D sequence and replicate it to the batch size
186
+
187
+ ref_img_ids = torch.zeros(ref_h // 2, ref_w // 2, 3)
188
+
189
+ ref_img_ids[..., 1] = ref_img_ids[..., 1] + torch.arange(ref_h // 2)[:, None]
190
+ ref_img_ids[..., 2] = ref_img_ids[..., 2] + torch.arange(ref_w // 2)[None, :]
191
+ ref_img_ids = repeat(ref_img_ids, "ref_h ref_w c -> b (ref_h ref_w) c", b=bs)
192
+
193
+ if isinstance(prompt, str):
194
+ prompt = [prompt]
195
+
196
+ if self.offload:
197
+ self.llm_encoder = self.llm_encoder.to(self.device)
198
+
199
+ if empty_llm:
200
+ empty_prompt_cache = getattr(self.args, "empty_prompt_cache", None) if self.args is not None else None
201
+ cache_path = Path(empty_prompt_cache) if empty_prompt_cache else EMPTY_PROMPT_LATENT_PATH
202
+ data = np.load(cache_path)
203
+ txt = torch.from_numpy(data['embeds']).to(img.device).unsqueeze(0)
204
+ txt = torch.cat([txt, txt], dim=0)
205
+ mask = torch.from_numpy(data['masks']).to(img.device).unsqueeze(0)
206
+ mask = torch.cat([mask, mask], dim=0)
207
+ else:
208
+ txt, mask = self.llm_encoder(prompt, ref_image_raw) # everything is duplicated because there are both positive and negative prompts
209
+
210
+ if self.offload:
211
+ self.llm_encoder = self.llm_encoder.cpu()
212
+ cudagc()
213
+
214
+ txt_ids = torch.zeros(bs, txt.shape[1], 3)
215
+
216
+ img = torch.cat([img, ref_img.to(device=img.device, dtype=img.dtype)], dim=-2) # 2,4550,64: concatenated along the patch (sequence) dimension
217
+ img_ids = torch.cat([img_ids, ref_img_ids], dim=-2)
218
+
219
+ return {
220
+ "img": img,
221
+ "mask": mask,
222
+ "img_ids": img_ids.to(img.device), # image patch coordinates
223
+ "llm_embedding": txt.to(img.device), # text embeddings
224
+ "txt_ids": txt_ids.to(img.device), # text token coordinates
225
+ }
226
+
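A tiny standalone illustration of the positional-id scheme built in prepare() above (the grid and batch sizes are arbitrary toy values, not the model's actual dimensions):

import torch
from einops import repeat

h2, w2, bs = 3, 4, 2                              # packed latent grid (h/2, w/2), toy sizes
ids = torch.zeros(h2, w2, 3)                      # (reserved, Y, X) per grid cell
ids[..., 1] += torch.arange(h2)[:, None]          # row index broadcast across each row
ids[..., 2] += torch.arange(w2)[None, :]          # column index broadcast across each column
ids = repeat(ids, "h w c -> b (h w) c", b=bs)     # flatten the grid and replicate over the batch
print(ids.shape)                                  # torch.Size([2, 12, 3])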
227
+ @staticmethod
228
+ def process_diff_norm(diff_norm, k):
229
+ pow_result = torch.pow(diff_norm, k)
230
+
231
+ result = torch.where(
232
+ diff_norm > 1.0,
233
+ pow_result,
234
+ torch.where(diff_norm < 1.0, torch.ones_like(diff_norm), diff_norm),
235
+ )
236
+ return result
237
+
238
+ def denoise(
239
+ self,
240
+ img: torch.Tensor,
241
+ img_ids: torch.Tensor,
242
+ llm_embedding: torch.Tensor,
243
+ txt_ids: torch.Tensor,
244
+ timesteps: list[float],
245
+ cfg_guidance: float = 6.0,
246
+ mask=None,
247
+ show_progress=False,
248
+ timesteps_truncate=1.0,
249
+ ):
250
+ if self.offload:
251
+ self.dit = self.dit.to(self.device)
252
+ if show_progress:
253
+ pbar = tqdm(itertools.pairwise(timesteps), desc='denoising...')
254
+ else:
255
+ pbar = itertools.pairwise(timesteps)
256
+ '''
257
+ Cond 0 RGB
258
+ Uncd 0 RGB
259
+ '''
260
+ for t_curr, t_prev in pbar:
261
+ '''
262
+ If the input batch dimension is 2, nothing changes; if it is 1, then:
263
+ imgN D RGB
264
+ imgN D RGB
265
+ '''
266
+ if img.shape[0] == 1 and cfg_guidance != -1:
267
+ img = torch.cat([img, img], dim=0)
268
+ t_vec = torch.full((img.shape[0],), t_curr, dtype=img.dtype, device=img.device)
269
+
270
+ pred, feat = self.dit(
271
+ img=img,
272
+ img_ids=img_ids,
273
+ txt_ids=txt_ids,
274
+ timesteps=t_vec,
275
+ llm_embedding=llm_embedding,
276
+ t_vec=t_vec,
277
+ mask=mask,
278
+ )
279
+
280
+ assert cfg_guidance != -1, " cfg_guidance must not be -1 NOW!!!!"
281
+ cond, uncond = (
282
+ pred[0:pred.shape[0] // 2, :],
283
+ pred[pred.shape[0] // 2:, :],
284
+ )
285
+ '''
286
+ Cond D ??? <- pred
287
+ Uncd D ???
288
+ '''
289
+ pred = uncond + cfg_guidance * (cond - uncond) #1,4608,64
290
+ pred1 = cond #todo only support single denoise!!!
291
+ '''
292
+ Cond 0 RGB
293
+ + pred D ???
294
+ temI D ???
295
+ '''
296
+ tem_img = img[0:img.shape[0] // 2, :] + (t_prev - t_curr) * pred #1,4608,64
297
+ img_input_length = img.shape[1] // 2
298
+ '''
299
+ tmpI [D](√) ???(x)
300
+ cat Cond 0(x) [RGB](√)
301
+ imgN [D] [RGB]
302
+ '''
303
+ img = torch.cat(
304
+ [
305
+ tem_img[:, :img_input_length], #1,2304,64
306
+ img[:img.shape[0] // 2, img_input_length:], #1,2304,64
307
+ ],
308
+ dim=1) #1,4608,64
309
+ if self.offload:
310
+ self.dit = self.dit.cpu()
311
+ cudagc()
312
+
313
+ return img[:, :img.shape[1] // 2], pred1[:, img.shape[1] // 2:]
314
+
315
+ def double_denoise(self,img,img_ids,llm_embedding,txt_ids,timesteps,cfg_guidance=6.0,mask=None,height=None,width=None):
316
+ if img.shape[0] == 1 and cfg_guidance != -1:
317
+ img = torch.cat([img, img], dim=0)
318
+
319
+ t_vec = torch.full((img.shape[0],), 1.0, dtype=img.dtype, device=img.device)
320
+
321
+ pred, _ = self.dit(
322
+ img=img,
323
+ img_ids=img_ids,
324
+ txt_ids=txt_ids,
325
+ timesteps=t_vec,
326
+ llm_embedding=llm_embedding,
327
+ t_vec=t_vec,
328
+ mask=mask,
329
+ )
330
+
331
+ assert cfg_guidance != -1, " cfg_guidance must not be -1 NOW!!!!"
332
+ pred, uncond = (
333
+ pred[0:pred.shape[0] // 2, :],
334
+ pred[pred.shape[0] // 2:, :],
335
+ )
336
+ Lpred,Rpred = self.unpack_latents(pred, height//16, width//16)
337
+ return Lpred.to(torch.float32),Rpred.to(torch.float32)
338
+
339
+ @staticmethod
340
+ def unpack(x: torch.Tensor, height: int, width: int) -> torch.Tensor:
341
+ return rearrange(
342
+ x,
343
+ "b (h w) (c ph pw) -> b c (h ph) (w pw)",
344
+ h=math.ceil(height / 16),
345
+ w=math.ceil(width / 16),
346
+ ph=2,
347
+ pw=2,
348
+ )
349
+
350
+ @staticmethod
351
+ def unpack_latents(x: torch.Tensor, packed_latent_height: int, packed_latent_width: int):
352
+ """
353
+ x: [b (h w) (c ph pw)] -> [b c (h ph) (w pw)], ph=2, pw=2
354
+ """
355
+ import einops
356
+ x = einops.rearrange(x, "b (p h w) (c ph pw) -> b p c (h ph) (w pw)", h=packed_latent_height, w=packed_latent_width, ph=2, pw=2, p=2)
357
+ return x[:, 0], x[:, 1]
358
+
359
+ @staticmethod
360
+ def load_image(image):
361
+ from PIL import Image
362
+
363
+ if isinstance(image, np.ndarray):
364
+ image = torch.from_numpy(image).permute(2, 0, 1).float() / 255.0
365
+ image = image.unsqueeze(0)
366
+ return image
367
+ elif isinstance(image, Image.Image):
368
+ image = F.to_tensor(image.convert("RGB"))
369
+ image = image.unsqueeze(0)
370
+ return image
371
+ elif isinstance(image, torch.Tensor):
372
+ return image
373
+ elif isinstance(image, str):
374
+ image = F.to_tensor(Image.open(image).convert("RGB"))
375
+ image = image.unsqueeze(0)
376
+ return image
377
+ else:
378
+ raise ValueError(f"Unsupported image type: {type(image)}")
379
+
380
+ def output_process_image(self, resize_img, image_size):
381
+ res_image = resize_img.resize(image_size)
382
+ return res_image
383
+
384
+ def input_process_image(self, img):
385
+ if isinstance(img, torch.Tensor):
386
+ w, h = img.shape[-1], img.shape[-2]
387
+ elif isinstance(img, Image.Image):
388
+ w, h = img.size
389
+
390
+ if w <= 1024 and h <= 768:
391
+ w_new, h_new = 1024, 768
392
+ elif w <= 1280 and h <= 960:
393
+ w_new, h_new = 1216, 352
394
+ elif w <= 6048 and h <= 4032:
395
+ w_new, h_new = 864, 576
396
+ else:
397
+ w_new, h_new = w, h
398
+
399
+ if isinstance(img, torch.Tensor):
400
+ img_resized = Func.interpolate(img, (h_new, w_new), mode='bilinear', align_corners=False)
401
+ img_resized = img_resized.clamp(0, 1)
402
+ else:
403
+ img_resized = img.resize((w_new, h_new))
404
+
405
+ return img_resized, (w_new, h_new)
406
+
407
+ @torch.inference_mode()
408
+ def generate_image(
409
+ self,prompt,negative_prompt,ref_images,num_steps,cfg_guidance,seed,num_samples=1,init_image=None,image2image_strength=0.0,show_progress=False,size_level=512,args=None,judge=None,name=None
410
+ ):
411
+ assert num_samples == 1, "num_samples > 1 is not supported yet."
412
+
413
+ ref_images_raw, img_info = self.input_process_image(ref_images)
414
+ if isinstance(ref_images, Image.Image):
415
+ ref_images_raw = self.load_image(ref_images_raw)
416
+
417
+ height, width = ref_images_raw.shape[-2], ref_images_raw.shape[-1]
418
+
419
+ ref_images_raw = ref_images_raw.to(self.device)
420
+ if self.offload:
421
+ self.ae = self.ae.to(self.device)
422
+ ref_images = self.ae.encode(ref_images_raw.to(self.device) * 2 - 1) #bs,3,656,880 -> 1,16,82,110
423
+ # add cache
424
+
425
+ if self.offload:
426
+ self.ae = self.ae.cpu()
427
+ cudagc()
428
+
429
+ seed = int(seed)
430
+ seed = torch.Generator(device="cpu").seed() if seed < 0 else seed
431
+
432
+ t0 = time.perf_counter()
433
+
434
+ if init_image is not None:
435
+ init_image = self.load_image(init_image)
436
+ init_image = init_image.to(self.device)
437
+ init_image = torch.nn.functional.interpolate(init_image, (height, width))
438
+ if self.offload:
439
+ self.ae = self.ae.to(self.device)
440
+ init_image = self.ae.encode(init_image.to() * 2 - 1)
441
+ if self.offload:
442
+ self.ae = self.ae.cpu()
443
+ cudagc()
444
+
445
+ if args is not None and hasattr(args, 'single_denoise') and not args.single_denoise:
446
+ x = torch.randn(num_samples,16,height // 8,width // 8,device=self.device,dtype=torch.bfloat16,generator=torch.Generator(device=self.device).manual_seed(seed),)
447
+ else:
448
+ x= torch.zeros(num_samples,16,height // 8,width // 8,device=self.device,dtype=torch.bfloat16,)
449
+
450
+
451
+ timesteps = sampling.get_schedule(num_steps, x.shape[-1] * x.shape[-2] // 4, shift=True)
452
+
453
+ if init_image is not None:
454
+ t_idx = int((1 - image2image_strength) * num_steps)
455
+ t = timesteps[t_idx]
456
+ timesteps = timesteps[t_idx:]
457
+ x = t * x + (1.0 - t) * init_image.to(x.dtype)
458
+
459
+ x = torch.cat([x, x], dim=0)
460
+ ref_images = torch.cat([ref_images, ref_images], dim=0) # duplicated here for the with-prompt / without-prompt branches
461
+ ref_images_raw = torch.cat([ref_images_raw, ref_images_raw], dim=0)
462
+
463
+ # Check args and the prompt_type attribute
464
+ empty_llm = args is not None and hasattr(args, 'prompt_type') and args.prompt_type == 'empty'
465
+
466
+ inputs = self.prepare(
467
+ [prompt, negative_prompt],
468
+ x, # at inference time the "img" slot is pure noise rather than a ground-truth image
469
+ ref_image=ref_images,
470
+ ref_image_raw=ref_images_raw,
471
+ empty_llm=empty_llm)
472
+
473
+ with torch.autocast(device_type=self.device.type,dtype=torch.bfloat16):
474
+ # Lpred,Rpred = self.double_denoise(**inputs,cfg_guidance=cfg_guidance,timesteps=timesteps,height=height,width=width) # the sequence includes the ref image patches
475
+ Lpred,Rpred = self.denoise(**inputs,cfg_guidance=cfg_guidance,timesteps=timesteps,show_progress=show_progress,timesteps_truncate=1.0,) # the sequence includes the ref image patches
476
+ Lpred=self.unpack(Lpred.float(),height,width)
477
+ Rpred=self.unpack(Rpred.float(),height,width)
478
+ if judge is not None:
479
+ judge = Func.interpolate(judge, (height, width), mode='bilinear', align_corners=False)
480
+ training_gt=self.ae.encode(judge)
481
+ training_loss = torch.nn.functional.mse_loss(Rpred,training_gt)
482
+ print(f"training_loss with rgb2: {training_loss}")
483
+
484
+ norm = torch.linalg.norm(judge, dim=1, keepdim=True)
485
+ norm[norm < 1e-9] = 1e-9
486
+ judge = judge / norm
487
+ training_gt =self.ae.encode(judge)
488
+ training_loss = torch.nn.functional.mse_loss(Rpred,training_gt)
489
+ print(f"training_loss with normed_rgb: {training_loss}")
490
+ Lpred = self.ae.decode(Lpred)
491
+ Rpred = self.ae.decode(Rpred)
492
+
493
+
494
+ Lpred = Lpred.clamp(-1, 1)
495
+ Lpred = Lpred.mul(0.5).add(0.5)
496
+ Rpred = Rpred.clamp(-1, 1)
497
+ # Rpred = Rpred.mul(0.5).add(0.5)
498
+
499
+ images_list = []
500
+ for img in Rpred.float():
501
+ images_list.append(self.output_process_image(F.to_pil_image(img), img_info))
502
+ return images_list, Lpred.float(), Rpred.float()
503
+
504
+
505
+ def main():
506
+
507
+ parser = argparse.ArgumentParser()
508
+ parser.add_argument('--model_path', type=str, required=True, help='Path to the model checkpoint')
509
+ parser.add_argument('--input_dir', type=str, required=True, help='Path to the input image directory')
510
+ parser.add_argument('--output_dir', type=str, required=True, help='Path to the output image directory')
511
+ parser.add_argument('--json_path', type=str, required=True, help='Path to the JSON file containing image names and prompts')
512
+ parser.add_argument('--seed', type=int, default=42, help='Random seed for generation')
513
+ parser.add_argument('--num_steps', type=int, default=28, help='Number of diffusion steps')
514
+ parser.add_argument('--cfg_guidance', type=float, default=6.0, help='CFG guidance strength')
515
+ parser.add_argument('--size_level', default=512, type=int)
516
+ parser.add_argument('--offload', action='store_true', help='Use offload for large models')
517
+ parser.add_argument('--quantized', action='store_true', help='Use fp8 model weights')
518
+ parser.add_argument('--lora', type=str, default=None)
519
+ parser.add_argument('--qwen2vl_model_path', type=str, default=str(DEFAULT_QWEN_DIR), help='Path to the local Qwen2.5-VL model directory')
520
+ parser.add_argument('--empty_prompt_cache', type=str, default=str(EMPTY_PROMPT_LATENT_PATH), help='Path to the empty-prompt latent cache')
521
+ args = parser.parse_args()
522
+
523
+ assert os.path.exists(args.input_dir), f"Input directory {args.input_dir} does not exist."
524
+ assert os.path.exists(args.json_path), f"JSON file {args.json_path} does not exist."
525
+
526
+ args.output_dir = args.output_dir.rstrip('/') + ('-offload' if args.offload else "") + ('-quantized' if args.quantized else "") + f"-{args.size_level}"
527
+ os.makedirs(args.output_dir, exist_ok=True)
528
+
529
+ image_and_prompts = json.load(open(args.json_path, 'r'))
530
+
531
+ image_edit = ImageGenerator(
532
+ ae_path=os.path.join(args.model_path, 'vae.safetensors'),
533
+ dit_path=os.path.join(args.model_path, "step1x-edit-i1258-FP8.safetensors" if args.quantized else "step1x-edit-i1258.safetensors"),
534
+ qwen2vl_model_path=args.qwen2vl_model_path,
535
+ max_length=640,
536
+ quantized=args.quantized,
537
+ offload=args.offload,
538
+ lora=args.lora,
539
+ )
540
+
541
+ time_list = []
542
+ for image_name, prompt in image_and_prompts.items():
543
+ image_path = os.path.join(args.input_dir, image_name)
544
+ output_path = os.path.join(args.output_dir, image_name)
545
+ os.makedirs(os.path.dirname(output_path), exist_ok=True)
546
+ start_time = time.time()
547
+
548
+ images, _, _ = image_edit.generate_image(
549
+ prompt,
550
+ negative_prompt="",
551
+ ref_images=Image.open(image_path).convert("RGB"),
552
+ num_samples=1,
553
+ num_steps=args.num_steps,
554
+ cfg_guidance=args.cfg_guidance,
555
+ seed=args.seed,
556
+ show_progress=True,
557
+ size_level=args.size_level,
558
+ )
559
+
560
+ print(f"Time taken: {time.time() - start_time:.2f} seconds")
561
+ time_list.append(time.time() - start_time)
562
+
563
+ images[0].save(output_path, lossless=True)
564
+ if len(time_list) > 1:
565
+ print(f'average time for {args.output_dir}: ', sum(time_list[1:]) / len(time_list[1:]))
566
+
567
+
568
+ if __name__ == "__main__":
569
+ main()
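For reference, a minimal sketch of driving ImageGenerator programmatically instead of through main(); the checkpoint paths, input file, and prompt below are placeholders, not values shipped with the repo:

from PIL import Image
from infer.inference import ImageGenerator

gen = ImageGenerator(
    ae_path="step1x/vae.safetensors",                      # placeholder checkpoint path
    dit_path="step1x/step1x-edit-i1258.safetensors",       # placeholder checkpoint path
    qwen2vl_model_path="Qwen",                             # local Qwen2.5-VL directory
    max_length=640,
)
images, Lpred, Rpred = gen.generate_image(
    prompt="example editing instruction",                  # placeholder prompt
    negative_prompt="",
    ref_images=Image.open("input.png").convert("RGB"),     # placeholder input image
    num_steps=28,
    cfg_guidance=6.0,
    seed=42,
    show_progress=True,
)
images[0].save("output.png")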
FE2E/infer/inner_evaluation.py ADDED
@@ -0,0 +1,598 @@
1
+ import logging
2
+ import os
3
+ import sys
4
+ import csv # keep csv for saving results
5
+ import multiprocessing as mp
6
+ import time
7
+ import numpy as np
8
+ import torch
9
+ from omegaconf import OmegaConf
10
+ from tabulate import tabulate
11
+ from torch.utils.data import DataLoader
12
+ from tqdm.auto import tqdm
13
+ import cv2
14
+ from infer.dataset import (
15
+ BaseDepthDataset,
16
+ DatasetMode,
17
+ get_dataset,
18
+ get_pred_name,
19
+ )
20
+ from .util import metric, normal_utils
21
+ from .util.alignment import (align_depth_least_square, depth2disparity, disparity2depth, depth2log_space, log_space2depth)
22
+ from .util.metric import MetricTracker
23
+ from infer.image_utils import colorize_depth_map
24
+
25
+ eval_metrics = [
26
+ "abs_relative_difference",
27
+ "squared_relative_difference",
28
+ "rmse_linear",
29
+ "rmse_log",
30
+ "delta1_acc",
31
+ "delta2_acc",
32
+ "delta3_acc",
33
+ ]
34
+
35
+
36
+ def save_visualization_worker(save_vis_path, safe_pred_name, cfg_suffix, depth_pred_np, depth_raw_np, valid_mask_np, input_rgb_data, rank):
37
+ """
38
+ Worker function that saves visualizations; runs in a separate process.
39
+ Args:
40
+ save_vis_path: output directory
41
+ safe_pred_name: sanitized prediction file name
42
+ cfg_suffix: cfg suffix
43
+ depth_pred_np: predicted depth map, numpy array
44
+ depth_raw_np: ground-truth depth map, numpy array
45
+ valid_mask_np: valid-pixel mask, numpy array
46
+ input_rgb_data: input RGB image data
47
+ rank: GPU rank
48
+ """
49
+ try:
50
+ # Convert to torch tensors for colorize_depth_map
51
+ depth_pred_ts = torch.from_numpy(depth_pred_np)
52
+ depth_raw_ts = torch.from_numpy(depth_raw_np)
53
+ valid_mask_ts = torch.from_numpy(valid_mask_np)
54
+
55
+ # 1. Save the predicted depth map
56
+ depth_pred_vis = colorize_depth_map(depth_pred_ts)
57
+ pred_save_path = os.path.join(save_vis_path, f"{safe_pred_name}{cfg_suffix}_pred.png")
58
+ depth_pred_vis.save(pred_save_path)
59
+ print(f"saved: {pred_save_path}")
60
+ # 3. Save the error map
61
+ # Compute the absolute relative error
62
+ abs_rel_error = torch.abs(depth_pred_ts - depth_raw_ts) / (depth_raw_ts + 1e-6)
63
+ abs_rel_error = abs_rel_error * valid_mask_ts.float()
64
+
65
+ # Render the error map with matplotlib
66
+ import matplotlib
67
+ matplotlib.use('Agg') # use a non-interactive backend
68
+ import matplotlib.pyplot as plt
69
+ import matplotlib.cm as cm
70
+
71
+ error_np = abs_rel_error.numpy()
72
+ # Set the error display range
73
+ vmax = 0.2 # adjust as needed
74
+ error_normalized = np.clip(error_np / vmax, 0, 1)
75
+
76
+ # Apply the colormap
77
+ jet_cmap = cm.get_cmap('jet')
78
+ error_colored = jet_cmap(error_normalized)[:, :, :3] # drop the alpha channel
79
+ error_colored = (error_colored * 255).astype(np.uint8)
80
+
81
+ # Set invalid regions to black
82
+ error_colored[~valid_mask_np] = [0, 0, 0]
83
+
84
+ error_save_path = os.path.join(save_vis_path, f"{safe_pred_name}{cfg_suffix}_error.png")
85
+ plt.imsave(error_save_path, error_colored)
86
+ print(f"saved: {error_save_path}")
87
+ # Close matplotlib figures to free memory
88
+ plt.close('all')
89
+
90
+ except Exception as e:
91
+ print(f"[VIS-Worker-{rank}] Failed to save visualization: {e}", file=sys.stderr)
92
+
93
+
94
+ def prepare_input_rgb_data(input_rgb):
95
+ """
96
+ Preprocess the input RGB data and convert it to numpy for use by the child process
97
+ """
98
+ if input_rgb is None:
99
+ return None
100
+
101
+ try:
102
+ # Handle different input image formats
103
+ if isinstance(input_rgb, torch.Tensor):
104
+ # If it is a torch tensor, convert to numpy
105
+ if input_rgb.dim() == 4: # Batch dimension
106
+ input_rgb = input_rgb[0]
107
+ if input_rgb.dim() == 3 and input_rgb.shape[0] == 3: # CHW format
108
+ input_rgb = input_rgb.permute(1, 2, 0)
109
+
110
+ # Ensure values are in [0, 1]
111
+ if input_rgb.max() <= 1.0:
112
+ input_rgb = (input_rgb * 255).clamp(0, 255).byte()
113
+
114
+ input_rgb_np = input_rgb.cpu().numpy()
115
+
116
+ elif isinstance(input_rgb, np.ndarray):
117
+ input_rgb_np = input_rgb
118
+ # Ensure values are in the correct range
119
+ if input_rgb_np.max() <= 1.0:
120
+ input_rgb_np = (input_rgb_np * 255).astype(np.uint8)
121
+ else:
122
+ # Assume a PIL image or another format
123
+ input_rgb_np = np.array(input_rgb)
124
+
125
+ return input_rgb_np.copy() # copy to avoid cross-process sharing issues
126
+
127
+ except Exception as e:
128
+ print(f"Failed to process input RGB data: {e}", file=sys.stderr)
129
+ return None
130
+
131
+
132
+ def evaluate_single_prediction(pred_depth, depth_raw, valid_mask, dataset, device, metric_funcs, alignment_max_res=None, save_pred_vis=False, save_vis_path=None, pred_name=None, cfg_suffix="", alignment="least_square", rank=0, input_rgb=None):
133
+ """Args: pred_depth: predicted depth map (numpy array, [0,1]); depth_raw: ground-truth depth map (numpy array); valid_mask: valid-pixel mask (numpy array); dataset: dataset object; device: compute device; metric_funcs: list of metric functions; alignment_max_res: maximum resolution used during alignment; save_pred_vis: whether to save visualization results; save_vis_path: visualization output path; pred_name: prediction file name; cfg_suffix: cfg suffix, used to distinguish different cfg settings
134
+ alignment: alignment mode, either "least_square" or "least_square_disparity"
135
+ rank: GPU rank, used for multi-process image saving
136
+ input_rgb: input RGB image (torch.Tensor or PIL.Image or numpy.ndarray)
137
+ Returns: sample_metric: list of all evaluation metrics for this sample"""
138
+ # Ensure the predicted depth map has the correct dimensionality
139
+ if len(pred_depth.shape) == 3:
140
+ pred_depth = pred_depth.mean(0) # [0,1]
141
+
142
+ # Resize the prediction to match the ground-truth depth map
143
+ if pred_depth.shape != depth_raw.shape:
144
+ pred_depth = cv2.resize(pred_depth, (depth_raw.shape[1], depth_raw.shape[0]), interpolation=cv2.INTER_LINEAR)
145
+
146
+ if "least_square" == alignment:
147
+ depth_pred, scale, shift = align_depth_least_square(
148
+ gt_arr=depth_raw,
149
+ pred_arr=pred_depth,
150
+ valid_mask_arr=valid_mask,
151
+ return_scale_shift=True,
152
+ max_resolution=alignment_max_res,
153
+ )
154
+ elif "log_space" == alignment:
155
+ gt_log, gt_non_neg_mask = depth2log_space(depth=depth_raw, return_mask=True)
156
+ pred_non_neg_mask = pred_depth > 0
157
+ valid_nonnegative_mask = valid_mask & gt_non_neg_mask & pred_non_neg_mask
158
+
159
+ # Ensure the input is a numpy array
160
+ if isinstance(gt_log, torch.Tensor):
161
+ gt_log = gt_log.cpu().numpy()
162
+
163
+ log_space_pred, scale, shift = align_depth_least_square(
164
+ gt_arr=gt_log,
165
+ pred_arr=pred_depth,
166
+ valid_mask_arr=valid_nonnegative_mask,
167
+ return_scale_shift=True,
168
+ max_resolution=alignment_max_res,
169
+ )
170
+ log_space_pred = np.clip(log_space_pred, a_min=None, a_max=5.)
171
+ depth_pred = log_space2depth(log_space_pred)
172
+ # Clip to the dataset's depth range
173
+ depth_pred = np.clip(depth_pred, a_min=dataset.min_depth, a_max=dataset.max_depth)
174
+
175
+ # Clip to d > 0 for evaluation
176
+ depth_pred = np.clip(depth_pred, a_min=1e-6, a_max=None)
177
+
178
+ # Move to the device for evaluation
179
+ depth_pred_ts = torch.from_numpy(depth_pred).to(device)
180
+ depth_raw_ts = torch.from_numpy(depth_raw).to(device)
181
+ valid_mask_ts = torch.from_numpy(valid_mask).to(device)
182
+
183
+ # Launch the visualization-saving process (synchronous)
184
+ if save_pred_vis and save_vis_path is not None and pred_name is not None:
185
+ safe_pred_name = pred_name.replace('/', '_').replace('\\', '_')
186
+ input_rgb_data = prepare_input_rgb_data(input_rgb)
187
+ vis_process = mp.Process(
188
+ target=save_visualization_worker,
189
+ args=(
190
+ save_vis_path,
191
+ safe_pred_name,
192
+ cfg_suffix,
193
+ depth_pred.copy(),
194
+ depth_raw.copy(),
195
+ valid_mask.copy(),
196
+ input_rgb_data,
197
+ rank
198
+ )
199
+ )
200
+ vis_process.start()
201
+ # save_visualization_worker(save_vis_path, safe_pred_name, cfg_suffix, depth_pred.copy(), depth_raw.copy(), valid_mask.copy(), input_rgb_data, rank)
202
+
203
+ # Compute evaluation metrics
204
+ sample_metric = []
205
+ for met_func in metric_funcs:
206
+ _metric = met_func(depth_pred_ts, depth_raw_ts, valid_mask_ts).item()
207
+ sample_metric.append(_metric)
208
+
209
+ return sample_metric
210
+
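A self-contained sketch of calling evaluate_single_prediction on synthetic arrays (the dataset stub, depth range, and array sizes are made up for illustration):

import numpy as np
import torch
from types import SimpleNamespace
from infer.util import metric
from infer.inner_evaluation import evaluate_single_prediction, eval_metrics

metric_funcs = [getattr(metric, name) for name in eval_metrics]
dataset_stub = SimpleNamespace(min_depth=0.1, max_depth=10.0)     # hypothetical depth range
pred = np.random.rand(120, 160).astype(np.float32)                # relative prediction in [0, 1]
gt = np.random.uniform(0.5, 9.5, (120, 160)).astype(np.float32)   # synthetic metric depth
mask = np.ones((120, 160), dtype=bool)

scores = evaluate_single_prediction(
    pred, gt, mask,
    dataset=dataset_stub,
    device=torch.device("cpu"),
    metric_funcs=metric_funcs,
    alignment="least_square",
)
print(dict(zip(eval_metrics, scores)))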
211
+
212
+ def evaluation_depth_custom_parallel(rank, world_size, output_dir, dataset_config, args, pipeline, base_data_dir, pred_suffix="", alignment="least_square", alignment_max_res=None, prediction_dir=None, save_pred_vis=False):
213
+ """
214
+ Depth evaluation function with multi-GPU parallel support
215
+ """
216
+ import time
217
+
218
+ os.makedirs(output_dir, exist_ok=True)
219
+
220
+ cuda_avail = torch.cuda.is_available()
221
+ device = torch.device(f"cuda:{rank}")
222
+
223
+ cfg_data = OmegaConf.load(dataset_config)
224
+
225
+ dataset: BaseDepthDataset = get_dataset(cfg_data, base_data_dir=base_data_dir, mode=DatasetMode.EVAL, prompt_type=args.prompt_type)
226
+
227
+ # Get the dataset name, used for naming the CSV tables
228
+ dataset_name = dataset.__class__.__name__
229
+
230
+ # Initialize the list that stores per-sample results
231
+ results_data = []
232
+
233
+ # Compute the data range handled by each GPU
234
+ total_samples = len(dataset)
235
+ if args.num_samples > 0:
236
+ total_samples = min(args.num_samples, total_samples)
237
+
238
+ chunk_size = total_samples // world_size
239
+ start_idx = rank * chunk_size
240
+ end_idx = start_idx + chunk_size if rank < world_size - 1 else total_samples
241
+
242
+ from torch.utils.data import SubsetRandomSampler
243
+ indices = list(range(start_idx, end_idx))
244
+
245
+ dataloader = DataLoader(dataset, batch_size=1, num_workers=0 if args.debug else 4, pin_memory=True, sampler=SubsetRandomSampler(indices),shuffle=False)
246
+
247
+ metric_funcs = [getattr(metric, _met) for _met in eval_metrics]
248
+
249
+ # Create separate metric trackers for cfg=1 and cfg=6
250
+ metric_tracker_Lpred = MetricTracker(*[m.__name__ for m in metric_funcs])
251
+ metric_tracker_Lpred.reset()
252
+ metric_tracker_Rpred = MetricTracker(*[m.__name__ for m in metric_funcs])
253
+ metric_tracker_Rpred.reset()
254
+
255
+ if save_pred_vis:
256
+ save_vis_path = os.path.join(output_dir, "vis")
257
+ os.makedirs(save_vis_path, exist_ok=True)
258
+
259
+ # Create the CSV output directory on every GPU
260
+ csv_save_path = os.path.join(output_dir, "csv_results")
261
+ os.makedirs(csv_save_path, exist_ok=True)
262
+ else:
263
+ save_vis_path = None
264
+ csv_save_path = None
265
+
266
+ processing_times = []
267
+ vis_processes = [] # tracks visualization processes
268
+ max_vis_processes = 4 # limit the number of concurrently running visualization processes
269
+
270
+ sample_count = 0
271
+ for data in dataloader:
272
+ sample_count += 1
273
+
274
+ depth_raw_ts = data["depth_raw_linear"].squeeze()
275
+ valid_mask_ts = data["valid_mask_raw"].squeeze()
276
+ rgb_name = data["rgb_relative_path"][0]
277
+
278
+ depth_raw = depth_raw_ts.numpy()
279
+ valid_mask = valid_mask_ts.numpy()
280
+
281
+ # Get predictions
282
+ rgb_basename = os.path.basename(rgb_name)
283
+ pred_basename = get_pred_name(rgb_basename, dataset.name_mode, suffix=pred_suffix)
284
+ pred_name = os.path.join(os.path.dirname(rgb_name), pred_basename)
285
+
286
+ start_time = time.time()
287
+ image_list, Lpred, Rpred = pipeline.generate_image(args.prompt if args.prompt_type == "query" else data["prompt"][0], negative_prompt="", ref_images=data["rgb"], num_samples=1, num_steps=args.num_steps, cfg_guidance=args.cfg_guidance, seed=args.seed + rank, show_progress=False, size_level=args.size_level, args=args)
288
+ end_time = time.time()
289
+ processing_times.append(end_time - start_time)
290
+
291
+ Lpred = Lpred[0].cpu().numpy()
292
+
293
+ # Save visualization results (via the multi-process path)
294
+ if save_pred_vis and save_vis_path is not None:
295
+ # Sanitize the file name by replacing path separators
296
+ safe_pred_name = pred_name.replace('/', '_').replace('\\', '_')
297
+
298
+ # Preprocess the input RGB data
299
+ input_rgb_data = prepare_input_rgb_data(data["rgb"])
300
+
301
+ # Limit the number of concurrently running visualization processes
302
+ while len([p for p in vis_processes if p.is_alive()]) >= max_vis_processes:
303
+ # Wait for some processes to finish
304
+ for p in vis_processes[:]:
305
+ if not p.is_alive():
306
+ vis_processes.remove(p)
307
+ if len([p for p in vis_processes if p.is_alive()]) >= max_vis_processes:
308
+ time.sleep(0.1) # brief wait
309
+
310
+ # Spawn a child process to save the visualization
311
+ vis_process = mp.Process(
312
+ target=save_visualization_worker,
313
+ args=(
314
+ save_vis_path,
315
+ safe_pred_name,
316
+ "_Lpred",
317
+ Lpred.copy(),
318
+ depth_raw.copy(),
319
+ valid_mask.copy(),
320
+ input_rgb_data,
321
+ rank
322
+ )
323
+ )
324
+ vis_process.start()
325
+ vis_processes.append(vis_process)
326
+
327
+ sample_metric_Lpred = evaluate_single_prediction(pred_depth=Lpred, depth_raw=depth_raw, valid_mask=valid_mask, dataset=dataset, device=device, metric_funcs=metric_funcs, alignment_max_res=alignment_max_res, save_pred_vis=False, save_vis_path=None, pred_name=pred_name, cfg_suffix="_Lpred", alignment=alignment, rank=rank, input_rgb=data["rgb"])
328
+
329
+ for i, met_func in enumerate(metric_funcs):
330
+ metric_name = met_func.__name__
331
+ metric_tracker_Lpred.update(metric_name, sample_metric_Lpred[i])
332
+
333
+ # Print per-sample results
334
+ img_id = os.path.basename(rgb_name).replace('.png', '').replace('.jpg', '')
335
+ global_sample_idx = start_idx + sample_count
336
+
337
+ # CFG=1 results
338
+ abs_rel_Lpred = sample_metric_Lpred[0] # abs_relative_difference
339
+ rmse_Lpred = sample_metric_Lpred[2] # rmse_linear
340
+ delta1_Lpred = sample_metric_Lpred[4] # delta1_acc
341
+
342
+ # Adjusted output format
343
+ if args.save_viz:
344
+ print(f"|{global_sample_idx:03d}|{abs_rel_Lpred:.4f}|{rmse_Lpred:.4f}|{delta1_Lpred:.4f}|", file=sys.stderr)
345
+ elif not args.save_viz:
346
+ print(f"[GPU:{rank}] Sample:{global_sample_idx:03d}/{total_samples} | ID:{img_id:<12}", file=sys.stderr)
347
+ print(f" CFG=1: abs_rel:{abs_rel_Lpred:.4f} | rmse:{rmse_Lpred:.4f} | a1:{delta1_Lpred:.4f}", file=sys.stderr)
348
+ print(f" Time: {processing_times[-1]:.2f}s", file=sys.stderr)
349
+
350
+ # Every GPU appends its results to the list
351
+ if args.save_viz:
352
+ results_data.append({'GPU_Rank': rank, 'Sample_ID': global_sample_idx, 'Image_Name': rgb_name, 'abs_rel': abs_rel_Lpred, 'rmse': rmse_Lpred, 'delta1': delta1_Lpred, 'processing_time': processing_times[-1]})
353
+
354
+ # Wait for all visualization processes to finish
355
+ if save_pred_vis:
356
+ print(f"[GPU:{rank}] Waiting for visualization processes to finish...", file=sys.stderr)
357
+ for p in vis_processes:
358
+ p.join(timeout=30) # join with a timeout
359
+ if p.is_alive():
360
+ print(f"[GPU:{rank}] Visualization process timed out, terminating", file=sys.stderr)
361
+ p.terminate()
362
+ print(f"[GPU:{rank}] All visualization processes finished", file=sys.stderr)
363
+
364
+ if args.save_viz and csv_save_path is not None:
365
+ csv_file_path = os.path.join(csv_save_path, f"{dataset_name}_results_rank{rank}.csv")
366
+
367
+ try:
368
+ with open(csv_file_path, 'w', newline='') as csvfile:
369
+ fieldnames = ['GPU_Rank', 'Sample_ID', 'Image_Name', 'abs_rel', 'rmse', 'delta1', 'processing_time']
370
+ writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
371
+ writer.writeheader()
372
+ for row in results_data:
373
+ writer.writerow(row)
374
+ print(f"[GPU:{rank}] Results saved to CSV: {csv_file_path}", file=sys.stderr)
375
+ except Exception as e:
376
+ print(f"[GPU:{rank}] Failed to save CSV: {e}", file=sys.stderr)
377
+
378
+ return metric_tracker_Lpred, metric_tracker_Rpred, processing_times
379
+
380
+
381
+ def evaluation_normal_custom_parallel(rank, world_size, output_dir, base_data_dir, dataset_split_path, pipeline, args, eval_datasets, save_pred_vis=False):
382
+ """
383
+ Normal-map evaluation with multi-GPU data parallelism.
384
+ """
385
+ import time
386
+
387
+ os.makedirs(output_dir, exist_ok=True)
388
+ device = torch.device(f"cuda:{rank}")
389
+
390
+ # Result dictionaries, one entry per dataset
391
+ all_normal_errors = {}
392
+ all_processing_times = {}
393
+ all_dataset_metrics = {}
394
+
395
+ for dataset_name, split in eval_datasets:
396
+ # Build the dataloader - keep num_workers low to avoid resource contention
397
+ try:
398
+ # Build the dataset
399
+ from infer.dataset_normal.normal_dataloader import NormalDataset
400
+ dataset = NormalDataset(base_data_dir, dataset_split_path, dataset_name=dataset_name, split=split, mode='test', epoch=0)
401
+
402
+ total_samples = len(dataset)
403
+ if args.num_samples > 0:
404
+ total_samples = min(args.num_samples, total_samples)
405
+
406
+ # Compute the sample range handled by this GPU
407
+ samples_per_gpu = total_samples // world_size
408
+ start_idx = rank * samples_per_gpu
409
+ if rank == world_size - 1:
410
+ end_idx = total_samples
411
+ else:
412
+ end_idx = start_idx + samples_per_gpu
413
+
414
+ # Build the per-GPU index list and sample it with SubsetRandomSampler
415
+ from torch.utils.data import SubsetRandomSampler
416
+ indices = list(range(start_idx, end_idx))
417
+
418
+ dataloader = DataLoader(dataset, batch_size=1, shuffle=False, num_workers=1, pin_memory=False, sampler=SubsetRandomSampler(indices))
419
+
420
+ if rank == 0:
421
+ print(f"[GPU:{rank}] 开始评估Normal数据集: {dataset_name}")
422
+
423
+ except Exception as e:
424
+ print(f"[GPU:{rank}] 创建数据加载器失败: {e}")
425
+ continue
426
+
427
+ dataset_output_dir = os.path.join(output_dir, dataset_name)
428
+ os.makedirs(dataset_output_dir, exist_ok=True)
429
+
430
+ if save_pred_vis:
431
+ save_vis_path = os.path.join(dataset_output_dir, "vis")
432
+ os.makedirs(save_vis_path, exist_ok=True)
433
+ else:
434
+ save_vis_path = None
435
+
436
+ processing_times = []
437
+ total_normal_errors = None
438
+ sample_count = 0
439
+ vis_processes = []  # tracks normal-visualization subprocesses
440
+ max_vis_processes = 5  # cap concurrent visualization processes (normal visualization is memory-hungry)
441
+
442
+ for data_dict in dataloader:
443
+ sample_count += 1
444
+
445
+ img = data_dict['img'].to(device)
446
+ scene_names = data_dict['scene_name']
447
+ img_names = data_dict['img_name']
448
+
449
+ # Original image size
450
+ _, _, orig_H, orig_W = img.shape
451
+
452
+ start_time = time.time()
453
+ image_list, L_pred, norm_out = pipeline.generate_image("Predict the depth map for the image on the left and the normal map on the right.", negative_prompt="", ref_images=img, num_samples=1, num_steps=args.num_steps, cfg_guidance=args.cfg_guidance, seed=args.seed + rank, show_progress=False, size_level=args.size_level, args=args, judge=data_dict['normal'].to(device) if dataset_name == "vkitti" or dataset_name == "hypersim" else None, name=img_names)
454
+ end_time = time.time()
455
+ processing_times.append(end_time - start_time)
456
+
457
+ # Post-process the normal output
458
+ norm_out = torch.nn.functional.interpolate(norm_out, size=(orig_H, orig_W), mode='bilinear', align_corners=False)
459
+ norm = torch.linalg.norm(norm_out, dim=1, keepdim=True)
460
+ norm[norm < 1e-9] = 1e-9
461
+ norm_out = norm_out / norm
462
+
463
+ pred_norm, pred_kappa = norm_out[:, :3, :, :], norm_out[:, 3:, :, :]
464
+ pred_kappa = None if pred_kappa.size(1) == 0 else pred_kappa
465
+
466
+ # Compute error (if ground truth is available)
467
+ # if 'normal' in data_dict.keys():
468
+ gt_norm = data_dict['normal'].to(device)
469
+ gt_norm_mask = data_dict['normal_mask'].to(device)
470
+
471
+ pred_error = normal_utils.compute_normal_error(pred_norm, gt_norm)
472
+ if total_normal_errors is None:
473
+ total_normal_errors = pred_error[gt_norm_mask]
474
+ else:
475
+ total_normal_errors = torch.cat((total_normal_errors, pred_error[gt_norm_mask]), dim=0)
476
+
477
+ # Save visualization results (via the multiprocess path)
478
+ if save_vis_path is not None:
479
+ # Cap the number of visualization processes running at once
480
+ while len([p for p in vis_processes if p.is_alive()]) >= max_vis_processes:
481
+ # Wait for some processes to finish
482
+ for p in vis_processes[:]:
483
+ if not p.is_alive():
484
+ vis_processes.remove(p)
485
+ if len([p for p in vis_processes if p.is_alive()]) >= max_vis_processes:
486
+ time.sleep(0.1)  # brief wait
487
+
488
+ prefixs = ['%s_%s' % (i, j) for (i, j) in zip(scene_names, img_names)]
489
+
490
+ # Preprocess the data
491
+ img_data, pred_norm_data, pred_kappa_data, gt_norm_data, gt_norm_mask_data, pred_error_data = prepare_normal_data_for_process(
492
+ img, pred_norm, pred_kappa, gt_norm, gt_norm_mask, pred_error
493
+ )
494
+
495
+ if img_data is not None:  # make sure preprocessing succeeded
496
+ # Spawn a subprocess to save the visualization
497
+ vis_process = mp.Process(
498
+ target=save_normal_visualization_worker,
499
+ args=(
500
+ save_vis_path,
501
+ prefixs,
502
+ img_data,
503
+ pred_norm_data,
504
+ pred_kappa_data,
505
+ gt_norm_data,
506
+ gt_norm_mask_data,
507
+ pred_error_data,
508
+ rank
509
+ )
510
+ )
511
+ vis_process.start()
512
+ vis_processes.append(vis_process)
513
+
514
+ # Progress logging
515
+ global_sample_idx = start_idx + sample_count
516
+ img_id = '_'.join([scene_names[0], img_names[0]])
517
+
518
+ if rank == 0 or sample_count % 10 == 0:  # reduce logging frequency
519
+ print(f"[GPU:{rank}] | 样本:{global_sample_idx:03d} | ID:{img_id} | 时间:{processing_times[-1]:.2f}s| ", file=sys.stderr)
520
+
521
+ # Wait for all visualization processes to finish
522
+ if save_pred_vis:
523
+ print(f"[GPU:{rank}] 等待Normal可视化进程完成...", file=sys.stderr)
524
+ for p in vis_processes:
525
+ p.join(timeout=60)  # normal visualization takes longer, so use a 60s timeout
526
+ if p.is_alive():
527
+ print(f"[GPU:{rank}] Normal可视化进程超时,强制终止", file=sys.stderr)
528
+ p.terminate()
529
+ print(f"[GPU:{rank}] 所有Normal可视化进程已完成", file=sys.stderr)
530
+
531
+ # Compute metrics for this GPU
532
+ metrics = None
533
+ if total_normal_errors is not None and len(total_normal_errors) > 0:
534
+ metrics = normal_utils.compute_normal_metrics(total_normal_errors)
535
+ if rank == 0:
536
+ print(f"[GPU:{rank}] 数据集 {dataset_name} 部分结果:")
537
+ print("mean median rmse 5 7.5 11.25 22.5 30")
538
+ print("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f" % (metrics['mean'], metrics['median'], metrics['rmse'], metrics['a1'], metrics['a2'], metrics['a3'], metrics['a4'], metrics['a5']))
539
+
540
+ # Store results
541
+ all_normal_errors[dataset_name] = total_normal_errors.cpu() if total_normal_errors is not None else None
542
+ all_processing_times[dataset_name] = processing_times
543
+ all_dataset_metrics[dataset_name] = metrics
544
+
545
+ return all_normal_errors, all_processing_times, all_dataset_metrics
546
+
547
+ def save_normal_visualization_worker(save_vis_path, prefixs, img_data, pred_norm_data, pred_kappa_data, gt_norm_data, gt_norm_mask_data, pred_error_data, rank):
548
+ """
549
+ Worker that saves normal visualizations; runs in a separate process.
550
+ Args:
551
+ save_vis_path: output directory
552
+ prefixs: list of file-name prefixes
553
+ img_data: input image data (numpy array)
554
+ pred_norm_data: predicted normal data (numpy array)
555
+ pred_kappa_data: predicted kappa data (numpy array or None)
556
+ gt_norm_data: GT normal data (numpy array)
557
+ gt_norm_mask_data: GT normal mask data (numpy array)
558
+ pred_error_data: prediction error data (numpy array)
559
+ rank: GPU rank
560
+ """
561
+ try:
562
+ import infer.visualize as vis_utils
563
+
564
+ # Convert back to torch tensors for the visualization function
565
+ img_ts = torch.from_numpy(img_data)
566
+ pred_norm_ts = torch.from_numpy(pred_norm_data)
567
+ pred_kappa_ts = torch.from_numpy(pred_kappa_data) if pred_kappa_data is not None else None
568
+ gt_norm_ts = torch.from_numpy(gt_norm_data)
569
+ gt_norm_mask_ts = torch.from_numpy(gt_norm_mask_data)
570
+ pred_error_ts = torch.from_numpy(pred_error_data)
571
+
572
+ # Use matplotlib's non-interactive backend
573
+ import matplotlib
574
+ matplotlib.use('Agg')
575
+
576
+ # Run the visualization
577
+ vis_utils.visualize_normal(save_vis_path, prefixs, img_ts, pred_norm_ts, pred_kappa_ts, gt_norm_ts, gt_norm_mask_ts, pred_error_ts)
578
+
579
+ except Exception as e:
580
+ print(f"[NORMAL-VIS-Worker-{rank}] Normal可视化保存失败: {e}", file=sys.stderr)
581
+
582
+
583
+ def prepare_normal_data_for_process(img, pred_norm, pred_kappa, gt_norm, gt_norm_mask, pred_error):
584
+ """
585
+ Preprocess normal data: convert to numpy so it can be passed to the subprocess.
586
+ """
587
+ try:
588
+ img_data = img.cpu().numpy()
589
+ pred_norm_data = pred_norm.cpu().numpy()
590
+ pred_kappa_data = pred_kappa.cpu().numpy() if pred_kappa is not None else None
591
+ gt_norm_data = gt_norm.cpu().numpy()
592
+ gt_norm_mask_data = gt_norm_mask.cpu().numpy()
593
+ pred_error_data = pred_error.cpu().numpy()
594
+
595
+ return img_data, pred_norm_data, pred_kappa_data, gt_norm_data, gt_norm_mask_data, pred_error_data
596
+ except Exception as e:
597
+ print(f"处理Normal数据失败: {e}", file=sys.stderr)
598
+ return None, None, None, None, None, None
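A minimal launch sketch for the multi-GPU evaluation entry point above, assuming it is importable from this evaluation module; `load_pipeline`, `parse_args`, the path fields read from `args`, and the dataset list are placeholders rather than parts of the repository.

import torch
import torch.multiprocessing as mp


def _eval_worker(rank, world_size, pipeline, args):
    # One process per GPU; each rank evaluates its own contiguous slice of the
    # dataset (see the samples_per_gpu / start_idx logic above).
    from evaluation import evaluation_normal_custom_parallel  # assumed import path
    evaluation_normal_custom_parallel(
        rank, world_size,
        output_dir=args.output_dir,            # placeholder field names on args
        base_data_dir=args.base_data_dir,
        dataset_split_path=args.dataset_split_path,
        pipeline=pipeline,
        args=args,
        eval_datasets=[("nyuv2", "test")],     # illustrative dataset/split pair
        save_pred_vis=args.save_viz,
    )


if __name__ == "__main__":
    world_size = torch.cuda.device_count()
    # pipeline = load_pipeline(...)  # placeholder: however the surrounding script builds it
    # args = parse_args()            # placeholder: argparse namespace with the fields used above
    # mp.spawn(_eval_worker, args=(world_size, pipeline, args), nprocs=world_size)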
FE2E/infer/sampling.py ADDED
@@ -0,0 +1,47 @@
1
+ import math
2
+ from collections.abc import Callable
3
+
4
+ import torch
5
+ from torch import Tensor
6
+
7
+
8
+ def get_noise(num_samples: int, height: int, width: int, device: torch.device, dtype: torch.dtype, seed: int):
9
+ return torch.randn(
10
+ num_samples,
11
+ 16,
12
+ # allow for packing
13
+ 2 * math.ceil(height / 16),
14
+ 2 * math.ceil(width / 16),
15
+ device=device,
16
+ dtype=dtype,
17
+ generator=torch.Generator(device=device).manual_seed(seed),
18
+ )
19
+
20
+
21
+ def time_shift(mu: float, sigma: float, t: Tensor):
22
+ return math.exp(mu) / (math.exp(mu) + (1 / t - 1) ** sigma)
23
+
24
+
25
+ def get_lin_function(x1: float = 256, y1: float = 0.5, x2: float = 4096, y2: float = 1.15) -> Callable[[float], float]:
26
+ m = (y2 - y1) / (x2 - x1)
27
+ b = y1 - m * x1
28
+ return lambda x: m * x + b
29
+
30
+
31
+ def get_schedule(
32
+ num_steps: int,
33
+ image_seq_len: int,
34
+ base_shift: float = 0.5,
35
+ max_shift: float = 1.15,
36
+ shift: bool = True,
37
+ ) -> list[float]:
38
+ # extra step for zero
39
+ timesteps = torch.linspace(1, 0, num_steps + 1)
40
+
41
+ # shifting the schedule to favor high timesteps for higher signal images
42
+ if shift:
43
+ # estimate mu based on linear estimation between two points
44
+ mu = get_lin_function(y1=base_shift, y2=max_shift)(image_seq_len)
45
+ timesteps = time_shift(mu, 1.0, timesteps)
46
+
47
+ return timesteps.tolist()
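A small usage sketch of these helpers, with an assumed import path: sample the packed latent noise, derive the latent sequence length from its spatial size (2x2 packing, so each token covers a 16x16 pixel patch), and build the shifted schedule.

import torch
from infer.sampling import get_noise, get_schedule  # assumed import path

height, width = 768, 1024
noise = get_noise(num_samples=1, height=height, width=width,
                  device=torch.device("cpu"), dtype=torch.float32, seed=0)
# After 2x2 packing, the latent token grid is half the latent resolution.
image_seq_len = (noise.shape[-2] // 2) * (noise.shape[-1] // 2)
timesteps = get_schedule(num_steps=28, image_seq_len=image_seq_len, shift=True)
print(len(timesteps), timesteps[0], timesteps[-1])  # 29 values, from 1.0 down to 0.0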
FE2E/infer/seed_all.py ADDED
@@ -0,0 +1,33 @@
1
+ # Copyright 2023 Bingxin Ke, ETH Zurich. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # --------------------------------------------------------------------------
15
+ # If you find this code useful, we kindly ask you to cite our paper in your work.
16
+ # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
+ # More information about the method can be found at https://marigoldmonodepth.github.io
18
+ # --------------------------------------------------------------------------
19
+
20
+
21
+ import numpy as np
22
+ import random
23
+ import torch
24
+
25
+
26
+ def seed_all(seed: int = 0):
27
+ """
28
+ Set random seeds of all components.
29
+ """
30
+ random.seed(seed)
31
+ np.random.seed(seed)
32
+ torch.manual_seed(seed)
33
+ torch.cuda.manual_seed_all(seed)
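Typical usage, e.g. at the top of an inference script (import path assumed):

from infer.seed_all import seed_all  # assumed import path

seed_all(42)  # seeds Python `random`, NumPy, and torch on CPU and all CUDA devices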
FE2E/infer/util/__init__.py ADDED
File without changes
FE2E/infer/util/alignment.py ADDED
@@ -0,0 +1,88 @@
1
+ # Author: Bingxin Ke
2
+ # Last modified: 2024-01-11
3
+
4
+ import numpy as np
5
+ import torch
6
+
7
+
8
+ def align_depth_least_square(
9
+ gt_arr: np.ndarray,
10
+ pred_arr: np.ndarray,
11
+ valid_mask_arr: np.ndarray,
12
+ return_scale_shift=True,
13
+ max_resolution=None,
14
+ ):
15
+ ori_shape = pred_arr.shape # input shape
16
+
17
+ gt = gt_arr.squeeze() # [H, W]
18
+ pred = pred_arr.squeeze()
19
+ valid_mask = valid_mask_arr.squeeze()
20
+
21
+ # Downsample
22
+ if max_resolution is not None:
23
+ scale_factor = np.min(max_resolution / np.array(ori_shape[-2:]))
24
+ if scale_factor < 1:
25
+ downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
26
+ gt = downscaler(torch.as_tensor(gt).unsqueeze(0)).numpy()
27
+ pred = downscaler(torch.as_tensor(pred).unsqueeze(0)).numpy()
28
+ valid_mask = (
29
+ downscaler(torch.as_tensor(valid_mask).unsqueeze(0).float())
30
+ .bool()
31
+ .numpy()
32
+ )
33
+
34
+ assert (
35
+ gt.shape == pred.shape == valid_mask.shape
36
+ ), f"{gt.shape}, {pred.shape}, {valid_mask.shape}"
37
+
38
+ gt_masked = gt[valid_mask].reshape((-1, 1))
39
+ pred_masked = pred[valid_mask].reshape((-1, 1))
40
+
41
+ # numpy solver
42
+ _ones = np.ones_like(pred_masked)
43
+ A = np.concatenate([pred_masked, _ones], axis=-1)
44
+ X = np.linalg.lstsq(A, gt_masked, rcond=None)[0]
45
+ scale, shift = X
46
+
47
+ aligned_pred = pred_arr * scale + shift
48
+
49
+ # restore dimensions
50
+ aligned_pred = aligned_pred.reshape(ori_shape)
51
+
52
+ if return_scale_shift:
53
+ return aligned_pred, scale, shift
54
+ else:
55
+ return aligned_pred
56
+
57
+
58
+ # ******************** disparity space ********************
59
+ def depth2disparity(depth, return_mask=False):
60
+ if isinstance(depth, torch.Tensor):
61
+ disparity = torch.zeros_like(depth)
62
+ elif isinstance(depth, np.ndarray):
63
+ disparity = np.zeros_like(depth)
64
+ non_negtive_mask = depth > 0
65
+ disparity[non_negtive_mask] = 1.0 / depth[non_negtive_mask]
66
+ if return_mask:
67
+ return disparity, non_negtive_mask
68
+ else:
69
+ return disparity
70
+
71
+
72
+ def disparity2depth(disparity, **kwargs):
73
+ return depth2disparity(disparity, **kwargs)
74
+
75
+ # ******************** log space ********************
76
+ def depth2log_space(depth, **kwargs):
77
+ if isinstance(depth, torch.Tensor):
78
+ log_space = torch.zeros_like(depth)
79
+ elif isinstance(depth, np.ndarray):
80
+ log_space = np.zeros_like(depth)
81
+ non_negtive_mask = depth > 0
82
+ log_space[non_negtive_mask] = np.log(depth[non_negtive_mask])
83
+ return log_space, non_negtive_mask
84
+
85
+ def log_space2depth(log_space, **kwargs):
86
+ depth = np.exp(log_space)
87
+ return depth
88
+
FE2E/infer/util/metric.py ADDED
@@ -0,0 +1,151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ import torch
3
+
4
+
5
+ # Adapted from: https://github.com/victoresque/pytorch-template/blob/master/utils/util.py
6
+ class MetricTracker:
7
+ def __init__(self, *keys, writer=None):
8
+ self.writer = writer
9
+ self._data = pd.DataFrame(index=keys, columns=["total", "counts", "average"])
10
+ self.reset()
11
+
12
+ def reset(self):
13
+ for col in self._data.columns:
14
+ self._data[col].values[:] = 0.0
15
+
16
+ def update(self, key, value, n=1):
17
+ if self.writer is not None:
18
+ self.writer.add_scalar(key, value)
19
+ # 确保value是数值类型
20
+ value = float(value) if hasattr(value, '__float__') else value.item() if hasattr(value, 'item') else float(value)
21
+ self._data.at[key, "total"] += value * n
22
+ self._data.at[key, "counts"] += n
23
+ self._data.at[key, "average"] = self._data.at[key, "total"] / self._data.at[key, "counts"]
24
+
25
+ def avg(self, key):
26
+ return self._data.average[key]
27
+
28
+ def result(self):
29
+ return dict(self._data.average)
30
+
31
+ def pixel_mean(pred, gt, valid_mask):
32
+ if valid_mask is not None:
33
+ masked_pred = pred * valid_mask
34
+ masked_gt = gt * valid_mask
35
+
36
+ valid_pixel_count = torch.sum(valid_mask, dim=(0,1))
37
+
38
+ pred_mean = torch.sum(masked_pred, dim=(0,1)) / valid_pixel_count
39
+ gt_mean = torch.sum(masked_gt, dim=(0,1)) / valid_pixel_count
40
+ else:
41
+ pred_mean = torch.mean(pred, dim=(0,1))
42
+ gt_mean = torch.mean(gt, dim=(0,1))
43
+
44
+ mean_difference = torch.abs(pred_mean - gt_mean)
45
+ return mean_difference
46
+
47
+ def pixel_var(pred, gt, valid_mask):
48
+ if valid_mask is not None:
49
+ masked_pred = pred * valid_mask
50
+ masked_gt = gt * valid_mask
51
+
52
+ valid_pixel_count = torch.sum(valid_mask, dim=(0,1))
53
+
54
+ pred_mean = torch.sum(masked_pred, dim=(0,1)) / valid_pixel_count
55
+ gt_mean = torch.sum(masked_gt, dim=(0,1)) / valid_pixel_count
56
+
57
+ pred_var = torch.sum(valid_mask * (pred - pred_mean)**2, dim=(0,1)) / valid_pixel_count
58
+ gt_var = torch.sum(valid_mask * (gt - gt_mean)**2, dim=(0,1)) / valid_pixel_count
59
+ else:
60
+ pred_var = torch.var(pred, dim=(0,1))
61
+ gt_var = torch.var(gt, dim=(0,1))
62
+
63
+ var_difference = torch.abs(pred_var - gt_var)
64
+
65
+ return var_difference
66
+
67
+ def abs_relative_difference(output, target, valid_mask=None):
68
+ actual_output = output
69
+ actual_target = target
70
+ abs_relative_diff = torch.abs(actual_output - actual_target) / actual_target
71
+ if valid_mask is not None:
72
+ abs_relative_diff[~valid_mask] = 0
73
+ n = valid_mask.sum((-1, -2))
74
+ else:
75
+ n = output.shape[-1] * output.shape[-2]
76
+ abs_relative_diff = torch.sum(abs_relative_diff, (-1, -2)) / n
77
+ return abs_relative_diff.mean()
78
+
79
+
80
+ def squared_relative_difference(output, target, valid_mask=None):
81
+ actual_output = output
82
+ actual_target = target
83
+ square_relative_diff = (
84
+ torch.pow(torch.abs(actual_output - actual_target), 2) / actual_target
85
+ )
86
+ if valid_mask is not None:
87
+ square_relative_diff[~valid_mask] = 0
88
+ n = valid_mask.sum((-1, -2))
89
+ else:
90
+ n = output.shape[-1] * output.shape[-2]
91
+ square_relative_diff = torch.sum(square_relative_diff, (-1, -2)) / n
92
+ return square_relative_diff.mean()
93
+
94
+
95
+ def rmse_linear(output, target, valid_mask=None):
96
+ actual_output = output
97
+ actual_target = target
98
+ diff = actual_output - actual_target
99
+ if valid_mask is not None:
100
+ diff[~valid_mask] = 0
101
+ n = valid_mask.sum((-1, -2))
102
+ else:
103
+ n = output.shape[-1] * output.shape[-2]
104
+ diff2 = torch.pow(diff, 2)
105
+ mse = torch.sum(diff2, (-1, -2)) / n
106
+ rmse = torch.sqrt(mse)
107
+ return rmse.mean()
108
+
109
+
110
+ def rmse_log(output, target, valid_mask=None):
111
+ diff = torch.log(output) - torch.log(target)
112
+ if valid_mask is not None:
113
+ diff[~valid_mask] = 0
114
+ n = valid_mask.sum((-1, -2))
115
+ else:
116
+ n = output.shape[-1] * output.shape[-2]
117
+ diff2 = torch.pow(diff, 2)
118
+ mse = torch.sum(diff2, (-1, -2)) / n # [B]
119
+ rmse = torch.sqrt(mse)
120
+ return rmse.mean()
121
+
122
+
123
+
124
+
125
+ # adapt from: https://github.com/imran3180/depth-map-prediction/blob/master/main.py
126
+ def threshold_percentage(output, target, threshold_val, valid_mask=None):
127
+ d1 = output / target
128
+ d2 = target / output
129
+ max_d1_d2 = torch.max(d1, d2)
130
+ bit_mat = (max_d1_d2 < threshold_val).to(output.dtype)
131
+ if valid_mask is not None:
132
+ bit_mat = bit_mat * valid_mask.to(output.dtype)
133
+ n = valid_mask.sum((-1, -2))
134
+ else:
135
+ n = torch.tensor(output.shape[-1] * output.shape[-2], device=output.device)
136
+ n = torch.clamp(n, min=1)
137
+ count_mat = torch.sum(bit_mat, (-1, -2))
138
+ threshold_mat = count_mat / n.to(count_mat.dtype)
139
+ return threshold_mat.mean()
140
+
141
+
142
+ def delta1_acc(pred, gt, valid_mask):
143
+ return threshold_percentage(pred, gt, 1.25, valid_mask)
144
+
145
+
146
+ def delta2_acc(pred, gt, valid_mask):
147
+ return threshold_percentage(pred, gt, 1.25**2, valid_mask)
148
+
149
+
150
+ def delta3_acc(pred, gt, valid_mask):
151
+ return threshold_percentage(pred, gt, 1.25**3, valid_mask)
FE2E/infer/util/normal_utils.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import numpy as np
3
+ import torch
4
+ import torch.nn.functional as F
5
+ import torch.distributed as dist
6
+
7
+
8
+ def get_padding(orig_H, orig_W):
9
+ """ returns how the input of shape (orig_H, orig_W) should be padded
10
+ this ensures that both H and W are divisible by 32
11
+ """
12
+ if orig_W % 32 == 0:
13
+ l = 0
14
+ r = 0
15
+ else:
16
+ new_W = 32 * ((orig_W // 32) + 1)
17
+ l = (new_W - orig_W) // 2
18
+ r = (new_W - orig_W) - l
19
+
20
+ if orig_H % 32 == 0:
21
+ t = 0
22
+ b = 0
23
+ else:
24
+ new_H = 32 * ((orig_H // 32) + 1)
25
+ t = (new_H - orig_H) // 2
26
+ b = (new_H - orig_H) - t
27
+ return l, r, t, b
28
+
29
+ def pad_input(img, intrins, lrtb=(0,0,0,0)):
30
+ """ pad input image
31
+ img should be a torch tensor of shape (B, 3, H, W)
32
+ intrins should be a torch tensor of shape (B, 3, 3)
33
+ """
34
+ l, r, t, b = lrtb
35
+ if l+r+t+b != 0:
36
+ pad_value_R = (0 - 0.485) / 0.229
37
+ pad_value_G = (0 - 0.456) / 0.224
38
+ pad_value_B = (0 - 0.406) / 0.225
39
+
40
+ img_R = F.pad(img[:,0:1,:,:], (l, r, t, b), mode="constant", value=pad_value_R)
41
+ img_G = F.pad(img[:,1:2,:,:], (l, r, t, b), mode="constant", value=pad_value_G)
42
+ img_B = F.pad(img[:,2:3,:,:], (l, r, t, b), mode="constant", value=pad_value_B)
43
+
44
+ img = torch.cat([img_R, img_G, img_B], dim=1)
45
+
46
+ if intrins is not None:
47
+ intrins[:, 0, 2] += l
48
+ intrins[:, 1, 2] += t
49
+ return img, intrins
50
+
51
+ def compute_normal_error(pred_norm, gt_norm):
52
+ """ compute per-pixel surface normal error in degrees
53
+ NOTE: pred_norm and gt_norm should be torch tensors of shape (B, 3, ...)
54
+ """
55
+ pred_error = torch.cosine_similarity(pred_norm, gt_norm, dim=1)
56
+ pred_error = torch.clamp(pred_error, min=-1.0, max=1.0)
57
+ pred_error = torch.acos(pred_error) * 180.0 / np.pi
58
+ pred_error = pred_error.unsqueeze(1) # (B, 1, ...)
59
+ return pred_error
60
+
61
+ def compute_normal_metrics(total_normal_errors):
62
+ """ compute surface normal metrics (used for benchmarking)
63
+ NOTE: total_normal_errors should be a 1D torch tensor of errors in degrees
64
+ """
65
+ total_normal_errors = total_normal_errors.detach().cpu().numpy()
66
+ num_pixels = total_normal_errors.shape[0]
67
+
68
+ metrics = {
69
+ 'mean': np.average(total_normal_errors),
70
+ 'median': np.median(total_normal_errors),
71
+ 'rmse': np.sqrt(np.sum(total_normal_errors * total_normal_errors) / num_pixels),
72
+ 'a1': 100.0 * (np.sum(total_normal_errors < 5) / num_pixels),
73
+ 'a2': 100.0 * (np.sum(total_normal_errors < 7.5) / num_pixels),
74
+ 'a3': 100.0 * (np.sum(total_normal_errors < 11.25) / num_pixels),
75
+ 'a4': 100.0 * (np.sum(total_normal_errors < 22.5) / num_pixels),
76
+ 'a5': 100.0 * (np.sum(total_normal_errors < 30) / num_pixels)
77
+ }
78
+ return metrics
FE2E/infer/visualize.py ADDED
@@ -0,0 +1,130 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import cv2
2
+ import numpy as np
3
+
4
+ import torch
5
+
6
+ from matplotlib import cm
7
+ import matplotlib.pyplot as plt
8
+
9
+ import logging
10
+ logger = logging.getLogger('root')
11
+
12
+
13
+
14
+ def tensor_to_numpy(tensor_in):
15
+ """ torch tensor to numpy array
16
+ """
17
+ if tensor_in is not None:
18
+ if tensor_in.ndim == 3:
19
+ # (C, H, W) -> (H, W, C)
20
+ tensor_in = tensor_in.detach().cpu().permute(1, 2, 0).numpy()
21
+ elif tensor_in.ndim == 4:
22
+ # (B, C, H, W) -> (B, H, W, C)
23
+ tensor_in = tensor_in.detach().cpu().permute(0, 2, 3, 1).numpy()
24
+ else:
25
+ raise Exception('invalid tensor size')
26
+ return tensor_in
27
+
28
+ # def unnormalize(img_in, img_stats={'mean': [0.485, 0.456, 0.406],
29
+ # 'std': [0.229, 0.224, 0.225]}):
30
+ def unnormalize(img_in, img_stats={'mean': [0.5,0.5,0.5], 'std': [0.5,0.5,0.5]}):
31
+ """ unnormalize input image
32
+ """
33
+ if torch.is_tensor(img_in):
34
+ img_in = tensor_to_numpy(img_in)
35
+
36
+ # 检查输入图像的数值范围,决定是否需要去归一化
37
+ img_min, img_max = img_in.min(), img_in.max()
38
+
39
+ # 如果图像已经在[0,1]范围内,直接转换为[0,255]
40
+ if img_min >= -0.1 and img_max <= 1.1: # 允许小的浮点误差
41
+ img_out = np.clip(img_in, 0, 1)
42
+ img_out = (img_out * 255.0).astype(np.uint8)
43
+ else:
44
+ # 如果图像在[-1,1]或其他归一化范围内,进行标准去归一化
45
+ img_out = np.zeros_like(img_in)
46
+ for ich in range(3):
47
+ img_out[..., ich] = img_in[..., ich] * img_stats['std'][ich]
48
+ img_out[..., ich] += img_stats['mean'][ich]
49
+ img_out = np.clip(img_out, 0, 1)
50
+ img_out = (img_out * 255.0).astype(np.uint8)
51
+
52
+ return img_out
53
+
54
+ def normal_to_rgb(normal, normal_mask=None):
55
+ """ surface normal map to RGB
56
+ (used for visualization)
57
+
58
+ NOTE: x, y, z are mapped to R, G, B
59
+ NOTE: [-1, 1] are mapped to [0, 255]
60
+ """
61
+ if torch.is_tensor(normal):
62
+ normal = tensor_to_numpy(normal)
63
+ normal_mask = tensor_to_numpy(normal_mask)
64
+
65
+ normal_norm = np.linalg.norm(normal, axis=-1, keepdims=True)
66
+ normal_norm[normal_norm < 1e-12] = 1e-12
67
+ normal = normal / normal_norm
68
+
69
+ normal_rgb = (((normal + 1) * 0.5) * 255).astype(np.uint8)
70
+ if normal_mask is not None:
71
+ normal_rgb = normal_rgb * normal_mask # (B, H, W, 3)
72
+ return normal_rgb
73
+
74
+ def kappa_to_alpha(pred_kappa, to_numpy=True):
75
+ """ Confidence kappa to uncertainty alpha
76
+ Assuming AngMF distribution (introduced in https://arxiv.org/abs/2109.09881)
77
+ """
78
+ if torch.is_tensor(pred_kappa) and to_numpy:
79
+ pred_kappa = tensor_to_numpy(pred_kappa)
80
+
81
+ if torch.is_tensor(pred_kappa):
82
+ alpha = ((2 * pred_kappa) / ((pred_kappa ** 2.0) + 1)) \
83
+ + ((torch.exp(- pred_kappa * np.pi) * np.pi) / (1 + torch.exp(- pred_kappa * np.pi)))
84
+ alpha = torch.rad2deg(alpha)
85
+ else:
86
+ alpha = ((2 * pred_kappa) / ((pred_kappa ** 2.0) + 1)) \
87
+ + ((np.exp(- pred_kappa * np.pi) * np.pi) / (1 + np.exp(- pred_kappa * np.pi)))
88
+ alpha = np.degrees(alpha)
89
+
90
+ return alpha
91
+
92
+
93
+ def visualize_normal(target_dir, prefixs, img, pred_norm, pred_kappa,
94
+ gt_norm, gt_norm_mask, pred_error, num_vis=-1):
95
+ """ visualize normal
96
+ """
97
+ error_max = 60.0
98
+
99
+ # img = tensor_to_numpy(img) # (B, H, W, 3)
100
+ pred_norm = tensor_to_numpy(pred_norm) # (B, H, W, 3)
101
+ # pred_kappa = tensor_to_numpy(pred_kappa) # (B, H, W, 1)
102
+ gt_norm = tensor_to_numpy(gt_norm) # (B, H, W, 3)
103
+ gt_norm_mask = tensor_to_numpy(gt_norm_mask) # (B, H, W, 1)
104
+ pred_error = tensor_to_numpy(pred_error) # (B, H, W, 1)
105
+
106
+ num_vis = len(prefixs) if num_vis == -1 else num_vis
107
+ for i in range(num_vis):
108
+ # # img
109
+ # img_ = unnormalize(img[i, ...])
110
+ # target_path = '%s/%s_img.png' % (target_dir, prefixs[i])
111
+ # plt.imsave(target_path, img_)
112
+
113
+ # pred_norm
114
+ target_path = '%s/%s_norm.png' % (target_dir, prefixs[i])
115
+ plt.imsave(target_path, normal_to_rgb(pred_norm[i, ...]))
116
+
117
+ # # pred_kappa
118
+ # if pred_kappa is not None:
119
+ # pred_alpha = kappa_to_alpha(pred_kappa[i, :, :, 0])
120
+ # target_path = '%s/%s_pred_alpha.png' % (target_dir, prefixs[i])
121
+ # plt.imsave(target_path, pred_alpha, vmin=0.0, vmax=error_max, cmap='jet')
122
+
123
+ # gt_norm, pred_error
124
+ if gt_norm is not None:
125
+ target_path = '%s/%s_gt.png' % (target_dir, prefixs[i])
126
+ # plt.imsave(target_path, normal_to_rgb(gt_norm[i, ...], gt_norm_mask[i, ...]))
127
+
128
+ E = pred_error[i, :, :, 0] * gt_norm_mask[i, :, :, 0]
129
+ target_path = '%s/%s_pred_error.png' % (target_dir, prefixs[i])
130
+ plt.imsave(target_path, E, vmin=0, vmax=error_max, cmap='jet')
FE2E/library/__init__.py ADDED
File without changes