Alfred Liu committed
Commit 3403979 · 1 Parent(s): 90720f7

Support visualization (#25)

README.md CHANGED

@@ -13,6 +13,7 @@ This is the official PyTorch implementation for our ICCV 2023 paper:
 
 ## News
 
+* 2023-10-20: We provide code for visualizing the predictions and the sampling points, as requested in [#25](https://github.com/MCG-NJU/SparseBEV/issues/25).
 * 2023-09-23: We release [the native PyTorch implementation of sparse sampling](https://github.com/MCG-NJU/SparseBEV/blob/97c8c798284555accedd0625395dd397fa4511d2/models/csrc/wrapper.py#L14). You can use this version if you encounter problems when compiling CUDA operators. It’s only about 15% slower.
 * 2023-08-21: We release the paper, code and pretrained weights.
 * 2023-07-14: SparseBEV is accepted to ICCV 2023.

@@ -90,7 +91,9 @@ data/nuscenes
 ├── maps
 ├── nuscenes_infos_test_sweep.pkl
 ├── nuscenes_infos_train_sweep.pkl
+├── nuscenes_infos_train_mini_sweep.pkl
 ├── nuscenes_infos_val_sweep.pkl
+├── nuscenes_infos_val_mini_sweep.pkl
 ├── samples
 ├── sweeps
 ├── v1.0-test

@@ -149,6 +152,20 @@ export CUDA_VISIBLE_DEVICES=0
 python timing.py --config configs/r50_nuimg_704x256.py --weights checkpoints/r50_nuimg_704x256.pth
 ```
 
+## Visualization
+
+Visualize the predicted bboxes:
+
+```
+python viz_bbox_predictions.py --config configs/r50_nuimg_704x256.py --weights checkpoints/r50_nuimg_704x256.pth
+```
+
+Visualize the sampling points (like Fig. 6 in the paper):
+
+```
+python viz_sample_points.py --config configs/r50_nuimg_704x256.py --weights checkpoints/r50_nuimg_704x256.pth
+```
+
 ## Acknowledgements
 
 Many thanks to these excellent open-source projects:
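
Note: both viz scripts save their figures to a relative `outputs/` directory, and matplotlib does not create missing directories. A minimal guard, assuming the commands above are run from the repo root:

```
import os

# 'outputs/' is where viz_bbox_predictions.py and viz_sample_points.py
# write their .jpg files; create it up front to avoid a FileNotFoundError
os.makedirs('outputs', exist_ok=True)
```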
models/sparsebev_sampling.py CHANGED

@@ -80,9 +80,9 @@ def sampling_4d(sample_points, mlvl_feats, scale_weights, lidar2img, image_h, im
 
     # for visualization only
     if DUMP.enabled:
-        torch.save(torch.cat([sample_points_cam, homo_nonzero], dim=-1),
+        torch.save(torch.cat([sample_points_cam, homo_nonzero], dim=-1).cpu(),
                    '{}/sample_points_cam_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
-        torch.save(valid_mask,
+        torch.save(valid_mask.cpu(),
                    '{}/sample_points_cam_valid_mask_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
 
     valid_mask = valid_mask.permute(0, 1, 3, 4, 2)  # [B, T, Q, GP, N]
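
Moving the tensors to CPU before `torch.save` means the dumps can later be reloaded on a machine without the original CUDA device. A minimal sketch of reading them back; `out_dir` and the stage index are placeholder values, not part of this diff:

```
import torch

out_dir, stage = '/tmp/sparsebev_dump', 5  # hypothetical values
points = torch.load('{}/sample_points_cam_stage{}.pth'.format(out_dir, stage), map_location='cpu')
mask = torch.load('{}/sample_points_cam_valid_mask_stage{}.pth'.format(out_dir, stage), map_location='cpu')
print(points.shape, mask.shape)  # per-stage sampling points and their validity mask
```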
models/sparsebev_transformer.py CHANGED

@@ -186,9 +186,9 @@ class SparseBEVTransformerDecoderLayer(BaseModule):
             query_bbox_dec = decode_bbox(query_bbox, self.pc_range)
             bbox_pred_dec = decode_bbox(bbox_pred, self.pc_range)
             cls_score_sig = torch.sigmoid(cls_score)
-            torch.save(query_bbox_dec, '{}/query_bbox_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
-            torch.save(bbox_pred_dec, '{}/bbox_pred_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
-            torch.save(cls_score_sig, '{}/cls_score_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
+            torch.save(query_bbox_dec.cpu(), '{}/query_bbox_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
+            torch.save(bbox_pred_dec.cpu(), '{}/bbox_pred_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
+            torch.save(cls_score_sig.cpu(), '{}/cls_score_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
 
         return query_feat, cls_score, bbox_pred

@@ -216,7 +216,7 @@ class SparseBEVSelfAttention(BaseModule):
         tau = self.gen_tau(query_feat)  # [B, Q, 8]
 
         if DUMP.enabled:
-            torch.save(tau, '{}/sasa_tau_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
+            torch.save(tau.cpu(), '{}/sasa_tau_stage{}.pth'.format(DUMP.out_dir, DUMP.stage_count))
 
         tau = tau.permute(0, 2, 1)  # [B, 8, Q]
         attn_mask = dist[:, None, :, :] * tau[..., None]  # [B, 8, Q, Q]
models/utils.py CHANGED

@@ -1,3 +1,4 @@
+import tempfile
 import torch
 import torch.nn as nn
 import torch.nn.functional as F

@@ -308,7 +309,7 @@ class GpuPhotoMetricDistortion:
 class DumpConfig:
     def __init__(self):
         self.enabled = False
-        self.out_dir = 'outputs'
+        self.out_dir = tempfile.mkdtemp()
         self.stage_count = 0
         self.frame_count = 0
 
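With `out_dir` now a fresh temp directory per run, the dump machinery is used as follows: enable `DUMP`, run one forward pass, then read the per-stage files back from `DUMP.out_dir`. A minimal sketch, assuming `model` and `data` are built as in `viz_sample_points.py` below:

```
import torch
from models.utils import DUMP

DUMP.enabled = True  # the torch.save calls above only fire when this is set
with torch.no_grad():
    model(return_loss=False, rescale=True, **data)

# per-stage dumps now live under the fresh temp directory
tau = torch.load('{}/sasa_tau_stage{}.pth'.format(DUMP.out_dir, 5))
```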
viz_bbox_predictions.py ADDED

@@ -0,0 +1,244 @@
+import utils
+import logging
+import argparse
+import importlib
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from matplotlib.gridspec import GridSpec
+from PIL import Image
+from mmcv import Config, DictAction
+from mmcv.parallel import MMDataParallel
+from mmcv.runner import load_checkpoint
+from mmdet.apis import set_random_seed
+from mmdet3d.datasets import build_dataset, build_dataloader
+from mmdet3d.models import build_model
+from nuscenes.utils.data_classes import Box
+from pyquaternion import Quaternion
+from nuscenes.nuscenes import NuScenes
+from nuscenes.utils.geometry_utils import box_in_image
+from configs.r50_nuimg_704x256 import class_names
+from models.utils import VERSION
+
+
+classname_to_color = {  # RGB
+    'car': (255, 158, 0),                    # Orange
+    'pedestrian': (0, 0, 230),               # Blue
+    'trailer': (255, 140, 0),                # Darkorange
+    'truck': (255, 99, 71),                  # Tomato
+    'bus': (255, 127, 80),                   # Coral
+    'motorcycle': (255, 61, 99),             # Red
+    'construction_vehicle': (233, 150, 70),  # Darksalmon
+    'bicycle': (220, 20, 60),                # Crimson
+    'barrier': (112, 128, 144),              # Slategrey
+    'traffic_cone': (47, 79, 79),            # Darkslategrey
+}
+
+
+def convert_to_nusc_box(bboxes, scores=None, labels=None, names=None, score_threshold=0.3, lift_center=False):
+    results = []
+    for q in range(bboxes.shape[0]):
+        if scores is not None:
+            score = scores[q]
+        else:
+            score = 1.0
+
+        if score < score_threshold:
+            continue
+
+        if labels is not None:
+            label = labels[q]
+        else:
+            label = 0
+
+        if names is not None:
+            name = names[q]
+        else:
+            name = class_names[label]
+
+        if name not in class_names:
+            name = class_names[-1]
+
+        bbox = bboxes[q].copy()
+        if lift_center:
+            bbox[2] += bbox[5] * 0.5
+
+        orientation = Quaternion(axis=[0, 0, 1], radians=bbox[6])
+
+        box = Box(
+            center=[bbox[0], bbox[1], bbox[2]],
+            size=[bbox[4], bbox[3], bbox[5]],
+            orientation=orientation,
+            score=score,
+            label=label,
+            velocity=(bbox[7], bbox[8], 0),
+            name=name
+        )
+
+        results.append(box)
+
+    return results
+
+
+def viz_bbox(nusc, bboxes, data_info, fig, gs):
+    cam_types = [
+        'CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
+        'CAM_BACK_RIGHT', 'CAM_BACK', 'CAM_BACK_LEFT',
+    ]
+
+    for cam_id, cam_type in enumerate(cam_types):
+        sample_data_token = nusc.get('sample', data_info['token'])['data'][cam_type]
+
+        sd_record = nusc.get('sample_data', sample_data_token)
+        cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+        intrinsic = np.array(cs_record['camera_intrinsic'])
+
+        img_path = nusc.get_sample_data_path(sample_data_token)
+        img_size = (sd_record['width'], sd_record['height'])
+
+        ax = fig.add_subplot(gs[cam_id // 3, cam_id % 3])
+        ax.imshow(Image.open(img_path))
+
+        for bbox in bboxes:
+            bbox = bbox.copy()
+
+            # Move box to ego vehicle coord system
+            bbox.rotate(Quaternion(data_info['lidar2ego_rotation']))
+            bbox.translate(np.array(data_info['lidar2ego_translation']))
+
+            # Move box to sensor coord system
+            bbox.translate(-np.array(cs_record['translation']))
+            bbox.rotate(Quaternion(cs_record['rotation']).inverse)
+
+            if box_in_image(bbox, intrinsic, img_size):
+                c = np.array(classname_to_color[bbox.name]) / 255.0
+                bbox.render(ax, view=intrinsic, normalize=True, colors=(c, c, c), linewidth=1)
+
+        ax.axis('off')
+        ax.set_title(cam_type)
+        ax.set_xlim(0, img_size[0])
+        ax.set_ylim(img_size[1], 0)
+
+    sample = nusc.get('sample', data_info['token'])
+    lidar_data_token = sample['data']['LIDAR_TOP']
+
+    ax = fig.add_subplot(gs[0:2, 3])
+    nusc.explorer.render_sample_data(lidar_data_token, with_anns=False, ax=ax, verbose=False)
+    ax.axis('off')
+    ax.set_title('LIDAR_TOP')
+    ax.set_xlim(-40, 40)
+    ax.set_ylim(-40, 40)
+
+    sd_record = nusc.get('sample_data', lidar_data_token)
+    pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
+    cs_record = nusc.get('calibrated_sensor', sd_record['calibrated_sensor_token'])
+
+    for bbox in bboxes:
+        bbox = bbox.copy()
+
+        bbox.rotate(Quaternion(cs_record['rotation']))
+        bbox.translate(np.array(cs_record['translation']))
+        bbox.rotate(Quaternion(pose_record['rotation']))
+
+        yaw = Quaternion(pose_record['rotation']).yaw_pitch_roll[0]
+        bbox.rotate(Quaternion(scalar=np.cos(yaw / 2), vector=[0, 0, np.sin(yaw / 2)]).inverse)
+
+        c = np.array(classname_to_color[bbox.name]) / 255.0
+        bbox.render(ax, view=np.eye(4), colors=(c, c, c))
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Visualize the predicted bboxes')
+    parser.add_argument('--config', required=True)
+    parser.add_argument('--weights', required=True)
+    parser.add_argument('--override', nargs='+', action=DictAction)
+    parser.add_argument('--score_threshold', type=float, default=0.3)
+    args = parser.parse_args()
+
+    # parse configs
+    cfgs = Config.fromfile(args.config)
+    if args.override is not None:
+        cfgs.merge_from_dict(args.override)
+
+    # use val-mini for visualization
+    cfgs.data.val.ann_file = cfgs.data.val.ann_file.replace('val', 'val_mini')
+
+    # register custom modules
+    importlib.import_module('models')
+    importlib.import_module('loaders')
+
+    # MMCV, please shut up
+    from mmcv.utils.logging import logger_initialized
+    logger_initialized['root'] = logging.Logger(__name__, logging.WARNING)
+    logger_initialized['mmcv'] = logging.Logger(__name__, logging.WARNING)
+
+    # you need one GPU
+    assert torch.cuda.is_available()
+    assert torch.cuda.device_count() == 1
+
+    utils.init_logging(None, cfgs.debug)
+    logging.info('Using GPU: %s' % torch.cuda.get_device_name(0))
+    logging.info('Setting random seed: 0')
+    set_random_seed(0, deterministic=True)
+
+    logging.info('Loading validation set from %s' % cfgs.data.val.data_root)
+    val_dataset = build_dataset(cfgs.data.val)
+    val_loader = build_dataloader(
+        val_dataset,
+        samples_per_gpu=1,
+        workers_per_gpu=cfgs.data.workers_per_gpu,
+        num_gpus=1,
+        dist=False,
+        shuffle=False,
+        seed=0,
+    )
+
+    logging.info('Creating model: %s' % cfgs.model.type)
+    model = build_model(cfgs.model)
+    model.cuda()
+    model = MMDataParallel(model, [0])
+
+    logging.info('Loading checkpoint from %s' % args.weights)
+    checkpoint = load_checkpoint(
+        model, args.weights, map_location='cuda', strict=True,
+        logger=logging.Logger(__name__, logging.ERROR)
+    )
+
+    if 'version' in checkpoint:
+        VERSION.name = checkpoint['version']
+
+    logging.info('Initializing nuscenes toolkit...')
+    if 'mini' in cfgs.data.val.ann_file:
+        nusc = NuScenes(version='v1.0-mini', dataroot=cfgs.data.val.data_root, verbose=False)
+    else:
+        nusc = NuScenes(version='v1.0-trainval', dataroot=cfgs.data.val.data_root, verbose=False)
+
+    for i, data in enumerate(val_loader):
+        model.eval()
+
+        with torch.no_grad():
+            results = model(return_loss=False, rescale=True, **data)
+            results = results[0]['pts_bbox']
+
+        bboxes_pred = convert_to_nusc_box(
+            bboxes=results['boxes_3d'].tensor.numpy(),
+            scores=results['scores_3d'].numpy(),
+            labels=results['labels_3d'].numpy(),
+            score_threshold=args.score_threshold,
+            lift_center=True,
+        )
+
+        fig = plt.figure(figsize=(15.5, 5))
+        gs = GridSpec(2, 4, figure=fig)
+
+        viz_bbox(nusc, bboxes_pred, val_dataset.data_infos[i], fig, gs)
+
+        plt.tight_layout()
+        plt.savefig('outputs/bbox_%04d.jpg' % i, dpi=200)
+        plt.close()
+
+        logging.info('Visualized result is dumped to outputs/bbox_%04d.jpg' % i)
+
+
+if __name__ == '__main__':
+    main()
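
A quick way to sanity-check `convert_to_nusc_box` in isolation is to feed it a single hand-written box. The nine values below are made up and follow the layout the function reads (center, sizes, yaw, velocity); running this assumes the repo's dependencies (the nuScenes devkit, the config's `class_names`) are importable:

```
import numpy as np
from viz_bbox_predictions import convert_to_nusc_box

# one dummy box: x, y, z, the three box sizes, yaw, vx, vy (values invented)
bboxes = np.array([[10.0, 2.0, -1.0, 4.5, 1.9, 1.6, 0.3, 1.0, 0.0]])
boxes = convert_to_nusc_box(bboxes, scores=np.array([0.9]), labels=np.array([0]), lift_center=True)
print(boxes[0].name, boxes[0].center, boxes[0].score)
```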
viz_sample_points.py ADDED

@@ -0,0 +1,151 @@
+import utils
+import logging
+import argparse
+import importlib
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+from PIL import Image
+from mmcv import Config, DictAction
+from mmcv.parallel import MMDataParallel
+from mmcv.runner import load_checkpoint
+from mmdet.apis import set_random_seed
+from mmdet3d.datasets import build_dataset, build_dataloader
+from mmdet3d.models import build_model
+from models.utils import DUMP, VERSION
+
+
+def main():
+    parser = argparse.ArgumentParser(description='Visualize the sampling points')
+    parser.add_argument('--config', required=True)
+    parser.add_argument('--weights', required=True)
+    parser.add_argument('--override', nargs='+', action=DictAction)
+    parser.add_argument('--score_threshold', type=float, default=0.3)
+    parser.add_argument('--stage_id', type=int, default=5)
+    parser.add_argument('--num_frames', type=int, default=3)
+    parser.add_argument('--num_views', type=int, default=6)
+    args = parser.parse_args()
+
+    # parse configs
+    cfgs = Config.fromfile(args.config)
+    if args.override is not None:
+        cfgs.merge_from_dict(args.override)
+
+    # use val-mini for visualization
+    cfgs.data.val.ann_file = cfgs.data.val.ann_file.replace('val', 'val_mini')
+
+    # register custom modules
+    importlib.import_module('models')
+    importlib.import_module('loaders')
+
+    # MMCV, please shut up
+    from mmcv.utils.logging import logger_initialized
+    logger_initialized['root'] = logging.Logger(__name__, logging.WARNING)
+    logger_initialized['mmcv'] = logging.Logger(__name__, logging.WARNING)
+
+    # you need one GPU
+    assert torch.cuda.is_available()
+    assert torch.cuda.device_count() == 1
+
+    utils.init_logging(None, cfgs.debug)
+
+    logging.info('Using GPU: %s' % torch.cuda.get_device_name(0))
+    logging.info('Setting random seed: 0')
+    set_random_seed(0, deterministic=True)
+
+    logging.info('Loading validation set from %s' % cfgs.data.val.data_root)
+    val_dataset = build_dataset(cfgs.data.val)
+    val_loader = build_dataloader(
+        val_dataset,
+        samples_per_gpu=1,
+        workers_per_gpu=2,
+        num_gpus=1,
+        dist=False,
+        shuffle=False,
+        seed=0,
+    )
+
+    logging.info('Creating model: %s' % cfgs.model.type)
+    model = build_model(cfgs.model)
+    model.cuda()
+    model = MMDataParallel(model, [0])
+
+    logging.info('Loading checkpoint from %s' % args.weights)
+    checkpoint = load_checkpoint(
+        model, args.weights, map_location='cuda', strict=True,
+        logger=logging.Logger(__name__, logging.ERROR)
+    )
+
+    if 'version' in checkpoint:
+        VERSION.name = checkpoint['version']
+
+    for idx, data in enumerate(val_loader):
+        DUMP.enabled = True
+        model.eval()
+
+        with torch.no_grad():
+            model(return_loss=False, rescale=True, **data)
+
+        cls_scores = torch.load('{}/cls_score_stage{}.pth'.format(DUMP.out_dir, args.stage_id))[0]
+        cls_scores, cls_ids = torch.max(cls_scores, dim=-1)
+
+        # only keep queries with high confidence
+        query_ids = torch.where(cls_scores > args.score_threshold)[0]
+        cls_scores, cls_ids = cls_scores[query_ids], cls_ids[query_ids]
+
+        plt.figure(figsize=(240, 49))
+        view_mapping = [1, 2, 0, 4, 5, 3]
+
+        for frame_id in range(args.num_frames):
+            sample_points_cam = torch.load(
+                '{}/sample_points_cam_stage{}.pth'.format(DUMP.out_dir, args.stage_id)
+            )  # [1, 8f, 6view, 900, 32, 3]
+            valid_mask = torch.load(
+                '{}/sample_points_cam_valid_mask_stage{}.pth'.format(DUMP.out_dir, args.stage_id)
+            )  # [1, 8f, 6view, 900, 32]
+
+            for view_id in range(args.num_views):
+                filenames = data['img_metas'][0].data[0][0]['filename']
+                filename = filenames[frame_id * 6 + view_id]
+
+                # crop a 1600x640 area
+                img = Image.open(filename)
+                img = img.crop((0, 260, 1600, 900))
+
+                # plot the image
+                plot_id = frame_id * args.num_views + view_mapping[view_id] + 1
+                ax = plt.subplot(args.num_frames, args.num_views, plot_id)
+                ax.imshow(img)
+                ax.axis('off')
+                ax.set_xlim(0, 1600)
+                ax.set_ylim(640, 0)
+
+                # plot the sampling points of each query
+                for query_id in query_ids:
+                    xyz = sample_points_cam[0, frame_id, view_id, query_id].numpy()  # [32, 3]
+                    mask = valid_mask[0, frame_id, view_id, query_id].numpy()  # [32]
+                    mask = np.round(mask).astype(bool)
+
+                    cx = xyz[:, 0] * 1600
+                    cy = xyz[:, 1] * 640
+                    cz = xyz[:, 2]
+
+                    cz[np.where(cz <= 0)] = 1e8
+                    cz = np.log(60 / cz ** 0.8) * 2.4
+                    cx, cy, cz = cx[mask], cy[mask], cz[mask]
+
+                    if len(cz) == 0:
+                        continue
+
+                    ax.scatter(cx, cy, s=4**(cz + 1), alpha=0.7, color='C%d' % (query_id % 5))
+
+        plt.tight_layout()
+        plt.subplots_adjust(hspace=0.01, wspace=0.01)
+        plt.savefig('outputs/sp_%04d.jpg' % idx, dpi=20)
+        plt.close()
+
+        logging.info('Visualized result is dumped to outputs/sp_%04d.jpg' % idx)
+
+
+if __name__ == '__main__':
+    main()
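
The `s=4**(cz + 1)` in the scatter call above makes nearer sampling points render larger: depth is clamped to positive values, compressed with a log, then exponentiated into a marker area. A quick numeric check of that mapping, with example depths:

```
import numpy as np

depth = np.array([5.0, 20.0, 60.0])   # example depths in metres
cz = np.log(60 / depth ** 0.8) * 2.4  # same transform as viz_sample_points.py
size = 4 ** (cz + 1)                  # matplotlib scatter marker area
print(dict(zip(depth.tolist(), size.round(1).tolist())))  # size shrinks fast with depth
```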