bdck committed
Commit dccd28a · verified · 1 Parent(s): e30e254

Upload point_sam/model/common.py

Files changed (1):
  point_sam/model/common.py +602 -0
point_sam/model/common.py ADDED
@@ -0,0 +1,602 @@
+ # https://github.com/baaivision/Uni3D/blob/main/models/point_encoder.py
+ # Modified: torkit3d dependencies replaced with pure-PyTorch implementations
+ from typing import Union
+
+ import torch
+ from torch import nn
+ from torch.nn import functional as F
+
+
+ def sample_farthest_points(points: torch.Tensor, num_samples: int):
+     """Pure PyTorch farthest point sampling (FPS).
+
+     Args:
+         points: [B, N, 3]. Input point clouds.
+         num_samples: int. Number of points to sample.
+
+     Returns:
+         torch.Tensor: [B, num_samples]. Indices of sampled points.
+     """
+     device = points.device
+     batch_size, num_points, _ = points.shape
+     indices = torch.zeros(batch_size, num_samples, dtype=torch.long, device=device)
+     distances = torch.ones(batch_size, num_points, device=device) * float('inf')
+
+     # Start from a random point
+     farthest = torch.randint(0, num_points, (batch_size,), dtype=torch.long, device=device)
+
+     for i in range(num_samples):
+         indices[:, i] = farthest
+         centroid = points[torch.arange(batch_size, device=device), farthest, :].view(batch_size, 1, 3)
+         dist = torch.sum((points - centroid) ** 2, -1)
+         # Track each point's squared distance to its closest selected center
+         mask = dist < distances
+         distances[mask] = dist[mask]
+         # The next center is the point farthest from the current selection
+         farthest = torch.max(distances, dim=1)[1]
+
+     return indices
+
+
+ def batch_index_select(data: torch.Tensor, index: torch.Tensor, dim: int):
+     """Batch index select — pure PyTorch implementation.
+
+     Args:
+         data: [B, N, C] tensor.
+         index: [B, K] indices.
+         dim: the dimension of `data` to index along (e.g. 1 for [B, N, C] data).
+
+     Returns:
+         torch.Tensor: [B, K, C] selected values.
+     """
+     batch_size = data.shape[0]
+     # Reshape the index so it broadcasts against `data`, then gather along `dim`
+     view_shape = [1] * data.dim()
+     view_shape[0] = batch_size
+     view_shape[dim] = -1
+     index = index.view(view_shape)
+     shape = list(data.shape)
+     shape[dim] = index.shape[dim]
+     index = index.expand(shape)
+     return torch.gather(data, dim, index)
+
+
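
A quick sanity check of batch_index_select (annotation, not part of the uploaded file; the tensors are illustrative assumptions):

    data = torch.arange(24.0).reshape(2, 4, 3)    # [B=2, N=4, C=3]
    index = torch.tensor([[0, 2], [3, 1]])        # [B=2, K=2]
    out = batch_index_select(data, index, dim=1)  # [2, 2, 3]
    assert torch.equal(out[0], data[0, [0, 2]])
    assert torch.equal(out[1], data[1, [3, 1]])
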
+ def chamfer_distance(x: torch.Tensor, y: torch.Tensor):
+     """Compute one-directional nearest-neighbor distances from x to y
+     (the asymmetric half of the chamfer distance).
+
+     Args:
+         x: [B, N, 3]
+         y: [B, M, 3]
+
+     Returns:
+         min_dists: [B, N]. Euclidean distance from each point in x to its
+             nearest neighbor in y.
+         min_idx: [B, N]. Indices of the nearest neighbors in y.
+     """
+     dist = torch.cdist(x, y)  # [B, N, M]
+     min_dists, min_idx = torch.min(dist, dim=2)
+     return min_dists, min_idx
+
+
+ def fps(points: torch.Tensor, num_samples: int):
+     """A wrapper of farthest point sampling (FPS).
+
+     Args:
+         points: [B, N, 3]. Input point clouds.
+         num_samples: int. The number of points to sample.
+
+     Returns:
+         torch.Tensor: [B, num_samples, 3]. Sampled points.
+     """
+     idx = sample_farthest_points(points, num_samples)
+     sampled_points = batch_index_select(points, idx, dim=1)
+     return sampled_points
+
+
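
Usage sketch for the two FPS entry points (annotation, not part of the uploaded file; shapes are assumed for illustration):

    pts = torch.rand(2, 1024, 3)
    idx = sample_farthest_points(pts, 128)  # [2, 128] indices into pts
    centers = fps(pts, 128)                 # [2, 128, 3] sampled coordinates
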
+ def knn_points(
+     query: torch.Tensor,
+     key: torch.Tensor,
+     k: int,
+     sorted: bool = False,
+     transpose: bool = False,
+ ):
+     """Compute k nearest neighbors.
+
+     Args:
+         query: [B, N1, D], query points. [B, D, N1] if @transpose is True.
+         key: [B, N2, D], key points. [B, D, N2] if @transpose is True.
+         k: the number of nearest neighbors.
+         sorted: whether to sort the results by distance.
+         transpose: whether to transpose the last two dimensions.
+
+     Returns:
+         torch.Tensor: [B, N1, K], distances to the k nearest neighbors in the key.
+         torch.Tensor: [B, N1, K], indices of the k nearest neighbors in the key.
+     """
+     if transpose:
+         query = query.transpose(1, 2)
+         key = key.transpose(1, 2)
+     # Compute pairwise distances, [B, N1, N2]
+     distance = torch.cdist(query, key)
+     if k == 1:
+         knn_dist, knn_ind = torch.min(distance, dim=2, keepdim=True)
+     else:
+         knn_dist, knn_ind = torch.topk(distance, k, dim=2, largest=False, sorted=sorted)
+     return knn_dist, knn_ind
+
+
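
Usage sketch (annotation, not part of the uploaded file; shapes assumed):

    query, key = torch.rand(2, 64, 3), torch.rand(2, 1024, 3)
    dist, idx = knn_points(query, key, k=8)  # both [2, 64, 8]
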
+ class KNNGrouper(nn.Module):
+     """Group points based on K nearest neighbors.
+
+     A number of points are sampled as centers by farthest point sampling (FPS).
+     Each group is formed by the center and its k nearest neighbors.
+     """
+
+     def __init__(self, num_groups, group_size, radius=None, centralize_features=False):
+         super().__init__()
+         self.num_groups = num_groups
+         self.group_size = group_size
+         self.radius = radius
+         self.centralize_features = centralize_features
+
+     def forward(self, xyz: torch.Tensor, features: torch.Tensor, use_fps=True):
+         """
+         Args:
+             xyz: [B, N, 3]. Input point clouds.
+             features: [B, N, C]. Point features.
+             use_fps: bool. Whether to use farthest point sampling.
+                 If not, `xyz` should already be sampled by FPS.
+
+         Returns:
+             dict: {
+                 features: [B, G, K, 3 + C]. Group features
+                     ([B, G, K, 3 + 2C] if `centralize_features` is True).
+                 centers: [B, G, 3]. Group centers.
+                 knn_idx: [B, G, K]. The indices of k nearest neighbors.
+                 fps_idx: [B, G]. Indices of the sampled centers.
+             }
+         """
+         batch_size, num_points, _ = xyz.shape
+         # Sampling and kNN search are pure index computations; no gradients needed.
+         with torch.no_grad():
+             if use_fps:
+                 fps_idx = sample_farthest_points(xyz.float(), self.num_groups)
+                 centers = batch_index_select(xyz, fps_idx, dim=1)
+             else:
+                 fps_idx = torch.arange(self.num_groups, device=xyz.device)
+                 fps_idx = fps_idx.expand(batch_size, -1)
+                 centers = xyz[:, : self.num_groups]
+             _, knn_idx = knn_points(centers, xyz, self.group_size)  # [B, G, K]
+
+         # Flatten batched indices so neighbors can be gathered in one indexing op
+         batch_offset = torch.arange(batch_size, device=xyz.device) * num_points
+         batch_offset = batch_offset.reshape(-1, 1, 1)
+         knn_idx_flat = (knn_idx + batch_offset).reshape(-1)  # [B * G * K]
+
+         nbr_xyz = xyz.reshape(-1, 3)[knn_idx_flat]
+         nbr_xyz = nbr_xyz.reshape(batch_size, self.num_groups, self.group_size, 3)
+         nbr_xyz = nbr_xyz - centers.unsqueeze(2)  # [B, G, K, 3]
+         # NOTE: Follow PointNeXt to normalize the relative position
+         if self.radius is not None:
+             nbr_xyz = nbr_xyz / self.radius
+
+         nbr_feats = features.reshape(-1, features.shape[-1])[knn_idx_flat]
+         nbr_feats = nbr_feats.reshape(
+             batch_size, self.num_groups, self.group_size, features.shape[-1]
+         )
+
+         group_feats = [nbr_xyz, nbr_feats]
+         if self.centralize_features:
+             center_feats = batch_index_select(features, fps_idx, dim=1)
+             group_feats.append(nbr_feats - center_feats.unsqueeze(2))
+
+         group_feats = torch.cat(group_feats, dim=-1)
+         return dict(
+             features=group_feats, centers=centers, knn_idx=knn_idx, fps_idx=fps_idx
+         )
+
+
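
Usage sketch (annotation, not part of the uploaded file; the shapes and the radius value are illustrative assumptions):

    grouper = KNNGrouper(num_groups=64, group_size=32, radius=0.1)
    out = grouper(torch.rand(2, 1024, 3), torch.rand(2, 1024, 16))
    # out["features"]: [2, 64, 32, 19] (3 relative coords + 16 feature channels)
    # out["centers"]:  [2, 64, 3]; out["knn_idx"]: [2, 64, 32]
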
+ def group_with_centers_and_knn(
+     xyz: torch.Tensor,
+     features: torch.Tensor,
+     centers: torch.Tensor,
+     knn_idx: torch.Tensor,
+     radius: float = None,
+     centralize_features: bool = False,
+     center_idx: torch.Tensor = None,
+ ):
+     """Group points based on K nearest neighbors.
+
+     Args:
+         xyz: [B, N, 3]. Input point clouds.
+         features: [B * M, N, C]. Point features. Supports multiple feature sets
+             (M per point cloud) for the same point cloud.
+         centers: [B, L, 3]. Group centers.
+         knn_idx: [B, L, K]. The indices of k nearest neighbors.
+         radius: optional radius used to normalize relative positions.
+         centralize_features: whether to also append center-subtracted features.
+         center_idx: [B, L]. Indices of the centers; required if
+             `centralize_features` is True.
+
+     Returns:
+         torch.Tensor: [B * M, L, K, 3 + C]. Group features
+             ([B * M, L, K, 3 + 2C] if `centralize_features` is True).
+     """
+     assert xyz.dim() == features.dim(), (xyz.shape, features.shape)
+     assert xyz.shape[1] == features.shape[1], (xyz.shape, features.shape)
+     assert xyz.shape[0] == centers.shape[0] == knn_idx.shape[0]
+     assert knn_idx.shape[:2] == centers.shape[:2], (knn_idx.shape, centers.shape)
+
+     # 1. Compute neighborhood coordinates
+     batch_size, num_points, _ = xyz.shape
+     _, num_patches, patch_size = knn_idx.shape
+
+     batch_offset = torch.arange(batch_size, device=xyz.device) * num_points
+     batch_offset = batch_offset.reshape(-1, 1, 1)
+     knn_idx_flat = (knn_idx + batch_offset).reshape(-1)  # [B * L * K]
+
+     nbr_xyz = xyz.reshape(-1, 3)[knn_idx_flat]
+     nbr_xyz = nbr_xyz.reshape(batch_size, num_patches, patch_size, 3)
+     nbr_xyz = nbr_xyz - centers.unsqueeze(2)  # [B, L, K, 3]
+     if radius is not None:
+         nbr_xyz = nbr_xyz / radius
+
+     # 2. Compute neighborhood features
+     batch_size2 = features.shape[0]
+     repeats = features.shape[0] // xyz.shape[0]
+     knn_idx2 = torch.repeat_interleave(knn_idx, repeats, dim=0)  # [B * M, L, K]
+
+     batch_offset = torch.arange(batch_size2, device=xyz.device) * num_points
+     batch_offset = batch_offset.reshape(-1, 1, 1)
+     knn_idx_flat = (knn_idx2 + batch_offset).reshape(-1)  # [B * M * L * K]
+     nbr_feats = features.reshape(-1, features.shape[-1])[knn_idx_flat]
+     nbr_feats = nbr_feats.reshape(
+         batch_size2, num_patches, patch_size, features.shape[-1]
+     )
+
+     # 3. Concatenate features
+     nbr_xyz = torch.repeat_interleave(nbr_xyz, repeats, dim=0)
+     group_feats = [nbr_xyz, nbr_feats]
+     if centralize_features:
+         center_idx = torch.repeat_interleave(center_idx, repeats, dim=0)
+         center_feats = batch_index_select(features, center_idx, dim=1)
+         group_feats.append(nbr_feats - center_feats.unsqueeze(2))
+     return torch.cat(group_feats, dim=-1)
+
+
+ class NNGrouper(nn.Module):
+     """Group points based on the nearest neighbors."""
+
+     def __init__(self, num_groups: int):
+         super().__init__()
+         self.num_groups = num_groups
+
+     def forward(self, xyz: torch.Tensor, features: torch.Tensor):
+         with torch.no_grad():
+             fps_idx = sample_farthest_points(xyz.float(), self.num_groups)
+             centers = batch_index_select(xyz, fps_idx, dim=1)
+             _, nn_idx = knn_points(xyz, centers, 1)  # [B, N, 1]
+
+         # Compute the relative position of each point to its nearest center
+         nn_idx = nn_idx.squeeze(-1)
+         nbr_xyz = xyz - batch_index_select(centers, nn_idx, dim=1)  # [B, N, 3]
+
+         # Normalize the relative position into a unit direction plus a distance
+         dist = torch.linalg.norm(nbr_xyz, dim=-1, keepdim=True, ord=2)
+         nbr_xyz = nbr_xyz / torch.clamp(dist, min=1e-8)
+
+         group_feats = torch.cat([nbr_xyz, dist, features], dim=-1)  # [B, N, 4 + C]
+         return dict(features=group_feats, centers=centers, nn_idx=nn_idx)
+
+
+ def group_with_centers_and_nn(
+     xyz: torch.Tensor,
+     features: torch.Tensor,
+     centers: torch.Tensor,
+     nn_idx: torch.Tensor,
+ ):
+     """Group points based on the Voronoi diagram induced by the centers.
+
+     Args:
+         xyz: [B, N, 3]. Input point clouds.
+         features: [B, N, C]. Point features.
+         centers: [B, L, 3]. Group centers.
+         nn_idx: [B, N]. The index of the nearest center for each point.
+
+     Returns:
+         torch.Tensor: [B, N, 4 + C]. Per-point group features
+             (unit offset to the nearest center, distance, input features).
+     """
+     nbr_xyz = xyz - batch_index_select(centers, nn_idx, dim=1)  # [B, N, 3]
+     dist = torch.linalg.norm(nbr_xyz, dim=-1, keepdim=True, ord=2)
+     nbr_xyz = nbr_xyz / torch.clamp(dist, min=1e-8)
+     group_feats = torch.cat([nbr_xyz, dist, features], dim=-1)
+     return group_feats
+
+
+ def compute_interp_weights(query: torch.Tensor, key: torch.Tensor, k=3, eps=1e-8):
+     """Compute inverse-squared-distance interpolation weights for each query point.
+
+     Args:
+         query: [B, Nq, 3]. Query points.
+         key: [B, Nk, 3]. Key points.
+         k: int. The number of nearest neighbors.
+         eps: float. A small value to avoid division by zero.
+
+     Returns:
+         torch.Tensor: [B, Nq, K], indices of the k nearest neighbors in the key.
+         torch.Tensor: [B, Nq, K], interpolation weights (normalized to sum to 1).
+     """
+     dist, idx = knn_points(query, key, k)
+     inv_dist = 1.0 / torch.clamp(dist.square(), min=eps)
+     normalizer = torch.sum(inv_dist, dim=2, keepdim=True)
+     weight = inv_dist / normalizer  # [B, Nq, K]
+     return idx, weight
+
+
+ def interpolate_features(x: torch.Tensor, index: torch.Tensor, weight: torch.Tensor):
+     """Interpolate features based on the given index and weight.
+
+     Args:
+         x (torch.Tensor): The input tensor of shape (batch_size, num_keys, num_features).
+         index (torch.Tensor): The index tensor of shape (batch_size, num_queries, K).
+         weight (torch.Tensor): The weight tensor of shape (batch_size, num_queries, K).
+
+     Returns:
+         torch.Tensor: The interpolated features of shape (batch_size, num_queries, num_features).
+     """
+     B, Nq, K = index.shape
+     # Flatten batched indices so neighbor features can be gathered in one op
+     batch_offset = torch.arange(B, device=x.device).reshape(-1, 1, 1) * x.shape[1]
+     index_flat = (index + batch_offset).flatten()  # [B * Nq * K]
+     _x = x.flatten(0, 1)[index_flat].reshape(B, Nq, K, x.shape[-1])
+     return (_x * weight.unsqueeze(-1)).sum(-2)
+
+
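
Usage sketch for upsampling features from sparse keys to dense queries (annotation, not part of the uploaded file; shapes assumed):

    key, query = torch.rand(2, 256, 3), torch.rand(2, 1024, 3)
    feats = torch.rand(2, 256, 32)
    idx, w = compute_interp_weights(query, key, k=3)  # [2, 1024, 3] each
    up = interpolate_features(feats, idx, w)          # [2, 1024, 32]
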
+ def repeat_interleave(x: torch.Tensor, repeats: int, dim: int):
+     """Equivalent to `torch.repeat_interleave` with an integer `repeats`,
+     implemented with expand so the copy happens only at the final flatten."""
+     if repeats == 1:
+         return x
+     shape = list(x.shape)
+     shape.insert(dim + 1, repeats)
+     x = x.unsqueeze(dim + 1).expand(shape).flatten(dim, dim + 1)
+     return x
+
+
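
Equivalence check (annotation, not part of the uploaded file):

    x = torch.rand(2, 5, 3)
    assert torch.equal(repeat_interleave(x, 4, dim=0),
                       torch.repeat_interleave(x, 4, dim=0))  # both [8, 5, 3]
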
+ @torch.no_grad()
+ def sample_prompts_adapter(
+     points: torch.Tensor,
+     gt_masks: torch.Tensor,
+     pred_logits: Union[torch.Tensor, None],
+     threshold: float = None,
+     is_eval: bool = False,
+ ):
+     """Select a prompt sampler based on the batch IoU of the current prediction."""
+     if pred_logits is None:
+         return sample_fixed_points(
+             points, gt_masks, pred_logits, threshold, from_error_region=True
+         )
+     else:
+         batch_size, num_masks, _ = gt_masks.shape
+
+         # If the prediction is imperfect (batch IoU < 1) or in eval mode, use the
+         # fixed sampler; otherwise fall back to random prompt sampling.
+         gt_masks_copy = gt_masks.reshape(batch_size * num_masks, -1)
+         if threshold is None:
+             pred_masks = pred_logits > 0
+         else:
+             pred_masks = pred_logits.sigmoid() > threshold
+
+         iou = (gt_masks_copy & pred_masks).sum() / (gt_masks_copy | pred_masks).sum()
+         if iou < 1 or is_eval:
+             return sample_fixed_points(
+                 points, gt_masks, pred_logits, threshold, from_error_region=False
+             )
+         else:
+             return sample_prompts(points, gt_masks, pred_logits, threshold)
+
+
+ @torch.no_grad()
+ def sample_prompts(
+     points: torch.Tensor,
+     gt_masks: torch.Tensor,
+     pred_logits: Union[torch.Tensor, None],
+     threshold: float = None,
+ ):
+     """Sample prompts from point clouds given ground-truth and predicted masks.
+
+     Args:
+         points: [B, N, 3]. Input point clouds.
+         gt_masks: [B, M, N], bool. Ground-truth (binary) masks.
+         pred_logits: A float tensor of shape [B*M, N]. Predicted logits.
+             If None, the prompt points will be sampled from the ground-truth masks.
+         threshold: optional sigmoid threshold to binarize predictions.
+             If None, logits are thresholded at 0.
+
+     Returns:
+         torch.Tensor: [B*M, 1, 3]. Prompt points.
+         torch.Tensor: [B*M, 1], bool. Prompt labels.
+     """
+     batch_size, num_masks, _ = gt_masks.shape
+
+     # Sample the prompt point from the error region
+     # (or from the ground-truth mask if there is no prediction yet).
+     if pred_logits is None:
+         diff_masks = gt_masks
+     else:
+         pred_logits = pred_logits.reshape(batch_size, num_masks, -1)
+         assert gt_masks.shape == pred_logits.shape, (gt_masks.shape, pred_logits.shape)
+         if threshold is None:
+             pred_masks = pred_logits > 0
+         else:
+             pred_masks = pred_logits.sigmoid() > threshold
+         diff_masks = gt_masks != pred_masks
+
+     prompt_coords, prompt_labels = [], []
+     for i in range(batch_size):
+         for j in range(num_masks):
+             diff_inds = torch.nonzero(diff_masks[i, j])  # [?, 1]
+             if len(diff_inds) == 0:
+                 # Perfect prediction: fall back to the ground-truth mask
+                 diff_inds = torch.nonzero(gt_masks[i, j])
+             diff_inds = diff_inds.squeeze(1)  # [?]
+             idx = diff_inds[torch.randint(0, len(diff_inds), [1])]
+             prompt_coords.append(points[i][idx])
+             prompt_labels.append(gt_masks[i, j][idx])
+
+     prompt_coords = torch.stack(prompt_coords)
+     prompt_labels = torch.stack(prompt_labels)
+     return prompt_coords, prompt_labels
+
+
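
Usage sketch for the no-prediction path (annotation, not part of the uploaded file; shapes and masks are illustrative assumptions):

    pts = torch.rand(2, 1024, 3)
    gt = torch.zeros(2, 3, 1024, dtype=torch.bool)
    gt[..., :100] = True                           # 3 masks per cloud
    coords, labels = sample_prompts(pts, gt, None)
    # coords: [6, 1, 3]; labels: [6, 1], all True (sampled inside the GT masks)
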
+ @torch.no_grad()
+ def sample_fixed_points(
+     points: torch.Tensor,
+     gt_masks: torch.Tensor,
+     pred_logits: Union[torch.Tensor, None],
+     threshold: float = None,
+     from_error_region: bool = False,
+ ):
+     """Sample deterministic prompts: the point farthest from the mask border.
+
+     Args:
+         points: [B, N, 3]. Input point clouds.
+         gt_masks: [B, M, N], bool. Ground-truth (binary) masks.
+         pred_logits: A float tensor of shape [B*M, N]. Predicted logits.
+             If None, the prompt points will be sampled from the ground-truth masks.
+         threshold: optional sigmoid threshold to binarize predictions.
+             If None, logits are thresholded at 0.
+         from_error_region: whether to sample from the union of false negatives
+             and false positives instead of picking the better of the two.
+
+     Returns:
+         torch.Tensor: [B*M, 1, 3]. Prompt points.
+         torch.Tensor: [B*M, 1], bool. Prompt labels.
+     """
+     batch_size, num_masks, _ = gt_masks.shape
+
+     # Split the error region into false negatives (fn) and false positives (fp)
+     if pred_logits is None:
+         fn = gt_masks
+         fp = torch.zeros_like(fn)
+     else:
+         pred_logits = pred_logits.reshape(batch_size, num_masks, -1)
+         assert gt_masks.shape == pred_logits.shape, (gt_masks.shape, pred_logits.shape)
+         if threshold is None:
+             pred_masks = pred_logits > 0
+         else:
+             pred_masks = pred_logits.sigmoid() > threshold
+         fn = gt_masks & ~pred_masks
+         fp = ~gt_masks & pred_masks
+
+     prompt_points, prompt_labels = [], []
+     if from_error_region:
+         # NOTE: Assumes each mask yields a non-empty error region (or non-empty
+         # ground truth when pred_logits is None); otherwise the helper returns
+         # None and torch.stack below would fail.
+         mask = fn | fp
+         for i in range(batch_size):
+             for j in range(num_masks):
+                 coords, label, _ = sample_furthest_points_from_border(
+                     points[i], mask[i, j], gt_masks[i, j]
+                 )
+                 prompt_points.append(coords)
+                 prompt_labels.append(label)
+     else:
+         for i in range(batch_size):
+             for j in range(num_masks):
+                 pprompt_coord, pprompt_label, pdist = sample_furthest_points_from_border(
+                     points[i], fn[i, j], gt_masks[i, j]
+                 )
+                 nprompt_coord, nprompt_label, ndist = sample_furthest_points_from_border(
+                     points[i], fp[i, j], gt_masks[i, j]
+                 )
+                 if pdist > ndist:
+                     # The false-negative prompt is deeper inside its region
+                     prompt_points.append(pprompt_coord)
+                     prompt_labels.append(pprompt_label)
+                 elif ndist == -1:
+                     # Both error regions are empty: resample from the ground truth
+                     pprompt_coord, pprompt_label, pdist = sample_furthest_points_from_border(
+                         points[i], gt_masks[i, j], gt_masks[i, j]
+                     )
+                     prompt_points.append(pprompt_coord)
+                     prompt_labels.append(pprompt_label)
+                 else:
+                     prompt_points.append(nprompt_coord)
+                     prompt_labels.append(nprompt_label)
+
+     prompt_points = torch.stack(prompt_points)
+     prompt_labels = torch.stack(prompt_labels)
+     return prompt_points, prompt_labels
+
+
+ def sample_furthest_points_from_border(
+     coords: torch.Tensor, labels: torch.Tensor, gt: torch.Tensor
+ ):
+     """Pick the foreground point farthest from the mask border.
+
+     Args:
+         coords: [N, 3]. Input point clouds.
+         labels: [N]. Point labels (foreground mask).
+         gt: [N]. Ground-truth labels.
+
+     Returns:
+         tuple: ([1, 3] point, [1] ground-truth label, scalar distance to the
+             border), or (None, None, -1) if either side of the mask is empty.
+     """
+     bg_inds = labels == 0
+     fg_inds = labels == 1
+
+     # If either side of the mask is empty, there is no border to measure from
+     if bg_inds.sum() == 0 or fg_inds.sum() == 0:
+         return None, None, -1
+
+     # Distance from each foreground point to its nearest background point
+     min_dists, _ = chamfer_distance(coords[fg_inds][None, ...], coords[bg_inds][None, ...])
+
+     # Sample the farthest point from the border
+     center_idx = torch.argmax(min_dists)
+     center_coords = coords[fg_inds][center_idx]
+     center_dist = torch.max(min_dists)
+     center_label = gt[fg_inds][center_idx]
+
+     return center_coords[None, ...], center_label[None, ...], center_dist
+
+
+ class PatchEncoder(nn.Module):
+     """Encode point patches following the PointNet structure for segmentation."""
+
+     def __init__(self, in_channels, out_channels, hidden_dims: list[int]):
+         super().__init__()
+         self.in_channels = in_channels
+         self.out_channels = out_channels
+
+         # NOTE: The original Uni3D implementation uses BatchNorm1d, while we use LayerNorm.
+         self.conv1 = nn.Sequential(
+             nn.Linear(in_channels, hidden_dims[0]),
+             nn.LayerNorm(hidden_dims[0]),
+             nn.GELU(),
+             nn.Linear(hidden_dims[0], hidden_dims[0]),
+         )
+         self.conv2 = nn.Sequential(
+             nn.Linear(hidden_dims[0] * 2, hidden_dims[1]),
+             nn.LayerNorm(hidden_dims[1]),
+             nn.GELU(),
+             nn.Linear(hidden_dims[1], out_channels),
+         )
+
+     def forward(self, point_patches: torch.Tensor):
+         # point_patches: [B, L, K, C_in]
+         x = self.conv1(point_patches)
+         # Concatenate the per-patch max (global context) with per-point features
+         y = torch.max(x, dim=-2, keepdim=True).values
+         x = torch.cat([y.expand_as(x), x], dim=-1)
+         x = self.conv2(x)  # [B, L, K, C_out]
+         y = torch.max(x, dim=-2).values  # [B, L, C_out]
+         return y
+
+
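
Usage sketch (annotation, not part of the uploaded file; channel counts match the KNNGrouper example above and are assumptions):

    enc = PatchEncoder(in_channels=19, out_channels=256, hidden_dims=[128, 256])
    patches = torch.rand(2, 64, 32, 19)  # e.g. KNNGrouper output features
    tokens = enc(patches)                # [2, 64, 256], one token per group
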
+ class PatchEncoderNN(nn.Module):
+     """PatchEncoder variant that pools over nearest-center groups (see NNGrouper)."""
+
+     def __init__(self, in_channels, out_channels, hidden_dims: list[int]) -> None:
+         super().__init__()
+         self.conv1 = nn.Sequential(
+             nn.Linear(in_channels, hidden_dims[0]),
+             nn.LayerNorm(hidden_dims[0]),
+             nn.GELU(),
+             nn.Linear(hidden_dims[0], hidden_dims[0]),
+         )
+         self.conv2 = nn.Sequential(
+             nn.Linear(hidden_dims[0] * 2, hidden_dims[1]),
+             nn.LayerNorm(hidden_dims[1]),
+             nn.GELU(),
+             nn.Linear(hidden_dims[1], out_channels),
+         )
+
+     def forward(self, point_patches: torch.Tensor, nn_idx: torch.Tensor, center_number: int) -> torch.Tensor:
+         # point_patches: [B, N, C_in]; nn_idx: [B, N], nearest-center index per point
+         x = self.conv1(point_patches)
+         # Segmented max over each center's group. scatter_reduce requires the index
+         # to match the source shape, so expand nn_idx over the channel dimension.
+         # include_self=False keeps the zero init from clamping negative maxima.
+         idx = nn_idx.unsqueeze(-1).expand_as(x)  # [B, N, C]
+         y = torch.zeros([x.shape[0], center_number, x.shape[-1]], device=x.device, dtype=x.dtype)
+         y = torch.scatter_reduce(y, 1, idx, x, "max", include_self=False)
+         # Broadcast each group's max back to its member points
+         x_max = torch.gather(y, 1, idx)  # [B, N, C]
+         x = torch.cat([x_max, x], dim=-1)
+         x = self.conv2(x)
+         idx = nn_idx.unsqueeze(-1).expand_as(x)
+         y = torch.zeros([x.shape[0], center_number, x.shape[-1]], device=x.device, dtype=x.dtype)
+         y = torch.scatter_reduce(y, 1, idx, x, "max", include_self=False)
+         return y
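
Usage sketch (annotation, not part of the uploaded file; shapes are assumptions, with in_channels matching the 4 + C layout of NNGrouper's output):

    enc = PatchEncoderNN(in_channels=20, out_channels=256, hidden_dims=[128, 256])
    feats = torch.rand(2, 1024, 20)           # e.g. NNGrouper "features"
    nn_idx = torch.randint(0, 64, (2, 1024))  # nearest-center index per point
    tokens = enc(feats, nn_idx, center_number=64)  # [2, 64, 256]
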