bdck committed
Commit 8847f53 · verified · 1 Parent(s): 4fafdbf

Upload point_sam/model/transformer.py

Files changed (1)
  1. point_sam/model/transformer.py +253 -0
point_sam/model/transformer.py ADDED
@@ -0,0 +1,253 @@
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.

# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/transformer.py

import math
from typing import Tuple, Type

import torch
from torch import Tensor, nn

class TwoWayTransformer(nn.Module):
    def __init__(
        self,
        depth: int,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
    ) -> None:
        """
        A transformer decoder that attends to an input point cloud using
        queries whose positional embedding is supplied.

        Args:
          depth (int): number of layers in the transformer
          embedding_dim (int): the channel dimension for the input embeddings
          num_heads (int): the number of heads for multihead attention. Must
            divide embedding_dim
          mlp_dim (int): the channel dimension internal to the MLP block
          activation (nn.Module): the activation to use in the MLP block
          attention_downsample_rate (int): the factor by which the internal
            dimension of the cross-attention layers is downscaled
        """
        super().__init__()
        self.depth = depth
        self.embedding_dim = embedding_dim
        self.num_heads = num_heads
        self.mlp_dim = mlp_dim
        self.layers = nn.ModuleList()

        for i in range(depth):
            self.layers.append(
                TwoWayAttentionBlock(
                    embedding_dim=embedding_dim,
                    num_heads=num_heads,
                    mlp_dim=mlp_dim,
                    activation=activation,
                    attention_downsample_rate=attention_downsample_rate,
                    skip_first_layer_pe=(i == 0),
                )
            )

        self.final_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm_final_attn = nn.LayerNorm(embedding_dim)

    def forward(
        self,
        pc_embedding: Tensor,
        pc_pe: Tensor,
        point_embedding: Tensor,
    ) -> Tuple[Tensor, Tensor]:
        """
        Args:
          pc_embedding (torch.Tensor): point cloud to attend to. Should be shape
            B x N_pc_tokens x embedding_dim.
          pc_pe (torch.Tensor): the positional encoding to add to the point cloud.
            Must have the same shape as pc_embedding.
          point_embedding (torch.Tensor): the embedding to add to the query points.
            Must have shape B x N_points x embedding_dim for any N_points.

        Returns:
          torch.Tensor: the processed point_embedding
          torch.Tensor: the processed pc_embedding
        """
        # Prepare queries
        queries = point_embedding
        keys = pc_embedding

        # Apply transformer blocks and final layernorm
        for layer in self.layers:
            queries, keys = layer(
                queries=queries,
                keys=keys,
                query_pe=point_embedding,
                key_pe=pc_pe,
            )

        # Apply the final attention layer from the query points to the point cloud
        q = queries + point_embedding
        k = keys + pc_pe
        attn_out = self.final_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm_final_attn(queries)

        return queries, keys

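# Illustrative sketch, not part of the upstream file: a shape check for
# TwoWayTransformer. The hyperparameters (depth=2, embedding_dim=256,
# num_heads=8, mlp_dim=2048) and tensor sizes below are assumptions chosen
# for the example, not values mandated by this module.
def _example_two_way_transformer() -> None:
    transformer = TwoWayTransformer(
        depth=2, embedding_dim=256, num_heads=8, mlp_dim=2048
    )
    pc_embedding = torch.randn(1, 1024, 256)  # B x N_pc_tokens x C
    pc_pe = torch.randn(1, 1024, 256)  # positional encoding, same shape
    point_embedding = torch.randn(1, 5, 256)  # B x N_points x C
    queries, keys = transformer(pc_embedding, pc_pe, point_embedding)
    assert queries.shape == (1, 5, 256)  # processed point_embedding
    assert keys.shape == (1, 1024, 256)  # processed pc_embedding
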
class TwoWayAttentionBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        mlp_dim: int = 2048,
        activation: Type[nn.Module] = nn.ReLU,
        attention_downsample_rate: int = 2,
        skip_first_layer_pe: bool = False,
    ) -> None:
        """
        A transformer block with four layers: (1) self-attention of sparse
        inputs, (2) cross attention of sparse inputs to dense inputs, (3) mlp
        block on sparse inputs, and (4) cross attention of dense inputs to sparse
        inputs.

        Arguments:
          embedding_dim (int): the channel dimension of the embeddings
          num_heads (int): the number of heads in the attention layers
          mlp_dim (int): the hidden dimension of the mlp block
          activation (nn.Module): the activation of the mlp block
          skip_first_layer_pe (bool): skip the PE on the first layer
        """
        super().__init__()
        self.self_attn = Attention(embedding_dim, num_heads)
        self.norm1 = nn.LayerNorm(embedding_dim)

        self.cross_attn_token_to_image = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )
        self.norm2 = nn.LayerNorm(embedding_dim)

        self.mlp = MLPBlock(embedding_dim, mlp_dim, activation)
        self.norm3 = nn.LayerNorm(embedding_dim)

        self.norm4 = nn.LayerNorm(embedding_dim)
        self.cross_attn_image_to_token = Attention(
            embedding_dim, num_heads, downsample_rate=attention_downsample_rate
        )

        self.skip_first_layer_pe = skip_first_layer_pe

    def forward(
        self, queries: Tensor, keys: Tensor, query_pe: Tensor, key_pe: Tensor
    ) -> Tuple[Tensor, Tensor]:
        # Self attention block
        if self.skip_first_layer_pe:
            queries = self.self_attn(q=queries, k=queries, v=queries)
        else:
            q = queries + query_pe
            attn_out = self.self_attn(q=q, k=q, v=queries)
            queries = queries + attn_out
        queries = self.norm1(queries)

        # Cross attention block, tokens attending to the point cloud embedding
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_token_to_image(q=q, k=k, v=keys)
        queries = queries + attn_out
        queries = self.norm2(queries)

        # MLP block
        mlp_out = self.mlp(queries)
        queries = queries + mlp_out
        queries = self.norm3(queries)

        # Cross attention block, the point cloud embedding attending to tokens
        q = queries + query_pe
        k = keys + key_pe
        attn_out = self.cross_attn_image_to_token(q=k, k=q, v=queries)
        keys = keys + attn_out
        keys = self.norm4(keys)

        return queries, keys

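# Illustrative sketch, not part of the upstream file: a single block updates
# both streams, returning refined sparse tokens (queries) and refined dense
# point cloud tokens (keys). Names and sizes below are example assumptions.
def _example_two_way_attention_block() -> None:
    block = TwoWayAttentionBlock(embedding_dim=256, num_heads=8)
    queries = torch.randn(1, 5, 256)  # sparse prompt tokens
    keys = torch.randn(1, 1024, 256)  # dense point cloud tokens
    query_pe = torch.randn(1, 5, 256)
    key_pe = torch.randn(1, 1024, 256)
    queries, keys = block(queries, keys, query_pe=query_pe, key_pe=key_pe)
    assert queries.shape == (1, 5, 256) and keys.shape == (1, 1024, 256)
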
class Attention(nn.Module):
    """
    An attention layer that allows for downscaling the size of the embedding
    after projection to queries, keys, and values.
    """

    def __init__(
        self,
        embedding_dim: int,
        num_heads: int,
        downsample_rate: int = 1,
    ) -> None:
        super().__init__()
        self.embedding_dim = embedding_dim
        self.internal_dim = embedding_dim // downsample_rate
        self.num_heads = num_heads
        assert (
            self.internal_dim % num_heads == 0
        ), "num_heads must divide internal_dim (embedding_dim // downsample_rate)."

        self.q_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.k_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.v_proj = nn.Linear(embedding_dim, self.internal_dim)
        self.out_proj = nn.Linear(self.internal_dim, embedding_dim)

    def _separate_heads(self, x: Tensor, num_heads: int) -> Tensor:
        b, n, c = x.shape
        x = x.reshape(b, n, num_heads, c // num_heads)
        return x.transpose(1, 2)  # B x N_heads x N_tokens x C_per_head

    def _recombine_heads(self, x: Tensor) -> Tensor:
        b, n_heads, n_tokens, c_per_head = x.shape
        x = x.transpose(1, 2)
        return x.reshape(b, n_tokens, n_heads * c_per_head)  # B x N_tokens x C

    def forward(self, q: Tensor, k: Tensor, v: Tensor) -> Tensor:
        # Input projections
        q = self.q_proj(q)
        k = self.k_proj(k)
        v = self.v_proj(v)

        # Separate into heads
        q = self._separate_heads(q, self.num_heads)
        k = self._separate_heads(k, self.num_heads)
        v = self._separate_heads(v, self.num_heads)

        # Attention
        _, _, _, c_per_head = q.shape
        attn = q @ k.permute(0, 1, 3, 2)  # B x N_heads x N_tokens x N_tokens
        attn = attn / math.sqrt(c_per_head)
        attn = torch.softmax(attn, dim=-1)

        # Get output
        out = attn @ v
        out = self._recombine_heads(out)
        out = self.out_proj(out)

        return out

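# Illustrative sketch, not part of the upstream file: with embedding_dim=256
# and downsample_rate=2, the q/k/v projections map 256 -> 128, attention runs
# in the 128-dim internal space, and out_proj maps back 128 -> 256. The sizes
# below are example assumptions.
def _example_attention_downsample() -> None:
    attn = Attention(embedding_dim=256, num_heads=8, downsample_rate=2)
    assert attn.internal_dim == 128  # 256 // 2
    x = torch.randn(2, 10, 256)
    out = attn(q=x, k=x, v=x)
    assert out.shape == (2, 10, 256)  # output is back at embedding_dim
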
# https://github.com/facebookresearch/segment-anything/blob/6fdee8f2727f4506cfbbe553e23b895e27956588/segment_anything/modeling/common.py#L13
class MLPBlock(nn.Module):
    def __init__(
        self,
        embedding_dim: int,
        mlp_dim: int,
        act: Type[nn.Module] = nn.GELU,
    ) -> None:
        super().__init__()
        self.lin1 = nn.Linear(embedding_dim, mlp_dim)
        self.lin2 = nn.Linear(mlp_dim, embedding_dim)
        self.act = act()

    def forward(self, x: Tensor) -> Tensor:
        return self.lin2(self.act(self.lin1(x)))
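

# Illustrative sketch, not part of the upstream file: run the example
# functions above as a quick smoke test when this module is executed directly.
if __name__ == "__main__":
    _example_two_way_transformer()
    _example_two_way_attention_block()
    _example_attention_downsample()
    print("transformer.py examples ran successfully")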