omar-ah committed · verified
Commit 523bea4 · 1 Parent(s): 2d0199e

Upload vil_tracker/models/mlstm.py with huggingface_hub

Files changed (1):
  1. vil_tracker/models/mlstm.py +297 -0
vil_tracker/models/mlstm.py ADDED
@@ -0,0 +1,297 @@
+ """
+ mLSTM Cell and Block for Vision-LSTM (ViL) backbone.
+
+ Architecture follows the official NX-AI ViL-S implementation:
+ - LinearHeadwiseExpand for Q/K/V projections (block-diagonal, ~3K params each)
+ - Depthwise causal Conv1d on the mLSTM branch
+ - Gates (igate, fgate) take concatenated [q, k, v] as input
+ - Output gate from the second half of the proj_up output
+ - Parallel mLSTM scan with matrix memory
+
+ References: Beck et al., "xLSTM: Extended Long Short-Term Memory" (arXiv:2405.04517)
+             Alkin et al., "Vision-LSTM: xLSTM as Generic Vision Backbone" (arXiv:2406.04303)
+ """
+
+ import math
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from einops import rearrange, einsum
+
+
+ class LinearHeadwiseExpand(nn.Module):
+     """Block-diagonal linear projection: each head has its own small weight matrix.
+
+     Instead of a full Linear(inner_dim, inner_dim) with inner_dim^2 params,
+     this uses num_heads independent (head_dim, head_dim) matrices.
+     For inner_dim=768, num_heads=192, head_dim=4:
+         Full linear: 768*768 = 589,824 params
+         Headwise:    192*4*4 = 3,072 params (192x fewer!)
+     """
+     def __init__(self, in_features: int, num_heads: int, bias: bool = False):
+         super().__init__()
+         assert in_features % num_heads == 0, f"{in_features} not divisible by {num_heads}"
+         self.num_heads = num_heads
+         self.head_dim = in_features // num_heads
+         self.in_features = in_features
+
+         # Weight: (num_heads, head_dim_out, head_dim_in)
+         self.weight = nn.Parameter(torch.empty(num_heads, self.head_dim, self.head_dim))
+         self.bias = nn.Parameter(torch.zeros(in_features)) if bias else None
+         self._reset_parameters()
+
+     def _reset_parameters(self):
+         nn.init.normal_(self.weight, std=math.sqrt(2.0 / (5.0 * self.head_dim)))
+         if self.bias is not None:
+             nn.init.zeros_(self.bias)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         # x: (..., in_features)
+         x = rearrange(x, '... (nh d) -> ... nh d', nh=self.num_heads)
+         x = einsum(x, self.weight, '... nh d, nh od d -> ... nh od')
+         x = rearrange(x, '... nh od -> ... (nh od)')
+         if self.bias is not None:
+             x = x + self.bias
+         return x
+
+
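+ # Quick sanity check for LinearHeadwiseExpand (an illustrative sketch, not part of the
+ # official module): confirms the parameter-count and shape claims in the docstring above.
+ def _demo_headwise_params():
+     layer = LinearHeadwiseExpand(in_features=768, num_heads=192)
+     n_params = sum(p.numel() for p in layer.parameters())
+     assert n_params == 192 * 4 * 4  # 3,072 -- vs 589,824 for nn.Linear(768, 768)
+     y = layer(torch.randn(2, 10, 768))
+     assert y.shape == (2, 10, 768)  # shape preserved; mixing happens per head only
+
+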
+ class StochasticDepth(nn.Module):
+     """Drop the entire residual path with probability `drop_prob` during training."""
+     def __init__(self, drop_prob: float = 0.0):
+         super().__init__()
+         self.drop_prob = drop_prob
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         if not self.training or self.drop_prob == 0.0:
+             return x
+         keep_prob = 1 - self.drop_prob
+         # One Bernoulli mask per sample, broadcast over all remaining dims
+         shape = (x.shape[0],) + (1,) * (x.ndim - 1)
+         mask = torch.bernoulli(torch.full(shape, keep_prob, device=x.device, dtype=x.dtype))
+         # Rescale by 1/keep_prob so the expected output equals the input
+         return x * mask / keep_prob
+
+
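+ # Illustrative check (a sketch, not part of the official module): the 1/keep_prob
+ # rescaling keeps the expected output equal to the input during training.
+ def _demo_stochastic_depth():
+     torch.manual_seed(0)
+     sd = StochasticDepth(drop_prob=0.25).train()
+     x = torch.ones(10_000, 4)
+     mean = sd(x).mean().item()
+     assert abs(mean - 1.0) < 0.05  # ~1.0 on average; exactly x in eval mode
+
+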
+ class mLSTMCell(nn.Module):
+     """Parallel mLSTM cell with matrix memory.
+
+     Official architecture from xLSTM/ViL:
+     - proj_up: Linear(D, 2*inner_dim) → split into mLSTM branch + output gate branch
+     - CausalConv1d on mLSTM branch (depthwise, k=4)
+     - LinearHeadwiseExpand for Q, K, V projections
+     - igate, fgate: Linear(3*inner_dim, num_heads) from concat(q, k, v)
+     - Parallel scan: C_t = f_t*C_{t-1} + i_t*(v_t ⊗ k_t), h_t = normalize(C_t·q_t)
+     - Output: (h + skip*conv_act) * sigmoid(z), then proj_down
+
+     ViL-S config: D=384, proj_factor=2.0, inner_dim=768,
+     qkv_proj_blocksize=4, num_heads=4
+     Per-cell params: ~920K (vs 2.66M with full Linear Q/K/V)
+     """
+     def __init__(
+         self,
+         dim: int = 384,
+         proj_factor: float = 2.0,
+         qkv_proj_blocksize: int = 4,
+         num_heads: int = 4,
+         conv_kernel: int = 4,
+         bias: bool = False,
+     ):
+         super().__init__()
+         self.dim = dim
+         # inner_dim rounded up to a multiple of 64 (e.g. 2.0 * 384 = 768)
+         self.inner_dim = math.ceil(proj_factor * dim / 64) * 64
+         self.num_heads = num_heads
+         self.head_dim = self.inner_dim // num_heads  # 768/4 = 192
+
+         # Number of projection heads for Q/K/V (block-diagonal)
+         num_proj_heads = self.inner_dim // qkv_proj_blocksize
+
+         # Up-projection: D -> 2*inner_dim (mLSTM branch + output gate branch)
+         self.proj_up = nn.Linear(dim, 2 * self.inner_dim, bias=bias)
+
+         # Depthwise causal conv1d on the mLSTM branch
+         self.conv1d = nn.Conv1d(
+             self.inner_dim, self.inner_dim,
+             kernel_size=conv_kernel,
+             padding=conv_kernel - 1,  # symmetric padding; causality enforced by trimming in forward()
+             groups=self.inner_dim,
+             bias=True,
+         )
+         self.conv_kernel = conv_kernel
+
+         # Block-diagonal Q/K/V projections
+         self.q_proj = LinearHeadwiseExpand(self.inner_dim, num_proj_heads, bias=bias)
+         self.k_proj = LinearHeadwiseExpand(self.inner_dim, num_proj_heads, bias=bias)
+         self.v_proj = LinearHeadwiseExpand(self.inner_dim, num_proj_heads, bias=bias)
+
+         # Gates: take concat(q, k, v) as input
+         self.igate = nn.Linear(3 * self.inner_dim, num_heads, bias=True)
+         self.fgate = nn.Linear(3 * self.inner_dim, num_heads, bias=True)
+
+         # Output normalization (per-head group norm)
+         self.outnorm = nn.GroupNorm(num_heads, self.inner_dim, affine=True)
+
+         # Down-projection: inner_dim -> D
+         self.proj_down = nn.Linear(self.inner_dim, dim, bias=bias)
+
+         # Learnable skip connection (inner_dim) and layer scale (dim, applied after proj_down)
+         self.learnable_skip = nn.Parameter(torch.ones(self.inner_dim))
+         self.layerscale = nn.Parameter(torch.ones(dim))
+
+         self._reset_gate_bias()
+
+     def _reset_gate_bias(self):
+         """Initialize the forget gate bias high (encourages remembering) and the input gate bias at zero."""
+         with torch.no_grad():
+             nn.init.zeros_(self.igate.bias)
+             # sigmoid(3.0) ~= 0.95, so the cell starts out retaining most of its memory
+             nn.init.constant_(self.fgate.bias, 3.0)
+
+     def forward(self, x: torch.Tensor, reverse: bool = False) -> torch.Tensor:
+         """
+         Args:
+             x: (B, S, D) input sequence
+             reverse: if True, process the sequence right-to-left (for bidirectional scanning)
+         Returns:
+             (B, S, D) output
+         """
+         B, S, D = x.shape
+
+         if reverse:
+             x = x.flip(1)
+
+         # 1. Up-project to 2*inner_dim
+         up = self.proj_up(x)                # (B, S, 2*inner)
+         x_mlstm = up[..., :self.inner_dim]  # mLSTM branch
+         z = up[..., self.inner_dim:]        # output gate branch
+
+         # 2. Causal conv1d on the mLSTM branch
+         x_conv = self.conv1d(x_mlstm.transpose(1, 2))  # (B, inner, S + k - 1)
+         x_conv = x_conv[..., :S].transpose(1, 2)       # keep the first S steps: causal
+         x_conv_act = F.gelu(x_conv)
+
+         # 3. Q/K/V projections (block-diagonal, very lightweight)
+         q = self.q_proj(x_conv_act)  # (B, S, inner)
+         k = self.k_proj(x_conv_act)  # (B, S, inner)
+         v = self.v_proj(x_mlstm)     # V from the pre-conv branch
+
+         # 4. Gates from concat(q, k, v)
+         qkv_cat = torch.cat([q, k, v], dim=-1)  # (B, S, 3*inner)
+         i_gate = self.igate(qkv_cat)            # (B, S, num_heads)
+         f_gate = self.fgate(qkv_cat)            # (B, S, num_heads)
+
+         # Gates in log space (avoids an exp/log round trip that could overflow):
+         # the input gate is i_t = exp(igate), so log(i_t) is the raw igate output;
+         # the forget gate is f_t = sigmoid(fgate), so log(f_t) = logsigmoid(fgate)
+         log_i = i_gate                # (B, S, H)
+         log_f = F.logsigmoid(f_gate)  # (B, S, H)
+
+         # 5. Reshape Q/K/V for multi-head matrix memory
+         q = rearrange(q, 'b s (h d) -> b h s d', h=self.num_heads)  # (B, H, S, D_h)
+         k = rearrange(k, 'b s (h d) -> b h s d', h=self.num_heads)
+         v = rearrange(v, 'b s (h d) -> b h s d', h=self.num_heads)
+
+         # 6. Parallel mLSTM computation (log-space stabilized)
+         # Cumulative sum of log forget gates for the parallel scan
+         log_f_cumsum = torch.cumsum(log_f.permute(0, 2, 1), dim=-1)  # (B, H, S)
+
+         # Pairwise differences give products of forget gates:
+         # log_f_cumsum[:, :, t] - log_f_cumsum[:, :, s] is the log-product of f gates
+         # from step s+1 to step t (query position t on dim -2, key position s on dim -1)
+         log_D = log_f_cumsum.unsqueeze(-1) - log_f_cumsum.unsqueeze(-2)  # (B, H, S, S)
+
+         # Causal mask: only attend to past positions
+         causal_mask = torch.tril(torch.ones(S, S, device=x.device, dtype=torch.bool))
+         log_D = log_D.masked_fill(~causal_mask, -1e9)
+
+         # Add the input gate contribution at each key position (broadcast over queries)
+         log_D = log_D + log_i.permute(0, 2, 1).unsqueeze(-2)
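+         # At this point log_D[..., t, s] = log(i_s) + sum_{u=s+1..t} log(f_u) for s <= t:
+         # exactly the weight that the sequential recurrence C_t = f_t*C_{t-1} + i_t*(v_t k_t^T)
+         # assigns to the (v_s k_s^T) update when C_t is unrolled, so this one masked decay
+         # matrix replaces the step-by-step scan (cf. Beck et al., arXiv:2405.04517).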
+
+         # Stabilize with the max trick before exponentiating
+         max_log_D = log_D.max(dim=-1, keepdim=True).values.clamp(min=-10)
+         D = torch.exp(log_D - max_log_D)  # (B, H, S, S), entries in [0, 1]
+         D = D.masked_fill(~causal_mask, 0.0)
+
+         # Attention-style readout: h = ((q @ k^T) * D) @ v, normalized row-wise
+         # Scale queries
+         q_scaled = q / math.sqrt(self.head_dim)
+
+         # Attention scores: (q @ k^T) * D
+         attn = torch.matmul(q_scaled, k.transpose(-1, -2)) * D  # (B, H, S, S)
+
+         # Normalizer, clamped below at 1.0 so small or negative row sums stay stable
+         normalizer = attn.sum(dim=-1, keepdim=True).clamp(min=1.0)
+         attn = attn / normalizer
+
+         # Output
+         h = torch.matmul(attn, v)                 # (B, H, S, D_h)
+         h = rearrange(h, 'b h s d -> b s (h d)')
+
+         # 7. Output norm (GroupNorm over channels acts as a per-head norm)
+         h = self.outnorm(h.transpose(1, 2)).transpose(1, 2)
+
+         # 8. Skip connection + output gate
+         h_skip = h + self.learnable_skip * x_conv_act
+         output = h_skip * torch.sigmoid(z)  # output gate
+
+         # 9. Down-project + layer scale
+         output = self.proj_down(output)
+         output = output * self.layerscale  # layerscale is (dim,), matching proj_down's output
+
+         if reverse:
+             output = output.flip(1)
+
+         return output
+
+
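+ # Shape and size sanity check for the cell (an illustrative sketch, not part of the
+ # official module); 196 tokens corresponds to a 14x14 patch grid.
+ def _demo_mlstm_cell():
+     cell = mLSTMCell(dim=384)
+     n_params = sum(p.numel() for p in cell.parameters())
+     assert 900_000 < n_params < 940_000  # ~920K, as claimed in the docstring
+     x = torch.randn(2, 196, 384)
+     y = cell(x, reverse=True)            # right-to-left scan
+     assert y.shape == x.shape
+
+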
+ class SwiGLUMLP(nn.Module):
+     """SwiGLU MLP as used in ViL blocks.
+
+     SwiGLU(x) = W2·(Swish(V·x) ⊙ W1·x), where Swish(u) = u·sigmoid(u) (i.e. SiLU)
+     """
+     def __init__(self, dim: int, mlp_ratio: float = 4.0, bias: bool = False, drop: float = 0.0):
+         super().__init__()
+         hidden_dim = int(dim * mlp_ratio)
+         # SwiGLU: two parallel projections, one gated
+         self.w1 = nn.Linear(dim, hidden_dim, bias=bias)  # value path
+         self.w2 = nn.Linear(hidden_dim, dim, bias=bias)  # down projection
+         self.v = nn.Linear(dim, hidden_dim, bias=bias)   # gate path
+         self.drop = nn.Dropout(drop)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         return self.drop(self.w2(F.silu(self.v(x)) * self.w1(x)))
+
+
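+ # Usage sketch (illustrative only; note that, per the block docstring below, standard
+ # ViL blocks do not actually use this separate MLP):
+ def _demo_swiglu():
+     mlp = SwiGLUMLP(dim=384, mlp_ratio=4.0)
+     y = mlp(torch.randn(2, 196, 384))
+     assert y.shape == (2, 196, 384)
+
+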
+ class mLSTMBlock(nn.Module):
+     """Single ViL block: LayerNorm → mLSTMCell → residual.
+
+     Following the official ViL-S architecture, there is NO separate MLP/FFN layer.
+     The gated output (proj_up → split → z-gate → proj_down) inside the mLSTMCell
+     already performs the role of dimension expansion + nonlinearity + projection.
+
+     This matches ViL-S: ~0.92M params per block, 24 blocks ≈ 22M backbone.
+     """
+     def __init__(
+         self,
+         dim: int = 384,
+         proj_factor: float = 2.0,
+         qkv_proj_blocksize: int = 4,
+         num_heads: int = 4,
+         conv_kernel: int = 4,
+         mlp_ratio: float = 4.0,  # kept for API compat but unused in standard blocks
+         drop_path: float = 0.0,
+         bias: bool = False,
+     ):
+         super().__init__()
+         self.norm1 = nn.LayerNorm(dim, bias=False)
+         self.mlstm = mLSTMCell(
+             dim=dim,
+             proj_factor=proj_factor,
+             qkv_proj_blocksize=qkv_proj_blocksize,
+             num_heads=num_heads,
+             conv_kernel=conv_kernel,
+             bias=bias,
+         )
+         self.drop_path = StochasticDepth(drop_path)
+
+     def forward(self, x: torch.Tensor, reverse: bool = False) -> torch.Tensor:
+         x = x + self.drop_path(self.mlstm(self.norm1(x), reverse=reverse))
+         return x
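+
+
+ if __name__ == "__main__":
+     # Minimal smoke test (a sketch; the alternation of the `reverse` flag mimics the
+     # bidirectional scanning described in the ViL paper, arXiv:2406.04303)
+     torch.manual_seed(0)
+     blocks = nn.ModuleList(mLSTMBlock(dim=384) for _ in range(4))
+     x = torch.randn(2, 196, 384)  # (batch, patch tokens, dim)
+     for idx, block in enumerate(blocks):
+         x = block(x, reverse=bool(idx % 2))  # alternate scan direction per block
+     print(x.shape)  # torch.Size([2, 196, 384])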