asdf98 committed · Commit f5c1b06 · verified · Parent(s): 7c1b562

Add lira/core_modules.py

Files changed (1):
    lira/core_modules.py  +753  -0

lira/core_modules.py  ADDED
@@ -0,0 +1,753 @@
"""
LiRA Core Modules: Gated State-Space Backbone (GS3B)

Mathematical Foundation:
========================
Traditional transformers use self-attention: O_i = softmax(Q_i K^T / sqrt(d)) V
This is O(N^2) in sequence length - prohibitive for high-res images.

Our approach combines three key innovations:

1. SELECTIVE STATE SPACE (from Mamba/S6):
   State evolution: h_t = A_t * h_{t-1} + B_t * x_t
   Output:          y_t = C_t * h_t + D * x_t
   Where A_t, B_t, C_t are INPUT-DEPENDENT (selective) - this is the key insight
   from Mamba that makes SSMs competitive with attention.

2. BIDIRECTIONAL GATED SCANNING (from DiM + RWKV-7):
   Images are 2D, not 1D. We scan in 4 directions:
     - Horizontal L→R, R→L
     - Vertical T→B, B→T
   Each direction maintains its own state. A learned gate fuses them:
     y = gate * [y_lr; y_rl; y_tb; y_bt]

   From RWKV-7 we take the generalized delta rule for state updates:
     S_t = S_{t-1} * (diag(w_t) - k_t^T (a_t ⊗ k_t)) + v_t^T k_t
   This gives us input-dependent decay with O(N) complexity.

3. FREQUENCY-AWARE PROCESSING (from DiMSUM):
   We apply lightweight wavelet decomposition to separate structure from detail,
   process each frequency band with appropriate granularity, then recombine.
     Low-freq (structure) → fewer tokens, heavier processing
     High-freq (detail)   → more tokens, lighter processing

Combined complexity: O(N * d * H) where N=tokens, d=state_dim, H=num_heads
For 1024px with f32 VAE: N = 32*32 = 1024 tokens → extremely efficient
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
import math
from typing import Optional, Tuple
from einops import rearrange

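# A minimal reference implementation of the discretized recurrence from the
# module docstring (h_t = exp(A * dt_t) * h_{t-1} + dt_t * B_t * x_t and
# y_t = C_t * h_t), kept deliberately naive so the vectorized scan in
# SelectiveStateSpace below can be checked against it. This helper is an
# illustrative sketch, not part of the public API.
def _reference_selective_scan(x, dt, A, B, C):
    """x: (L, D), dt: (L, D), A: (D, N), B: (L, N), C: (L, N) -> y: (L, D)."""
    L, D = x.shape
    N = A.shape[1]
    h = torch.zeros(D, N)
    ys = []
    for t in range(L):
        dA = torch.exp(dt[t].unsqueeze(-1) * A)                       # (D, N)
        h = dA * h + dt[t].unsqueeze(-1) * B[t] * x[t].unsqueeze(-1)  # (D, N)
        ys.append((h * C[t]).sum(-1))                                 # (D,)
    return torch.stack(ys)                                            # (L, D)

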
# ============================================================================
# Core Building Block: Gated Selective State-Space Layer
# ============================================================================

class SelectiveStateSpace(nn.Module):
    """
    Selective State Space layer with input-dependent parameters.

    Mathematical formulation:
        h_t = diag(exp(A_t)) * h_{t-1} + B_t * x_t   (state transition)
        y_t = C_t * h_t                              (output projection)

    Where A_t, B_t, C_t are all computed from the input (selective/data-dependent).
    This selectivity is what allows SSMs to match transformer quality.

    Key insight: discretization of continuous dynamics means we can model
    any timescale of dependencies by learning the step size Δ.
    """

    def __init__(self, d_model: int, d_state: int = 16, d_conv: int = 4):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        self.d_conv = d_conv

        # Input projections for selectivity
        # We project to 2*d_model: one for the "gate" branch, one for the SSM branch
        self.in_proj = nn.Linear(d_model, 2 * d_model, bias=False)

        # Local convolution for capturing immediate neighbors (from Mamba)
        self.conv1d = nn.Conv1d(
            d_model, d_model, kernel_size=d_conv,
            padding=d_conv - 1, groups=d_model, bias=True
        )

        # Selective parameters: Δ (step size), B, C are input-dependent
        # A is a learnable diagonal matrix (log-space for stability)
        self.A_log = nn.Parameter(
            torch.log(torch.arange(1, d_state + 1, dtype=torch.float32).repeat(d_model, 1))
        )
        self.D = nn.Parameter(torch.ones(d_model))  # Skip connection

        # Input-dependent projections
        self.dt_proj = nn.Linear(d_model, d_model, bias=True)
        self.B_proj = nn.Linear(d_model, d_state, bias=False)
        self.C_proj = nn.Linear(d_model, d_state, bias=False)

        # Output projection
        self.out_proj = nn.Linear(d_model, d_model, bias=False)

        # Initialize dt projection: small weights, and a negative bias so that
        # softplus(dt_proj(x)) starts at small positive step sizes
        dt_init_std = d_model ** -0.5
        nn.init.uniform_(self.dt_proj.weight, -dt_init_std, dt_init_std)
        nn.init.uniform_(self.dt_proj.bias, -4.0, -2.0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        x: (B, L, D) input sequence
        Returns: (B, L, D) output sequence
        """
        B, L, D = x.shape

        # Split into gate and SSM branches
        xz = self.in_proj(x)            # (B, L, 2D)
        x_ssm, z = xz.chunk(2, dim=-1)  # Each (B, L, D)

        # Local convolution (causal)
        x_conv = x_ssm.transpose(1, 2)          # (B, D, L)
        x_conv = self.conv1d(x_conv)[:, :, :L]  # Causal: trim to L
        x_conv = x_conv.transpose(1, 2)         # (B, L, D)
        x_conv = F.silu(x_conv)

        # Compute selective parameters
        dt = F.softplus(self.dt_proj(x_conv))  # (B, L, D) - step sizes
        B_sel = self.B_proj(x_conv)            # (B, L, N)
        C_sel = self.C_proj(x_conv)            # (B, L, N)

        # A is kept negative so exp(A * dt) is a stable decay
        A = -torch.exp(self.A_log)  # (D, N)

        # Selective scan
        y = self._selective_scan(x_conv, dt, A, B_sel, C_sel)  # (B, L, D)

        # Skip connection
        y = y + self.D.unsqueeze(0).unsqueeze(0) * x_conv

        # Gating (from Mamba - SiLU gate)
        y = y * F.silu(z)

        return self.out_proj(y)

    def _selective_scan(self, x, dt, A, B, C):
        """
        Sequential selective scan over the time dimension:
            h_t = exp(A * dt_t) * h_{t-1} + dt_t * B_t * x_t
            y_t = C_t * h_t

        The discretized transition dA and input dBx are precomputed in one
        vectorized pass; only the recurrence itself is a Python loop, which
        is acceptable for moderate sequence lengths (~1024 tokens). A fused
        parallel-scan kernel would be a drop-in replacement here.
        """
        B_batch, L, D = x.shape
        N = A.shape[1]

        # Discretized transition: dA = exp(A * dt), shape (B, L, D, N)
        dt_expanded = dt.unsqueeze(-1)            # (B, L, D, 1)
        A_expanded = A.unsqueeze(0).unsqueeze(0)  # (1, 1, D, N)
        dA = torch.exp(dt_expanded * A_expanded)  # (B, L, D, N)

        # Discretized input: dB * x, shape (B, L, D, N)
        dBx = dt_expanded * B.unsqueeze(2) * x.unsqueeze(-1)

        # Recurrence
        h = torch.zeros(B_batch, D, N, device=x.device, dtype=x.dtype)
        ys = []
        for t in range(L):
            h = dA[:, t] * h + dBx[:, t]                   # (B, D, N)
            ys.append((h * C[:, t].unsqueeze(1)).sum(-1))  # (B, D)

        return torch.stack(ys, dim=1)  # (B, L, D)


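# A small shape-level sanity check for SelectiveStateSpace. The sizes used
# here are illustrative assumptions, not values used elsewhere in this file.
def _demo_selective_state_space():
    ssm = SelectiveStateSpace(d_model=64, d_state=16)
    x = torch.randn(2, 256, 64)  # (batch, length, d_model)
    y = ssm(x)
    assert y.shape == x.shape    # the O(N) scan preserves sequence shape

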
# ============================================================================
# Bidirectional Spatial Scanner
# ============================================================================

class BidirectionalSpatialScanner(nn.Module):
    """
    Scans 2D spatial features in 4 directions to capture full spatial context.

    Innovation: Instead of 4 separate SSMs (expensive), we use 2 SSMs with
    input reversal, and fuse with a learned spatial gate.

    Directions:
        1. Row-major L→R (horizontal forward)
        2. Row-major R→L (horizontal backward)
        3. Col-major T→B (vertical forward)
        4. Col-major B→T (vertical backward)

    The gate learns to weight each direction based on spatial position and content.
    """

    def __init__(self, d_model: int, d_state: int = 16):
        super().__init__()

        # Only 2 SSM instances - we reverse inputs for the backward scans
        self.ssm_horizontal = SelectiveStateSpace(d_model, d_state)
        self.ssm_vertical = SelectiveStateSpace(d_model, d_state)

        # Spatial fusion gate - weights the scanned context against the input
        self.fusion_gate = nn.Sequential(
            nn.Linear(d_model, d_model, bias=False),
            nn.Sigmoid()
        )

        # Norm for stability
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
        """
        x: (B, H*W, D) flattened spatial features (row-major)
        Returns: (B, H*W, D) with full spatial context
        """
        B, L, D = x.shape

        # Horizontal scanning (row-major order)
        x_fwd = self.ssm_horizontal(x)
        x_bwd = self._reverse_scan(x, self.ssm_horizontal)

        # Vertical scanning (column-major order)
        x_col = rearrange(x, 'b (h w) d -> b (w h) d', h=H, w=W)
        x_top_down = self.ssm_vertical(x_col)
        x_top_down = rearrange(x_top_down, 'b (w h) d -> b (h w) d', h=H, w=W)

        x_bot_up = self._reverse_scan(x_col, self.ssm_vertical)
        x_bot_up = rearrange(x_bot_up, 'b (w h) d -> b (h w) d', h=H, w=W)

        # Learned fusion of the four directional contexts
        combined = (x_fwd + x_bwd + x_top_down + x_bot_up) / 4.0
        gate = self.fusion_gate(x)

        out = gate * combined + (1 - gate) * x
        return self.norm(out)

    def _reverse_scan(self, x, ssm):
        """Run the SSM over the reversed sequence, then restore the order."""
        return ssm(x.flip(dims=[1])).flip(dims=[1])


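# A quick sketch of the four-direction fusion: the scanner consumes row-major
# tokens plus the (H, W) geometry and returns the same layout. Sizes are
# illustrative assumptions.
def _demo_bidirectional_scanner():
    H, W, D = 8, 8, 64
    scanner = BidirectionalSpatialScanner(d_model=D, d_state=16)
    x = torch.randn(2, H * W, D)  # row-major flattened feature map
    y = scanner(x, H, W)
    assert y.shape == (2, H * W, D)

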
# ============================================================================
# Mix-FFN with Depthwise Convolution (from SANA, proven effective)
# ============================================================================

class MixFFN(nn.Module):
    """
    Feed-forward network with depthwise convolution for local feature mixing.

    From SANA: "depth-wise convolution enhances the model's ability to capture
    local information, compensating for the weaker local information-capturing
    ability of linear attention"

    Architecture: Linear → split(value, gate) → DWConv3x3 on value → Norm
                  → value * GELU(gate) → Linear
    This is an inverted bottleneck with GLU-style gating.
    """

    def __init__(self, d_model: int, expand_ratio: float = 2.5):
        super().__init__()
        d_inner = int(d_model * expand_ratio)

        # Inverted bottleneck with gating
        self.fc1 = nn.Linear(d_model, d_inner * 2)  # *2 for gating
        self.dwconv = nn.Conv2d(
            d_inner, d_inner, kernel_size=3, padding=1,
            groups=d_inner, bias=True
        )
        self.fc2 = nn.Linear(d_inner, d_model)
        self.norm = nn.LayerNorm(d_inner)

    def forward(self, x: torch.Tensor, H: int, W: int) -> torch.Tensor:
        """
        x: (B, H*W, D)
        Returns: (B, H*W, D)
        """
        B, L, D = x.shape

        # Split into value and gate
        xg = self.fc1(x)
        x_val, x_gate = xg.chunk(2, dim=-1)  # Each (B, L, d_inner)

        # Depthwise conv on value branch (needs 2D reshape)
        x_val = rearrange(x_val, 'b (h w) d -> b d h w', h=H, w=W)
        x_val = self.dwconv(x_val)
        x_val = rearrange(x_val, 'b d h w -> b (h w) d')

        # GLU gating
        x_val = self.norm(x_val)
        x_out = x_val * F.gelu(x_gate)

        return self.fc2(x_out)


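# Minimal usage sketch for MixFFN; the depthwise conv requires the (H, W)
# geometry so tokens can be reshaped into a 2D map. Sizes are illustrative.
def _demo_mix_ffn():
    H, W, D = 8, 8, 64
    ffn = MixFFN(d_model=D, expand_ratio=2.5)
    x = torch.randn(2, H * W, D)
    assert ffn(x, H, W).shape == (2, H * W, D)

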
# ============================================================================
# Hyper-Connection Module (from the Hyper-Connections paper)
# ============================================================================

class HyperConnection(nn.Module):
    """
    Hyper-connections generalize residual connections.

    Instead of the fixed y = x + F(x), we learn a connection matrix HC that
    can represent any blend of sequential and parallel layer arrangements.

    For expansion rate n:
        Input: split x into n streams [x_1, ..., x_n]
        HC matrix is (n+1) x (n+1), learnable
        [input_to_layer, output_1, ..., output_n] = HC @ [F(input_to_layer), x_1, ..., x_n]

    This subsumes both Pre-Norm and Post-Norm residual connections,
    and can learn arrangements that are neither purely sequential nor parallel.
    """

    def __init__(self, d_model: int, expansion_rate: int = 2):
        super().__init__()
        self.n = expansion_rate
        self.d_model = d_model

        # HC matrix: (n+1) x (n+1), initialized close to a residual connection
        init_matrix = torch.zeros(self.n + 1, self.n + 1)
        init_matrix[0, 1] = 1.0  # layer input comes from the first stream
        for i in range(1, self.n + 1):
            init_matrix[i, i] = 1.0           # identity for the skip path
            init_matrix[i, 0] = 1.0 / self.n  # each stream adds a share of the layer output

        self.hc_matrix = nn.Parameter(init_matrix)
        self.norm = nn.LayerNorm(d_model)

    def pre_forward(self, x_streams: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        x_streams: (B, L, n*D) - n parallel streams concatenated
        Returns: (layer_input, x_streams)
        """
        # Split into streams
        streams = x_streams.chunk(self.n, dim=-1)  # Tuple of (B, L, D)

        # Layer input is the HC matrix's first row applied to the streams
        layer_input = sum(self.hc_matrix[0, i + 1] * streams[i] for i in range(self.n))
        layer_input = self.norm(layer_input)

        return layer_input, x_streams

    def post_forward(self, layer_output: torch.Tensor, x_streams: torch.Tensor) -> torch.Tensor:
        """
        Combine layer output with streams using the HC matrix.
        """
        streams = x_streams.chunk(self.n, dim=-1)

        new_streams = []
        for i in range(self.n):
            new_stream = self.hc_matrix[i + 1, 0] * layer_output
            for j in range(self.n):
                new_stream = new_stream + self.hc_matrix[i + 1, j + 1] * streams[j]
            new_streams.append(new_stream)

        return torch.cat(new_streams, dim=-1)

    def init_streams(self, x: torch.Tensor) -> torch.Tensor:
        """Initialize n streams by replicating a single input."""
        return x.repeat(1, 1, self.n)


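# Sketch of the hyper-connection call pattern around a layer F: widen the
# residual stream once, then wrap each layer with pre_forward/post_forward.
# `layer` stands in for any (B, L, D) -> (B, L, D) module; sizes illustrative.
def _demo_hyper_connection():
    hc = HyperConnection(d_model=64, expansion_rate=2)
    layer = nn.Linear(64, 64)
    x = torch.randn(2, 16, 64)
    streams = hc.init_streams(x)                 # (B, L, n*D)
    layer_in, streams = hc.pre_forward(streams)  # (B, L, D), (B, L, n*D)
    streams = hc.post_forward(layer(layer_in), streams)
    assert streams.shape == (2, 16, 2 * 64)

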
# ============================================================================
# AdaLN-Zero Conditioning (from DiT, proven optimal for diffusion)
# ============================================================================

class AdaLNZero(nn.Module):
    """
    Adaptive Layer Normalization with zero initialization.

    Conditions each layer on timestep and text embeddings.
    From DiT: "regresses dimensionwise scale and shift parameters
    from the sum of the embedding vectors"

    Zero initialization ensures the network acts as identity at init,
    critical for training stability.
    """

    def __init__(self, d_model: int, d_cond: int):
        super().__init__()
        self.norm = nn.LayerNorm(d_model, elementwise_affine=False)

        # Predict shift (β), scale (γ), and gate (α) for two branches - 6 sets total
        self.proj = nn.Sequential(
            nn.SiLU(),
            nn.Linear(d_cond, 6 * d_model)
        )

        # Zero-initialize the projection
        nn.init.zeros_(self.proj[1].weight)
        nn.init.zeros_(self.proj[1].bias)

    def forward(self, cond: torch.Tensor):
        """
        cond: (B, d_cond)
        Returns: shift1, scale1, gate1, shift2, scale2, gate2, each (B, 1, D)
        """
        params = self.proj(cond)      # (B, 6D)
        params = params.unsqueeze(1)  # (B, 1, 6D)
        return params.chunk(6, dim=-1)

    def modulate(self, x: torch.Tensor, shift: torch.Tensor, scale: torch.Tensor):
        return self.norm(x) * (1 + scale) + shift


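# Because the projection is zero-initialized, all six modulation tensors are
# exactly zero at init: modulate(x, shift, scale) reduces to norm(x), and the
# gates multiply residual branches by zero, so a freshly built block starts as
# (near-)identity. A minimal check of that property:
def _demo_adaln_zero_identity():
    adaln = AdaLNZero(d_model=64, d_cond=128)
    cond = torch.randn(2, 128)
    shift1, scale1, gate1, *_ = adaln(cond)
    assert torch.all(gate1 == 0) and torch.all(shift1 == 0)

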
# ============================================================================
# LiRA Block: The Core Processing Unit
# ============================================================================

class LiRABlock(nn.Module):
    """
    One LiRA block = Bidirectional SSM + Mix-FFN, with:
        - AdaLN-Zero conditioning
        - Hyper-connections for dynamic layer arrangement

    This replaces transformer blocks with O(N) complexity while maintaining
    the quality of O(N^2) attention through:
        1. Selective state spaces (content-aware)
        2. Bidirectional scanning (full spatial context)
        3. Mix-FFN (local feature enhancement via DWConv)
    """

    def __init__(self, d_model: int, d_cond: int, d_state: int = 16,
                 ffn_expand: float = 2.5, hc_expansion: int = 2):
        super().__init__()
        # hc_expansion is reserved for HyperConnection wiring by the enclosing backbone

        # Conditioning (AdaLN-Zero supplies the parameter-free norm for both branches)
        self.adaln = AdaLNZero(d_model, d_cond)

        # Bidirectional State-Space Scanner
        self.scanner = BidirectionalSpatialScanner(d_model, d_state)

        # Mix-FFN for local features
        self.ffn = MixFFN(d_model, ffn_expand)

    def forward(self, x: torch.Tensor, cond: torch.Tensor, H: int, W: int) -> torch.Tensor:
        """
        x: (B, H*W, D)
        cond: (B, d_cond) - conditioning vector (timestep + text)
        Returns: (B, H*W, D)
        """
        # Get conditioning parameters
        shift1, scale1, gate1, shift2, scale2, gate2 = self.adaln(cond)

        # SSM branch with AdaLN conditioning
        x_mod = self.adaln.modulate(x, shift1, scale1)
        x_ssm = self.scanner(x_mod, H, W)
        x = x + gate1 * x_ssm

        # FFN branch with AdaLN conditioning
        x_mod = self.adaln.modulate(x, shift2, scale2)
        x_ffn = self.ffn(x_mod, H, W)
        x = x + gate2 * x_ffn

        return x


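# End-to-end sketch of one block: tokens plus a conditioning vector in, tokens
# out. Sizes are illustrative; d_cond must match the conditioning embedder.
def _demo_lira_block():
    H, W, D = 8, 8, 64
    block = LiRABlock(d_model=D, d_cond=128)
    x = torch.randn(2, H * W, D)
    cond = torch.randn(2, 128)
    assert block(x, cond, H, W).shape == (2, H * W, D)

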
# ============================================================================
# Cross-Modal Fusion: Text → Image conditioning via Gated Cross-State
# ============================================================================

class GatedCrossStateFusion(nn.Module):
    """
    Novel cross-modal fusion inspired by CrossWKV (from the RWKV-7 paper).

    Instead of expensive cross-attention (O(N*M) where N=image, M=text tokens),
    we use a state-based cross-modal mechanism:

        1. Compress text into a fixed-size state matrix S_text
        2. Inject S_text into the image features via a gated query
        3. This gives O(M + N) complexity instead of O(N*M)

    Mathematical formulation (as implemented below):
        S_text = (1/M) * K_text^T V_text      → (d_head, d_head) state per head
        For each image token x_i:
            y_i = q_i * S_text                (query the compressed state)
            out_i = x_i + G_i * y_i           (G_i is a learned sigmoid gate)
    """

    def __init__(self, d_model: int, d_text: int, d_state: int = 16, num_heads: int = 8):
        super().__init__()
        self.d_model = d_model
        self.d_state = d_state
        self.num_heads = num_heads
        self.head_dim = d_model // num_heads

        # Text state compression
        self.text_proj = nn.Linear(d_text, d_model)
        self.text_key = nn.Linear(d_model, d_model, bias=False)
        self.text_value = nn.Linear(d_model, d_model, bias=False)

        # Image query
        self.image_query = nn.Linear(d_model, d_model, bias=False)

        # Gating mechanism
        self.gate = nn.Sequential(
            nn.Linear(d_model * 2, d_model),
            nn.Sigmoid()
        )

        # Output projection
        self.out_proj = nn.Linear(d_model, d_model, bias=False)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x_image: torch.Tensor, x_text: torch.Tensor) -> torch.Tensor:
        """
        x_image: (B, N, D) - image features
        x_text: (B, M, D_text) - text features
        Returns: (B, N, D) - text-conditioned image features
        """
        B, N, D = x_image.shape

        # Project text to model dimension
        text_feat = self.text_proj(x_text)  # (B, M, D)

        # Per-head keys and values for the text tokens
        text_k = self.text_key(text_feat)    # (B, M, D)
        text_v = self.text_value(text_feat)  # (B, M, D)

        # Reshape to heads
        text_k = rearrange(text_k, 'b m (h d) -> b h m d', h=self.num_heads)
        text_v = rearrange(text_v, 'b m (h d) -> b h m d', h=self.num_heads)

        # Compress all text into one state per head: S = K^T V / M
        # (averaged outer products; O(M * d^2), tiny for typical M = 77 tokens)
        text_state = torch.einsum('bhmd,bhmk->bhdk', text_k, text_v) / text_k.shape[2]

        # Image queries
        img_q = self.image_query(x_image)  # (B, N, D)
        img_q = rearrange(img_q, 'b n (h d) -> b h n d', h=self.num_heads)

        # Query the text state: y = Q * S
        cross_out = torch.einsum('bhnd,bhdk->bhnk', img_q, text_state)
        cross_out = rearrange(cross_out, 'b h n d -> b n (h d)')
        cross_out = self.out_proj(cross_out)

        # Gated fusion
        gate = self.gate(torch.cat([x_image, cross_out], dim=-1))
        out = x_image + gate * cross_out

        return self.norm(out)


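# Usage sketch: M text tokens are compressed into one (head_dim x head_dim)
# state per head, so the image pass is independent of M. Sizes are
# illustrative; M = 77 mirrors a CLIP-style text encoder but is an assumption.
def _demo_cross_state_fusion():
    fusion = GatedCrossStateFusion(d_model=64, d_text=512, num_heads=8)
    x_img = torch.randn(2, 256, 64)  # N = 256 image tokens
    x_txt = torch.randn(2, 77, 512)  # M = 77 text tokens
    assert fusion(x_img, x_txt).shape == (2, 256, 64)

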
# ============================================================================
# Latent Reasoning Loop (The Novel Core Innovation)
# ============================================================================

class LatentReasoningLoop(nn.Module):
    """
    NOVEL CONTRIBUTION: Iterative reasoning in latent space for image generation.

    Inspired by the Liquid Reasoning Transformer (LRT), but adapted for generative models.

    Key insight: Image generation benefits from iterative refinement. Instead of
    a fixed number of denoising steps (expensive), we add a CHEAP inner reasoning
    loop that refines the latent representation before final prediction.

    How it works:
        1. A "reasoning state" r_t evolves over T_think iterations
        2. Each iteration applies a lightweight SSM + FFN to refine r_t
        3. A DISCARD GATE filters bad updates (prevents error accumulation)
        4. A STOP GATE halts early for easy inputs (adaptive compute)
        5. The final r_T is used to condition the denoising prediction

    This gives the model "thinking time" proportional to input difficulty:
        - Simple prompts / high noise levels → few reasoning steps
        - Complex prompts / fine detail refinement → more reasoning steps

    Mathematical formulation (as implemented below):
        r_0 = MLP(mean(x))
        For t in 1..T_max:
            u_t = FFN(mean(SSM_think(x + r_{t-1})))   # candidate update
            d_t = σ(W_d [r_{t-1}; u_t])               # discard gate
            r_t = d_t * r_{t-1} + (1 - d_t) * u_t     # filtered update
            s_t = σ(W_s r_t)                          # stop gate
            if s_t > τ: break

    Cost: T_think iterations of a SMALL network (1/10th of the main backbone)
    Typical T_think: 2-8 steps (adaptive, not fixed)
    """

    def __init__(self, d_model: int, d_reason: int = 128, max_steps: int = 8):
        super().__init__()
        self.d_reason = d_reason
        self.max_steps = max_steps

        # Initialize reasoning state from input
        self.state_init = nn.Sequential(
            nn.Linear(d_model, d_reason * 2),
            nn.GELU(),
            nn.Linear(d_reason * 2, d_reason)
        )

        # Projection so the loop can re-read the latent tokens at every step
        self.x_to_reason = nn.Linear(d_model, d_reason)

        # Lightweight reasoning block (intentionally small)
        self.reason_ssm = SelectiveStateSpace(d_reason, d_state=8, d_conv=3)
        self.reason_ffn = nn.Sequential(
            nn.Linear(d_reason, d_reason * 2),
            nn.GELU(),
            nn.Linear(d_reason * 2, d_reason)
        )
        self.reason_norm = nn.LayerNorm(d_reason)

        # Discard gate: reject bad updates
        self.discard_gate = nn.Sequential(
            nn.Linear(d_reason * 2, d_reason),
            nn.Sigmoid()
        )

        # Stop gate: halt when converged
        self.stop_gate = nn.Sequential(
            nn.Linear(d_reason, 1),
            nn.Sigmoid()
        )
        self.stop_threshold = 0.8  # fixed halting threshold τ

        # Project reasoning state back to condition the main network
        self.reason_proj = nn.Linear(d_reason, d_model)

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, dict]:
        """
        x: (B, L, D) - input features (latent tokens + conditioning)
        Returns: (B, D) reasoning conditioning vector, info dict
        """
        # Initialize reasoning state from global average of input
        x_global = x.mean(dim=1)       # (B, D)
        r = self.state_init(x_global)  # (B, d_reason)

        # Token-wise view of the input for the inner SSM
        x_reason = self.x_to_reason(x)  # (B, L, d_reason)

        info = {'discard_rates': [], 'stop_values': []}

        # Iterative reasoning loop
        total_steps = 0
        for step in range(self.max_steps):
            # Broadcast the reasoning state over the latent tokens and mix
            r_expanded = r.unsqueeze(1) + x_reason  # (B, L, d_reason)

            # Lightweight processing
            r_processed = self.reason_ssm(self.reason_norm(r_expanded))
            r_proposal = self.reason_ffn(r_processed.mean(dim=1))  # (B, d_reason)

            # Discard gate: d close to 1 keeps the previous state
            d = self.discard_gate(torch.cat([r, r_proposal], dim=-1))
            r_new = d * r + (1 - d) * r_proposal

            # Stop gate
            s = self.stop_gate(r_new).squeeze(-1)  # (B,)

            info['discard_rates'].append(d.mean().item())
            info['stop_values'].append(s.mean().item())

            r = r_new
            total_steps += 1

            # At inference, stop once every batch element wants to stop
            if not self.training and (s > self.stop_threshold).all():
                break

        info['total_steps'] = total_steps

        # Project to conditioning dimension
        cond = self.reason_proj(r)  # (B, D)
        return cond, info


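# Sketch of the reasoning loop in eval mode, where the stop gate can halt
# early; `info` exposes per-step gate statistics for logging. Sizes are
# illustrative assumptions.
def _demo_latent_reasoning_loop():
    loop = LatentReasoningLoop(d_model=64, d_reason=32, max_steps=8).eval()
    x = torch.randn(2, 256, 64)
    with torch.no_grad():
        cond, info = loop(x)
    assert cond.shape == (2, 64)
    assert 1 <= info['total_steps'] <= 8

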
# ============================================================================
# Timestep + Text Embedding
# ============================================================================

class TimestepEmbedding(nn.Module):
    """
    Sinusoidal timestep embedding with MLP projection.
    Standard approach from DDPM, with the addition of frequency scaling
    for better coverage of the continuous [0, 1] range used in flow matching.
    """

    def __init__(self, d_model: int, max_period: int = 10000):
        super().__init__()
        self.d_model = d_model
        self.max_period = max_period

        self.mlp = nn.Sequential(
            nn.Linear(d_model, d_model * 4),
            nn.SiLU(),
            nn.Linear(d_model * 4, d_model)
        )

    def forward(self, t: torch.Tensor) -> torch.Tensor:
        """
        t: (B,) timestep values in [0, 1]
        Returns: (B, d_model)
        """
        half_dim = self.d_model // 2
        freqs = torch.exp(
            -math.log(self.max_period) * torch.arange(half_dim, device=t.device).float() / half_dim
        )
        # Scale [0, 1] timesteps by 1000 so they cover the sinusoid frequency range
        args = t.unsqueeze(1) * freqs.unsqueeze(0) * 1000
        embedding = torch.cat([torch.sin(args), torch.cos(args)], dim=-1)

        if self.d_model % 2:
            embedding = F.pad(embedding, (0, 1))

        return self.mlp(embedding)


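# Minimal shape check for TimestepEmbedding with continuous flow-matching
# timesteps in [0, 1]; sizes are illustrative.
def _demo_timestep_embedding():
    emb = TimestepEmbedding(d_model=64)
    t = torch.rand(4)  # continuous timesteps in [0, 1]
    assert emb(t).shape == (4, 64)

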
class TextProjection(nn.Module):
    """
    Projects text encoder outputs to model dimension.
    Supports variable-length text with a pooled global + per-token output.
    """

    def __init__(self, d_text: int, d_model: int):
        super().__init__()
        self.proj = nn.Linear(d_text, d_model)
        self.pool_proj = nn.Linear(d_text, d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, text_features: torch.Tensor, text_mask: Optional[torch.Tensor] = None):
        """
        text_features: (B, M, D_text)
        text_mask: (B, M) boolean mask (True for valid tokens)
        Returns: per_token (B, M, D), pooled (B, D)
        """
        per_token = self.norm(self.proj(text_features))

        if text_mask is not None:
            # Masked mean pooling over valid tokens
            mask = text_mask.unsqueeze(-1).float()
            pooled = (text_features * mask).sum(1) / mask.sum(1).clamp(min=1)
        else:
            pooled = text_features.mean(dim=1)

        pooled = self.pool_proj(pooled)
        return per_token, pooled
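

# Combined smoke test for the illustrative demos above, assuming CPU
# execution; this file is a library module, so nothing runs on import.
if __name__ == "__main__":
    for demo in (_demo_selective_state_space, _demo_bidirectional_scanner,
                 _demo_mix_ffn, _demo_hyper_connection,
                 _demo_adaln_zero_identity, _demo_lira_block,
                 _demo_cross_state_fusion, _demo_latent_reasoning_loop,
                 _demo_timestep_embedding):
        demo()
        print(f"{demo.__name__}: ok")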