asdf98 committed
Commit 63a5e3c · verified · 1 Parent(s): 192c527

Upload liqmamba/model.py

Files changed (1)
liqmamba/model.py +280 -0
liqmamba/model.py ADDED
@@ -0,0 +1,280 @@
"""
LiqMamba: Liquid-Mamba Image Generator

Complete architecture that combines:
1. SDXL VAE for encoding/decoding (pretrained, frozen)
2. CfC-gated Mamba-2 SSD backbone with multi-directional 2D scans
3. Flow matching objective for stable training
4. Lipschitz regularization (physics-informed) to prevent collapse

Configurations:
- LiqMamba-Tiny: ~8M params (extreme lightweight)
- LiqMamba-Small: ~25M params (Colab/Kaggle free tier target)
- LiqMamba-Base: ~85M params (higher quality)
"""

import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Optional
import math

from .mamba2_ssd import MultiDirectionalScan, Mamba2SSDBlock
from .cfc import CfCLayer

class PatchEmbed(nn.Module):
    """Convert image latents to patch tokens."""
    def __init__(self, in_channels=4, dim=256, patch_size=1):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, dim, patch_size, patch_size)

    def forward(self, x):
        # x: (B, C, H, W) -> (B, dim, H, W) -> (B, H*W, dim)
        x = self.proj(x)
        B, C, H, W = x.shape
        x = x.flatten(2).transpose(1, 2)
        return x, H, W


class Unpatchify(nn.Module):
    """Convert patch tokens back to image latents."""
    def __init__(self, dim=256, out_channels=4, patch_size=1):
        super().__init__()
        self.proj = nn.Conv2d(dim, out_channels, patch_size, patch_size)

    def forward(self, x, H, W):
        B, L, D = x.shape
        x = x.transpose(1, 2).view(B, D, H, W)
        return self.proj(x)

class AdaLNModulation(nn.Module):
    """
    Adaptive Layer Norm modulation (from DiT).
    Injects timestep and optional class conditioning.
    """
    def __init__(self, dim, cond_dim=256):
        super().__init__()
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)
        self.scale_shift = nn.Sequential(
            nn.SiLU(),
            nn.Linear(cond_dim, dim * 6)  # two sets of (scale, shift, gate)
        )

    def forward(self, x, c):
        # x: (B, L, D), c: (B, cond_dim)
        params = self.scale_shift(c)  # (B, D*6)
        # Only the first (scale, shift, gate) set is applied below;
        # the second set is produced but currently unused.
        scale1, shift1, gate1, scale2, shift2, gate2 = params.chunk(6, dim=-1)

        # Modulate
        x = self.norm(x) * (1 + scale1.unsqueeze(1)) + shift1.unsqueeze(1)
        x = x * gate1.unsqueeze(1)
        return x

class TimestepEmbedding(nn.Module):
    """Sinusoidal timestep embedding."""
    def __init__(self, dim, max_period=10000):
        super().__init__()
        self.dim = dim
        self.max_period = max_period
        self.mlp = nn.Sequential(
            nn.Linear(dim, dim * 4),
            nn.SiLU(),
            nn.Linear(dim * 4, dim),
        )

    def forward(self, t):
        # t: (B,) float timesteps in [0,1]
        half = self.dim // 2
        freqs = torch.exp(-math.log(self.max_period) *
                          torch.arange(0, half, device=t.device).float() / half)
        args = t.unsqueeze(-1) * freqs.unsqueeze(0)
        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
        if self.dim % 2:
            embedding = F.pad(embedding, (0, 1))
        return self.mlp(embedding)

class LiqMambaBlock(nn.Module):
    """
    Core LiqMamba block combining:
    - AdaLN-Zero modulation
    - Multi-directional Mamba-2 SSD scan
    - CfC liquid state modulation
    - Feed-forward with CfC gating
    """
    def __init__(self, dim, cond_dim=256, d_state=16, expand=2,
                 scan_pattern="row_fwd", use_ffn=True):
        super().__init__()
        self.dim = dim
        self.scan_pattern = scan_pattern

        # AdaLN
        self.adaLN = AdaLNModulation(dim, cond_dim)

        # Multi-directional scan
        self.scan = MultiDirectionalScan(dim, pattern=scan_pattern,
                                         d_state=d_state, expand=expand)

        # CfC liquid layer (replaces FFN for adaptive computation)
        if use_ffn:
            self.cfc_ffn = CfCLayer(dim, expansion_factor=2)
        else:
            self.cfc_ffn = nn.Identity()

        # AdaLN for FFN
        self.adaLN_ffn = AdaLNModulation(dim, cond_dim) if use_ffn else None

    def forward(self, x, c, H, W):
        # x: (B, H*W, dim), c: (B, cond_dim)

        # Scan with conditioning
        x_mod = self.adaLN(x, c)
        x = x + self.scan(x_mod, H, W)

        # CfC FFN with conditioning
        if self.adaLN_ffn is not None:
            x_mod2 = self.adaLN_ffn(x, c)
            x = x + self.cfc_ffn(x_mod2)

        return x

class LiqMamba(nn.Module):
    """
    LiqMamba Image Generator — Liquid Neural Network + Mamba-2 SSD

    Architecture:
    1. Patch embed: latent (4, H, W) → tokens (H*W, dim)
    2. Timestep + condition embedding
    3. N stacked LiqMambaBlocks with alternating scan directions
    4. Unpatchify: tokens → latent (4, H, W)

    Config presets:
    - Tiny: dim=128, depth=4 → ~8M params
    - Small: dim=256, depth=8 → ~25M params
    - Base: dim=512, depth=12 → ~85M params
    """

    def __init__(
        self,
        in_channels: int = 4,  # VAE latent channels
        out_channels: int = 4,
        dim: int = 256,  # Hidden dimension
        depth: int = 8,  # Number of blocks
        cond_dim: int = 256,  # Conditioning dimension
        d_state: int = 16,  # SSM state dimension
        expand: int = 2,  # SSD expansion factor
        patch_size: int = 1,
        scan_patterns: list[str] | None = None,
    ):
        super().__init__()

        self.dim = dim
        self.depth = depth

        # Scan pattern rotation (matching DiM's 4-pattern cycle)
        if scan_patterns is None:
            scan_patterns = ["row_fwd", "row_rev", "col_fwd", "col_rev"]
        self.scan_patterns = scan_patterns

        # Patch embedding
        self.patch_embed = PatchEmbed(in_channels, dim, patch_size)
        self.unpatchify = Unpatchify(dim, out_channels, patch_size)

        # Timestep embedding
        self.time_embed = TimestepEmbedding(cond_dim)

        # Optional class embedding
        self.class_embed = nn.Embedding(1000, cond_dim)

        # Initial CfC layer for liquid state initialization
        self.cfc_init = CfCLayer(dim, expansion_factor=2)

        # LiqMamba blocks
        self.blocks = nn.ModuleList()
        for i in range(depth):
            pattern = scan_patterns[i % len(scan_patterns)]
            use_ffn = (i % 2 == 0)  # FFN every other block for efficiency
            self.blocks.append(
                LiqMambaBlock(
                    dim=dim,
                    cond_dim=cond_dim,
                    d_state=d_state,
                    expand=expand,
                    scan_pattern=pattern,
                    use_ffn=use_ffn,
                )
            )

        # Final CfC refinement layer
        self.cfc_final = CfCLayer(dim, expansion_factor=2)

        # Final projection
        self.final_norm = nn.LayerNorm(dim)

        # Initialize weights
        self._init_weights()

    def _init_weights(self):
        """Initialize with small values for stable training."""
        for module in self.modules():
            if isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, std=0.02)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)

    def forward(self, x, t, class_labels=None, return_dict=False):
        """
        Args:
            x: (B, C, H, W) latent images
            t: (B,) float timesteps in [0, 1]
            class_labels: (B,) optional class indices
        Returns:
            velocity field v(x, t) used for flow matching
        """
        B, C, H, W = x.shape

        # Patch embed
        x, H_p, W_p = self.patch_embed(x)  # (B, H*W, dim)

        # Timestep conditioning
        c = self.time_embed(t)  # (B, cond_dim)
        if class_labels is not None:
            c = c + self.class_embed(class_labels)

        # Initial liquid state
        x = self.cfc_init(x)

        # LiqMamba blocks
        for block in self.blocks:
            x = block(x, c, H_p, W_p)

        # Final refinement
        x = self.final_norm(x)
        x = self.cfc_final(x)

        # Unpatchify
        x = self.unpatchify(x, H_p, W_p)

        if return_dict:
            return {"velocity": x}
        return x

    def get_num_params(self):
        return sum(p.numel() for p in self.parameters() if p.requires_grad)

def liqmamba_tiny(**kwargs):
    """Tiny variant: ~8M params, extreme lightweight."""
    return LiqMamba(dim=128, depth=4, d_state=8, expand=2, **kwargs)

def liqmamba_small(**kwargs):
    """Small variant: ~25M params, Colab/Kaggle free tier target."""
    return LiqMamba(dim=256, depth=8, d_state=16, expand=2, **kwargs)

def liqmamba_base(**kwargs):
    """Base variant: ~85M params, higher quality."""
    return LiqMamba(dim=512, depth=12, d_state=16, expand=2, **kwargs)
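For reference, a minimal usage sketch (not part of this commit): it assumes the liqmamba package, including the mamba2_ssd and cfc modules imported above, is installed and importable, and it illustrates one common flow-matching convention (interpolate from noise at t=0 to data at t=1 and regress the velocity data - noise). The repository's actual training loop may use a different convention.

import torch
import torch.nn.functional as F

from liqmamba.model import liqmamba_small

model = liqmamba_small()
print(f"trainable params: {model.get_num_params() / 1e6:.1f}M")

# Dummy VAE latents: 4 channels, 32x32 (e.g. 256x256 images downsampled 8x by the VAE)
data = torch.randn(2, 4, 32, 32)
noise = torch.randn_like(data)
t = torch.rand(2)  # timesteps in [0, 1]

# Linear interpolation between noise (t=0) and data (t=1)
t_ = t.view(-1, 1, 1, 1)
x_t = (1 - t_) * noise + t_ * data
target_v = data - noise  # assumed flow-matching target velocity

pred_v = model(x_t, t)  # (2, 4, 32, 32) predicted velocity field
loss = F.mse_loss(pred_v, target_v)
loss.backward()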