"""
Chimera 5.2 — full causal LM with FUNCTIONAL self-evolution.

Key changes for auto-evolution:
* SelfEvolutionEngine is called at EVERY layer during forward pass
* Semantic memory modulation is added to hidden states
* TTT updates target MLP weights in-place during forward
* Evolution loss is added to causal LM loss during training
* Contrastive evaluation tracks memory usefulness
* Loop depth classifier sets compute budget per sequence
"""

from __future__ import annotations

import json
from typing import List, Optional, Tuple

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.checkpoint import checkpoint

from .quantization import BitLinear, RMSNorm
from .layers import (GatedDeltaNetLayer, MLSTMLayer, TitansMACLayer,
                     TSPSpanKnotLayer, SwiGLUMLP)
from .moe import MoELayer
from .looping import ParcaeLoopController
from .inference import (SpanInferenceEngine, GrammarFST, EntropyValve,
                        DebtLedger, BraidState)
from .evolution import SelfEvolutionEngine
from .multimodal import VisionEncoder, AudioEncoder


class CausalLMOutput(dict):
    """Light HF-compatible output dict supporting tuple unpacking."""

    def __init__(self, loss: Optional[torch.Tensor] = None,
                 logits: Optional[torch.Tensor] = None,
                 hidden_states: Optional[torch.Tensor] = None,
                 caches: Optional[list] = None,
                 evolution_metrics: Optional[dict] = None):
        super().__init__(loss=loss, logits=logits,
                         hidden_states=hidden_states, caches=caches,
                         evolution_metrics=evolution_metrics)
        self.loss = loss
        self.logits = logits
        self.hidden_states = hidden_states
        self.caches = caches
        self.evolution_metrics = evolution_metrics or {}

    def __iter__(self):
        yield self.loss
        yield self.logits


def expand_layer_pattern(config: dict) -> List[str]:
    """Expand the layer-pattern shorthand into a list."""
    backbone = config.get("backbone", {})
    pattern_str = backbone.get("layer_pattern", "GD XM GD TM GD XM GD SK")
    aliases = backbone.get("layer_aliases", {
        "GD": "gated_deltanet", "XM": "xlstm_m",
        "TM": "titans_mac", "SK": "tsp_span_knot",
    })
    pattern = pattern_str.split()
    n_layers = int(config.get("num_hidden_layers", 28))
    full = (pattern * (n_layers // len(pattern) + 1))[:n_layers]
    return [aliases.get(p, p) for p in full]
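
# For example, with the default pattern and a hypothetical 6-layer config:
#     expand_layer_pattern({"num_hidden_layers": 6})
#     == ["gated_deltanet", "xlstm_m", "gated_deltanet",
#         "titans_mac", "gated_deltanet", "xlstm_m"]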


class Chimera51Block(nn.Module):
    """One block with evolution-aware forward."""

    _RECURRENT = {"gated_deltanet", "xlstm_m", "titans_mac", "tsp_span_knot"}

    def __init__(self, config: dict, layer_type: str, layer_idx: int,
                 use_moe: bool = False):
        super().__init__()
        h = int(config["hidden_size"])
        eps = float(config.get("rms_norm_eps", 1e-6))
        heads = int(config["num_heads"])
        head_dim = int(config["head_dim"])
        ternary = bool(config.get("use_ternary", True))
        chunk_sz = int(config.get("gated_deltanet", {}).get("chunk_size", 64))

        self.layer_idx = layer_idx
        self.layer_type = layer_type
        self.attn_norm = RMSNorm(h, eps=eps)

        if layer_type == "gated_deltanet":
            self.attn = GatedDeltaNetLayer(h, heads, head_dim, norm_eps=eps,
                                           chunk_size=chunk_sz, use_ternary=ternary)
        elif layer_type == "xlstm_m":
            mem_h = config.get("xlstm", {}).get("memory_size_per_head", [head_dim, head_dim])
            self.attn = MLSTMLayer(h, heads, int(mem_h[0]), norm_eps=eps,
                                   use_ternary=ternary)
        elif layer_type == "titans_mac":
            tc = config.get("titans", {})
            self.attn = TitansMACLayer(h, heads, head_dim,
                                       memory_depth=int(tc.get("memory_depth", 2)),
                                       persistent_slots=int(tc.get("persistent_memory_slots", 64)),
                                       local_window=int(tc.get("local_window_size", 1024)),
                                       norm_eps=eps, use_ternary=ternary)
        elif layer_type == "tsp_span_knot":
            self.attn = TSPSpanKnotLayer(h, heads, head_dim, norm_eps=eps,
                                         chunk_size=chunk_sz, use_ternary=ternary)
        else:
            raise ValueError(f"Unknown layer type: {layer_type}")

        self.mlp_norm = RMSNorm(h, eps=eps)
        self.use_moe = bool(use_moe)
        if self.use_moe:
            moe_cfg = config.get("backbone", {}).get("moe", {})
            self.mlp = MoELayer(
                hidden_size=h,
                moe_intermediate_size=int(moe_cfg.get("moe_intermediate_size", h * 2)),
                n_routed_experts=int(moe_cfg.get("n_routed_experts", 16)),
                n_shared_experts=int(moe_cfg.get("n_shared_experts", 1)),
                num_experts_per_tok=int(moe_cfg.get("num_experts_per_tok", 2)),
                use_ternary=ternary,
            )
        else:
            inter = int(config.get("intermediate_size", int(h * 8 / 3)))
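            # Round the intermediate size up to the next multiple of 256.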
            inter = 256 * ((inter + 255) // 256)
            self.mlp = SwiGLUMLP(h, inter, use_ternary=ternary)

        # Evolution modulation projection (learnable scale)
        self.evo_gate = nn.Linear(h, h, bias=False)
        nn.init.zeros_(self.evo_gate.weight)
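        # Note: with zero-initialized weights the gate is sigmoid(0) = 0.5
        # everywhere at the start, i.e. modulation begins at half strength.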

    def forward(self, x: torch.Tensor, cache: Optional[dict] = None,
                evo_modulation: Optional[torch.Tensor] = None) -> Tuple[torch.Tensor, dict]:
        # Apply attention with pre-norm
        normed = self.attn_norm(x)
        attn_out, new_cache = self.attn(normed, cache=cache)
        x = x + attn_out

        # Apply MLP with pre-norm
        x = x + self.mlp(self.mlp_norm(x))

        # Apply evolution modulation (gated residual)
        if evo_modulation is not None:
            gate = torch.sigmoid(self.evo_gate(x))
            x = x + gate * evo_modulation

        return x, new_cache


class Chimera51ForCausalLM(nn.Module):
    """Chimera 5.x causal language model with functional self-evolution."""

    def __init__(self, config: dict):
        super().__init__()
        self.config = config
        h = int(config["hidden_size"])
        vocab = int(config["vocab_size"])
        n_layers = int(config["num_hidden_layers"])
        eps = float(config.get("rms_norm_eps", 1e-6))

        self.embed = nn.Embedding(vocab, h)
        layer_types = expand_layer_pattern(config)
        moe_layers = set(int(i) for i in config.get("backbone", {}).get("moe", {}).get("layers", []))

        self.layers = nn.ModuleList([
            Chimera51Block(config, layer_types[i], i, use_moe=(i in moe_layers))
            for i in range(n_layers)
        ])

        self.norm = RMSNorm(h, eps=eps)
        self.lm_head = nn.Linear(h, vocab, bias=False)

        if config.get("tie_word_embeddings", True):
            self.lm_head.weight = self.embed.weight

        # Parcae looping controller
        loop_cfg = config.get("looping", {})
        self.looping_enabled = bool(loop_cfg.get("enabled", True)) and n_layers >= 3
        if self.looping_enabled:
            self.prelude_start, self.prelude_end = loop_cfg.get("prelude", [0, min(3, n_layers - 1)])
            self.loop_start, self.loop_end = loop_cfg.get("loop", [min(4, n_layers - 1), max(4, n_layers - 4)])
            self.coda_start, self.coda_end = loop_cfg.get("coda", [max(0, n_layers - 4), n_layers - 1])
            self.loop_controller = ParcaeLoopController(
                h, loop_range=tuple(loop_cfg.get("loop_range", [1, 6])),
                loop_default=int(loop_cfg.get("loop_default", 2)),
                adaptive_exit_threshold=float(loop_cfg.get("adaptive_exit_threshold", 0.01)),
            )
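            # Layers are staged prelude -> (loop body x N) -> coda in
            # forward(); the controller picks N per sequence in loop_range.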

        # Inference systems
        si_cfg = config.get("span_inference", {})
        self.span_engine = SpanInferenceEngine(h, si_cfg) if si_cfg.get("enabled", True) else None
        self.grammar = GrammarFST(config.get("grammar", {}))
        self.entropy_valve = EntropyValve(config.get("entropy_valve", {}))
        self.debt_ledger = DebtLedger(config.get("debt_ledger", {}))

        # Self-evolution — FUNCTIONAL
        evo_cfg = dict(config.get("self_evolution", {}))
        evo_cfg["_semantic_memory_config"] = config.get("semantic_memory", {})
        self.evolution = SelfEvolutionEngine(evo_cfg, h)
        self.evo_weight = float(config.get("evolution_loss_weight", 0.01))
        self.evo_every_n_layers = int(config.get("evolution_every_n_layers", 4))

        # Multimodal
        mm_cfg = dict(config.get("multimodal", {}))
        mm_cfg["hidden_size"] = h
        if mm_cfg.get("enabled", False):
            self.vision_encoder = VisionEncoder(mm_cfg)
            self.audio_encoder = AudioEncoder(mm_cfg)
        else:
            self.vision_encoder = None
            self.audio_encoder = None

        self.gradient_checkpointing = False
        self._init_weights()
        self._wire_semantic_memory()

    def enable_gradient_checkpointing(self) -> None:
        self.gradient_checkpointing = True

    def disable_gradient_checkpointing(self) -> None:
        self.gradient_checkpointing = False

    def _wire_semantic_memory(self) -> None:
        mem = self.evolution.semantic_memory
        for layer in self.layers:
            if hasattr(layer.attn, "set_semantic_memory"):
                layer.attn.set_semantic_memory(mem)

    def _init_weights(self) -> None:
        init_range = float(self.config.get("initializer_range", 0.006))
        for module in self.modules():
            if isinstance(module, (nn.Linear, BitLinear)):
                if module.weight is not None:
                    nn.init.normal_(module.weight, mean=0.0, std=init_range)
                if getattr(module, "bias", None) is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.Embedding):
                nn.init.normal_(module.weight, mean=0.0, std=init_range)
        for module in self.modules():
            if isinstance(module, BitLinear):
                module.invalidate_packed()

    def _run_layers(self, x: torch.Tensor, start: int, end: int,
                    caches: Optional[list],
                    compute_logits: bool = False,
                    labels: Optional[torch.Tensor] = None
                    ) -> Tuple[torch.Tensor, Optional[torch.Tensor],
                               Optional[list], torch.Tensor, list]:
        """Run layers [start, end] with evolution hooks.

        Returns (x, logits_if_computed, caches, evolution_loss, metrics).
        """
        all_metrics = []
        logits = None
        evolution_loss = torch.tensor(0.0, device=x.device)

        for i in range(start, min(end + 1, len(self.layers))):
            layer = self.layers[i]
            cache = caches[i] if caches is not None else None

            # Evolution modulation every N layers (lightweight)
            evo_mod = None
            if self.evolution is not None and i % self.evo_every_n_layers == 0:
                # Compute modulation from semantic memory. Hidden states are
                # detached so this branch does not backprop into the backbone;
                # the engine's own parameters still receive gradients through
                # its outputs. Note: the loss parameter expects a scalar loss
                # tensor for TTT/surprise; pass None during the standard
                # forward and compute it explicitly for TTT.
                evo_result = self.evolution(
                    hidden_states=x.detach() if x.requires_grad else x,
                    layer_idx=i,
                    loss=None,
                )
                evo_mod = evo_result['modulation']
                if evo_result['evolution_loss'] is not None:
                    evolution_loss = evolution_loss + evo_result['evolution_loss']
                all_metrics.append(evo_result.get('metrics', {}))

                # TTT update for target layers (only in training, no backprop)
                if self.training and evo_result.get('ttt_delta') is not None:
                    with torch.no_grad():
                        # Apply TTT to MLP down-projection if this is a target layer
                        if hasattr(layer.mlp, 'w_down'):
                            layer.mlp.w_down.data.add_(evo_result['ttt_delta'] * self.evolution.ttt.inner_lr)

            if self.gradient_checkpointing and self.training:
                # Checkpointed path recomputes activations on backward; the
                # layer cache is dropped here since training does not use it.
                def _ckpt_fn(x_in, layer=layer, cache=cache, evo=evo_mod):
                    out, _ = layer(x_in, cache=cache, evo_modulation=evo)
                    return out
                x = checkpoint(_ckpt_fn, x, use_reentrant=False)
            else:
                x, new_cache = layer(x, cache=cache, evo_modulation=evo_mod)
                if caches is not None:
                    caches[i] = new_cache

            # Compute probe logits for the entropy valve at the last layer of this range
            if compute_logits and i == end:
                logits = self.lm_head(self.norm(x[:, -1:, :]))

        return x, logits, caches, evolution_loss, all_metrics

    def forward(self, input_ids: torch.Tensor,
                labels: Optional[torch.Tensor] = None,
                pixel_values: Optional[torch.Tensor] = None,
                mel_features: Optional[torch.Tensor] = None,
                num_loops: Optional[int] = None,
                caches: Optional[list] = None,
                use_cache: bool = False,
                logits_to_keep: int = 0,
                return_evolution_metrics: bool = False) -> "CausalLMOutput":
        x = self.embed(input_ids)

        # Multimodal prepend
        if pixel_values is not None and self.vision_encoder is not None:
            v = self.vision_encoder(pixel_values)
            if v is not None:
                x = torch.cat([v, x], dim=1)
        if mel_features is not None and self.audio_encoder is not None:
            a = self.audio_encoder(mel_features)
            if a is not None:
                x = torch.cat([a, x], dim=1)

        if caches is None and use_cache:
            caches = [None] * len(self.layers)

        total_evo_loss = torch.tensor(0.0, device=x.device)
        all_evo_metrics = []

        # Prelude + Loop + Coda with evolution
        if self.looping_enabled and hasattr(self, "loop_controller"):
            # Prelude
            x, probe_logits, caches, evo_loss, metrics = self._run_layers(
                x, self.prelude_start, self.prelude_end, caches,
                compute_logits=not self.training, labels=labels)
            total_evo_loss = total_evo_loss + evo_loss
            all_evo_metrics.extend(metrics)

            # Determine loop depth
            effective = num_loops
            if effective is None and not self.training and probe_logits is not None:
                effective = self.entropy_valve.get_loop_count(probe_logits)
            elif effective is None and self.evolution is not None:
                # Use the loop-depth classifier from the evolution engine
                last_hidden = x[:, -1, :].mean(dim=0, keepdim=True)  # average over batch
                effective = int(self.evolution.loop_classifier(last_hidden).item())
                effective = max(1, min(effective, 6))  # clamp to the supported loop range

            # Loop body (note: evolution loss and metrics from looped passes
            # are discarded; only prelude and coda contribute)
            def loop_fn(inp):
                return self._run_layers(inp, self.loop_start, self.loop_end,
                                        caches, labels=labels)[0]
            x = self.loop_controller(x, loop_fn, num_loops=effective)

            # Coda
            x, _, caches, evo_loss, metrics = self._run_layers(
                x, self.coda_start, self.coda_end, caches, labels=labels)
            total_evo_loss = total_evo_loss + evo_loss
            all_evo_metrics.extend(metrics)
        else:
            x, _, caches, evo_loss, metrics = self._run_layers(
                x, 0, len(self.layers) - 1, caches,
                compute_logits=not self.training, labels=labels)
            total_evo_loss = total_evo_loss + evo_loss
            all_evo_metrics.extend(metrics)

        # Final norm and logits
        if logits_to_keep and labels is None:
            keep = int(logits_to_keep)
            tail = x[:, -keep:, :]
            tail = self.norm(tail)
            if self.span_engine is not None:
                tail = self.span_engine(tail)
            logits = self.lm_head(tail)
        else:
            x = self.norm(x)
            if self.span_engine is not None:
                x = self.span_engine(x)
            logits = self.lm_head(x)

        logits = self.grammar(logits)
        logits = self.debt_ledger(logits)

        # Self-feedback refinement check (inference only)
        if not self.training and self.evolution is not None:
            should_refine = self.evolution.self_feedback.should_refine(logits)
            if should_refine:
                all_evo_metrics.append({'refinement_triggered': True})

        # Compute loss
        loss = None
        if labels is not None:
            # Align logits to the label positions (multimodal tokens are
            # prepended, so keep the last label-length positions), then
            # shift by one token for next-token prediction.
            seq_len = min(logits.size(1), labels.size(1))
            shift_logits = logits[:, -seq_len:-1, :].contiguous()
            shift_labels = labels[:, -seq_len:][:, 1:].contiguous()
            ce_loss = F.cross_entropy(
                shift_logits.view(-1, shift_logits.size(-1)),
                shift_labels.view(-1),
                ignore_index=-100,
            )
            # Add evolution loss (contrastive memory evaluation)
            loss = ce_loss + self.evo_weight * total_evo_loss
        else:
            ce_loss = None

        # Episodic storage is not triggered automatically: in real use, call
        # model.evolution.store_episodic() explicitly at end of sequence,
        # e.g. with the final hidden state x[:, -1, :].

        return CausalLMOutput(
            loss=loss,
            logits=logits,
            hidden_states=x,
            caches=caches if use_cache else None,
            evolution_metrics={
                'ce_loss': ce_loss.item() if ce_loss is not None else None,
                'evo_loss': total_evo_loss.item(),
                'layer_metrics': all_evo_metrics,
            } if return_evolution_metrics else None
        )

    @torch.no_grad()
    def prepare_for_inference(self) -> None:
        """Pre-pack every BitLinear so the first generation step is fast."""
        for module in self.modules():
            if isinstance(module, BitLinear):
                module.prepare_for_inference()
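
    # Typical inference flow (an illustrative sketch, not the only path):
    #     model.eval()
    #     model.prepare_for_inference()
    #     out = model(prompt_ids, use_cache=True, logits_to_keep=1)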

    def get_mode_config(self, mode: str = "balanced") -> dict:
        modes = self.config.get("modes", {})
        return modes.get(mode, modes.get("balanced", {}))

    def count_parameters(self) -> dict:
        total = sum(p.numel() for p in self.parameters())
        ternary = sum(p.numel() for _, m in self.named_modules()
                      if isinstance(m, BitLinear) for p in m.parameters())
        return {"total": total, "ternary": ternary, "fp32": total - ternary}

    @classmethod
    def from_config_file(cls, path: str) -> "Chimera51ForCausalLM":
        with open(path, "r", encoding="utf-8") as fh:
            config = json.load(fh)
        return cls(config)
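
# A minimal, hypothetical config for from_config_file(), showing only keys
# this module actually reads (values are placeholders, not recommendations):
#     {
#       "hidden_size": 1024, "vocab_size": 32000, "num_hidden_layers": 28,
#       "num_heads": 16, "head_dim": 64, "rms_norm_eps": 1e-6,
#       "backbone": {"layer_pattern": "GD XM GD TM GD XM GD SK"},
#       "looping": {"enabled": true},
#       "self_evolution": {}, "semantic_memory": {}, "multimodal": {}
#     }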


__all__ = ["Chimera51ForCausalLM", "Chimera51Block", "CausalLMOutput",
           "expand_layer_pattern"]