| """ |
| N‑Transformers v1.0 — Python Reference Implementation (single‑file) |
| Noetic Affective Field Self‑Integration (NAFSI) on a Transformer Base |
| |
| NOTE |
| ---- |
| • Framework: PyTorch ≥ 2.2 (CUDA optional). |
| • This file focuses on the parallel PF path, coupling modules, and wrappers needed to augment a standard decoder‑only Transformer. |
| • The core Transformer (token path) can be any decoder‑only model that exposes hidden states h_t and base logits z_t (e.g., GPT‑like). |
| • All tensors are batch‑first unless noted. |
| |
| Status: Research‑grade reference code (trainable with additional plumbing). |
| Author: Prometheus (Cognitive Systems Architect) — with Syams Ideris |
| """ |
| from __future__ import annotations |
| import math |
| from dataclasses import dataclass |
| from typing import Optional, Tuple, Dict |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
|
|
|
|
| |
| |
| |
|
|
| def pairwise_cosine(x: torch.Tensor, y: Optional[torch.Tensor] = None, eps: float = 1e-8) -> torch.Tensor: |
| """Compute pairwise cosine similarity between rows of x and (optionally) y. |
| x: (B, N, D); y: (B, M, D) or None (then y=x) |
| returns: (B, N, M) |
| """ |
| if y is None: |
| y = x |
| x_norm = F.normalize(x, dim=-1, eps=eps) |
| y_norm = F.normalize(y, dim=-1, eps=eps) |
| return torch.matmul(x_norm, y_norm.transpose(-1, -2)) |
|
|
|
|
| def knn_indices(x: torch.Tensor, K: int) -> torch.Tensor: |
| """Return K nearest neighbor indices per row using cosine similarity (excluding self). |
| x: (B, J, D) -> indices: (B, J, K) |
| """ |
| with torch.no_grad(): |
| sim = pairwise_cosine(x) |
| B, J, _ = sim.shape |
| sim = sim - torch.eye(J, device=sim.device).unsqueeze(0) * 2.0 |
| topk = torch.topk(sim, k=K, dim=-1).indices |
| return topk |
|
|
|
|
| def build_adjacency(indices: torch.Tensor, J: int) -> torch.Tensor: |
| """Build symmetric adjacency from KNN indices. |
| indices: (B, J, K) |
| returns A: (B, J, J) with {0,1} entries. |
| """ |
    B, J_, K = indices.shape
    assert J == J_, "J mismatch"
    A = torch.zeros(B, J, J, device=indices.device)
    # Mark each row's K nearest neighbours.
    A.scatter_(dim=-1, index=indices, value=1.0)
    # Symmetrize and remove self-loops.
    A = torch.maximum(A, A.transpose(-1, -2))
    A = A * (1.0 - torch.eye(J, device=A.device).unsqueeze(0))
    return A
|
|
|
|
| def normalized_graph_laplacian(A: torch.Tensor, eps: float = 1e-8) -> torch.Tensor: |
| """Compute normalized Laplacian L = I - D^{-1/2} A D^{-1/2}. |
| A: (B, J, J) adjacency (nonnegative) |
| return L: (B, J, J) |
| """ |
| B, J, _ = A.shape |
| d = A.sum(-1) + eps |
| d_isqrt = (1.0 / torch.sqrt(d)).unsqueeze(-1) |
| Dn = d_isqrt * A * d_isqrt.transpose(-1, -2) |
| I = torch.eye(J, device=A.device).unsqueeze(0).expand(B, J, J) |
| L = I - Dn |
| return L |
|
|
|
|
| def sym_spd_from_cholesky(L_tri: torch.Tensor, eps: float = 1e-4) -> torch.Tensor: |
| """Build SPD matrix g = L L^T + eps I from lower-triangular parameterization. |
| L_tri: (B, k, k) lower-triangular with positive diagonal (apply softplus outside) |
| Return g: (B, k, k) |
| """ |
| B, k, _ = L_tri.shape |
| I = torch.eye(k, device=L_tri.device).unsqueeze(0).expand(B, k, k) |
| return L_tri @ L_tri.transpose(-1, -2) + eps * I |
|
|
|
|
| def batched_geodesic_sq(x: torch.Tensor, y: torch.Tensor, g: torch.Tensor) -> torch.Tensor: |
| """Compute squared geodesic distance under metric g: |
| d^2 = (x - y)^T g (x - y) |
| x: (B,J,k); y: (B,J,k) broadcastable to pairs; g: (B,k,k) |
| Returns pairwise (B,J,J) |
| """ |
    B, J, k = x.shape
    # Pairwise differences between all node pairs: (B, J, J, k)
    x_ = x.unsqueeze(2)
    y_ = y.unsqueeze(1)
    diff = x_ - y_
    # Apply the metric per batch: (B, J, J, k) @ (B, 1, k, k) -> (B, J, J, k)
    gd = torch.matmul(diff, g.unsqueeze(1))
    val = (diff * gd).sum(-1)
    return val.clamp_min(0.0)
|
|
|
|
| def safe_eigvalsh(L: torch.Tensor, k_smallest: int = 3) -> torch.Tensor: |
| """Compute few smallest eigenvalues of symmetric L with safety. |
| L: (B,J,J) |
| Return: (B, k_smallest) |
| """ |
| |
| try: |
| vals = torch.linalg.eigvalsh(L) |
| vals, _ = torch.topk(vals, k=k_smallest, largest=False, sorted=True) |
| return vals |
| except RuntimeError: |
| |
| jitter = 1e-4 * torch.eye(L.shape[-1], device=L.device).unsqueeze(0) |
| vals = torch.linalg.eigvalsh(L + jitter) |
| vals, _ = torch.topk(vals, k=k_smallest, largest=False, sorted=True) |
| return vals |
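

# Illustrative (not part of the model): a minimal sanity check of the graph utilities
# above, assuming random PF states. The helper name `_example_graph_pipeline` and the
# toy shapes are hypothetical; this is a sketch, not a test suite.
def _example_graph_pipeline() -> None:
    B, J, D, K = 2, 32, 8, 4
    x = torch.randn(B, J, D)
    idx = knn_indices(x, K)                     # (B, J, K) neighbor indices
    A = build_adjacency(idx, J)                 # (B, J, J) symmetric {0,1} adjacency
    L = normalized_graph_laplacian(A)           # (B, J, J)
    lam = safe_eigvalsh(L, k_smallest=3)        # smallest eigenvalues; lam[:, 0] ~ 0
    assert A.shape == (B, J, J) and torch.allclose(A, A.transpose(-1, -2))
    print("lambda_1..3:", lam)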
|
|
|
|
| |
| |
| |
|
|
| @dataclass |
| class PFConfig: |
| J: int = 256 |
| k: int = 16 |
| K: int = 16 |
| alpha: float = 0.05 |
| noise_eps: float = 1e-3 |
| lambda_out: float = 1.0 |
| lambda_tv: float = 0.1 |
| lambda_mw: float = 0.05 |
| metric_eps: float = 1e-4 |
| metric_rank: Optional[int] = None |
|
|
|
|
class PFAdapterOut(nn.Module):
    """Adapter A_out: map token hidden h_t (B, d) -> target PF pattern F_tilde (B, J, k)."""
    def __init__(self, d: int, J: int, k: int):
        super().__init__()
        self.J, self.k = J, k
        self.proj = nn.Linear(d, J * k)

    def forward(self, h_t: torch.Tensor) -> torch.Tensor:
        B, _ = h_t.shape
        out = self.proj(h_t)
        return out.view(B, self.J, self.k)
|
|
|
|
| class PFIntrinsicMetricEngine(nn.Module): |
| """IME: learn SPD metric g_t from PF state statistics via Cholesky parameterization.""" |
| def __init__(self, k: int, hidden: int = 128, metric_eps: float = 1e-4): |
| super().__init__() |
| self.k = k |
| self.metric_eps = metric_eps |
| in_dim = 2 * k |
| self.mlp = nn.Sequential( |
| nn.Linear(in_dim, hidden), nn.GELU(), |
| nn.Linear(hidden, hidden), nn.GELU(), |
| nn.Linear(hidden, k * k) |
| ) |
| |
| with torch.no_grad(): |
| for m in self.mlp: |
| if isinstance(m, nn.Linear): |
| nn.init.xavier_uniform_(m.weight) |
| nn.init.zeros_(m.bias) |
|
|
| def forward(self, F_t: torch.Tensor) -> torch.Tensor: |
| B, J, k = F_t.shape |
| mean = F_t.mean(dim=1) |
| std = F_t.std(dim=1).clamp_min(1e-6) |
| feat = torch.cat([mean, std], dim=-1) |
| L_flat = self.mlp(feat) |
| L = L_flat.view(B, k, k) |
| |
| tril_mask = torch.tril(torch.ones(k, k, device=F_t.device)).unsqueeze(0) |
| L = L * tril_mask |
| diag = torch.diagonal(L, dim1=-2, dim2=-1) |
| diag = F.softplus(diag) + 1e-3 |
| L = L.clone() |
| L.diagonal(dim1=-2, dim2=-1).copy_(diag) |
| g = sym_spd_from_cholesky(L, eps=self.metric_eps) |
| return g |
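

# Illustrative (not part of the model): how the IME metric feeds the geodesic kernel.
# A sketch with toy shapes; `_example_metric_distances` is an arbitrary helper name.
def _example_metric_distances() -> None:
    B, J, k = 2, 16, 8
    F_t = torch.randn(B, J, k)
    ime = PFIntrinsicMetricEngine(k=k)
    g = ime(F_t)                                   # (B, k, k), symmetric positive definite
    d2 = batched_geodesic_sq(F_t, F_t, g)          # (B, J, J) squared distances under g
    assert torch.allclose(g, g.transpose(-1, -2), atol=1e-5)
    assert (torch.diagonal(d2, dim1=-2, dim2=-1).abs() < 1e-4).all()  # d(x, x) ~ 0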
|
|
|
|
| class PFFieldCore(nn.Module): |
| """Evolve PF state per token step with diffusion + energy gradient + small noise.""" |
| def __init__(self, cfg: PFConfig): |
| super().__init__() |
| self.cfg = cfg |
| |
| self.mw_scale = nn.Parameter(torch.tensor(1.0)) |
| self.register_buffer('zero', torch.tensor(0.0)) |
|
|
    def total_variation(self, F_t: torch.Tensor, A: torch.Tensor) -> torch.Tensor:
        """Edge-weighted total variation of the field: mean ||F_i - F_j|| over edges of A."""
        Fi = F_t.unsqueeze(2)
        Fj = F_t.unsqueeze(1)
        diff = (Fi - Fj).norm(dim=-1)
        tv = (diff * A).sum(dim=(-1, -2)) / (A.sum(dim=(-1, -2)).clamp_min(1.0))
        return tv.mean()
|
|
    def omega_mexican_hat(self, F_t: torch.Tensor) -> torch.Tensor:
        """Mexican-hat style dispersion penalty: keep mean pairwise dispersion near a target r0."""
        B, J, k = F_t.shape
        # Subsample nodes to bound the O(J^2) pairwise computation.
        if J > 128:
            idx = torch.randperm(J, device=F_t.device)[:128]
            X = F_t[:, idx, :]
        else:
            X = F_t
        pd = pairwise_cosine(X, X)
        # Map cosine similarity to a nonnegative dispersion measure.
        dist = (1.0 - pd).clamp_min(0.0) * 2.0
        r = dist.mean(dim=(-1, -2))
        r0 = 0.8  # target dispersion (heuristic)
        loss = ((r - r0) ** 2).mean()
        return self.mw_scale.abs() * loss
|
|
| def forward(self, F_t: torch.Tensor, h_t: torch.Tensor, g_t: torch.Tensor, |
| A: torch.Tensor, F_tilde: torch.Tensor) -> Tuple[torch.Tensor, Dict[str, torch.Tensor]]: |
| cfg = self.cfg |
| |
        # Graph diffusion: -L F smooths the field along the adjacency A.
        L = normalized_graph_laplacian(A)
        diffusion = torch.matmul(L, F_t)
        # Gradient of the output-coupling energy lambda_out * ||F - F_tilde||^2.
        # Note: with the default lambda_out = 1.0 this reflects F_t about F_tilde;
        # lambda_out <= 0.5 gives a damped step toward the target pattern.
        grad_out = 2.0 * cfg.lambda_out * (F_t - F_tilde)
        # Regularizers are computed for logging only; they are not part of the update here.
        tv = self.total_variation(F_t, A)
        omega = self.omega_mexican_hat(F_t)
        # Euler step with small exploration noise.
        noise = cfg.noise_eps * torch.randn_like(F_t)
        F_next = F_t - cfg.alpha * diffusion - grad_out + noise
        stats = {
            'tv': tv.detach(),
            'omega': omega.detach(),
        }
        return F_next, stats
|
|
|
|
| class PFIntrospection(nn.Module): |
| """Valence (V), Self/Now Anchor (a), and Γ summarizer for gating.""" |
| def __init__(self, d: int, k: int, r_gamma: int = 32): |
| super().__init__() |
| self.aligner = nn.Sequential( |
| nn.Linear(d + k, 128), nn.GELU(), |
| nn.Linear(128, 64), nn.GELU(), |
| ) |
| self.val_head = nn.Linear(64, 1) |
| self.sna_head = nn.Linear(64, 1) |
| self.gamma_head = nn.Sequential( |
| nn.Linear(64 + 2, 64), nn.GELU(), |
| nn.Linear(64, r_gamma) |
| ) |
|
|
| def forward(self, F_t: torch.Tensor, h_t: torch.Tensor, |
| syn: torch.Tensor, conn: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: |
| |
| F_pool = F_t.mean(dim=1) |
| x = torch.cat([h_t, F_pool], dim=-1) |
| z = self.aligner(x) |
| V = torch.sigmoid(self.val_head(z)).squeeze(-1) |
| a = torch.sigmoid(self.sna_head(z)).squeeze(-1) |
| |
| sc = torch.stack([syn, conn], dim=-1) |
| g_in = torch.cat([z, sc], dim=-1) |
| Gamma = self.gamma_head(g_in) |
| return V, a, Gamma |
|
|
|
|
| class LogitGate(nn.Module): |
| """Additive bias to logits based on PF summary Γ.""" |
| def __init__(self, vocab_size: int, r_gamma: int): |
| super().__init__() |
| self.proj = nn.Linear(r_gamma, vocab_size, bias=False) |
| nn.init.zeros_(self.proj.weight) |
|
|
| def forward(self, z_base: torch.Tensor, Gamma: torch.Tensor) -> torch.Tensor: |
| |
| bias = self.proj(Gamma) |
| return z_base + bias |
|
|
|
|
| |
| |
| |
| class PFIntegrationMeter(nn.Module): |
| """Compute Syn, Conn (algebraic connectivity proxy), κ and broadcast flag.""" |
| def __init__(self, J: int, kappa_thresh: float = 0.6): |
| super().__init__() |
| self.kappa_thresh = kappa_thresh |
| self.score = nn.Sequential( |
| nn.Linear(2 + 2, 64), nn.GELU(), |
| nn.Linear(64, 1) |
| ) |
|
|
    @staticmethod
    def synchrony(F_t: torch.Tensor, A: torch.Tensor, eps: float = 1e-8) -> torch.Tensor:
        """Mean cosine similarity between PF nodes over the edges of A, per batch element. Returns (B,)."""
        cos = pairwise_cosine(F_t)
        num = (cos * A).sum(dim=(-1, -2))
        den = A.sum(dim=(-1, -2)).clamp_min(1.0)
        return num / den
|
|
    @staticmethod
    def connectivity(A: torch.Tensor) -> torch.Tensor:
        """Algebraic-connectivity proxy from the Fiedler value λ2 of the normalized Laplacian."""
        L = normalized_graph_laplacian(A)
        eigs = safe_eigvalsh(L, k_smallest=3)
        lambda2 = eigs[:, 1]
        # Larger λ2 => better-connected graph; squash into (0, 1). The scale 2.0 is heuristic.
        conn = torch.tanh(2.0 * lambda2.clamp_min(0.0))
        return conn
|
|
| def forward(self, F_t: torch.Tensor, A: torch.Tensor, |
| V: torch.Tensor, a: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: |
| syn = self.synchrony(F_t, A) |
| conn = self.connectivity(A) |
| feat = torch.stack([syn, conn, V, a], dim=-1) |
| kappa = torch.sigmoid(self.score(feat)).squeeze(-1) |
| broadcast = (kappa >= self.kappa_thresh).float() |
| return syn, conn, kappa, broadcast |
|
|
|
|
| |
| |
| |
| class LCAParams(nn.Module): |
| def __init__(self, beta: float = 0.7, gamma: float = 0.3, lambda_time: float = 0.2, lambda_dir: float = 0.3, tau: int = 64): |
| super().__init__() |
| self.beta = nn.Parameter(torch.tensor(beta)) |
| self.gamma = nn.Parameter(torch.tensor(gamma)) |
| self.lambda_time = nn.Parameter(torch.tensor(lambda_time)) |
| self.lambda_dir = nn.Parameter(torch.tensor(lambda_dir)) |
| self.tau = tau |
|
|
|
|
| class LCAWrapper(nn.Module): |
| """Modify attention scores: e_ij = dot - β d_g(i,j) - γ D_lc(i,j).""" |
| def __init__(self, params: LCAParams): |
| super().__init__() |
| self.params = params |
|
|
| def forward(self, Q: torch.Tensor, K: torch.Tensor, |
| F_t: torch.Tensor, g_t: torch.Tensor, |
| positions: Optional[torch.Tensor] = None, |
| u_dir: Optional[torch.Tensor] = None) -> torch.Tensor: |
| """Compute modified attention scores. |
| Q,K: (B, H, T, d_head) |
| F_t: PF nodes (B, J, k) |
| g_t: metric (B, k, k) |
| positions: (T,) or (B,T) 0..T-1 |
| u_dir: (B, d_head) approximate episode direction (can be from Γ PCA; here optional) |
| Return scores: (B, H, T, T) |
| """ |
        B, H, T, Dh = Q.shape
        scale = 1.0 / math.sqrt(Dh)
        # Standard scaled dot-product scores.
        dots = torch.matmul(Q, K.transpose(-1, -2)) * scale

        # Simple token -> PF-node assignment (round-robin); replace with a learned map if needed.
        J = F_t.shape[1]
        token_nodes = torch.arange(T, device=Q.device) % J

        # Pairwise squared geodesic distances between PF nodes, indexed per token pair.
        d_geo_sq = batched_geodesic_sq(F_t, F_t, g_t)            # (B, J, J)
        d_geo_tok = d_geo_sq[:, token_nodes][:, :, token_nodes]  # (B, T, T)
        d_geo_tok = d_geo_tok.unsqueeze(1).expand(B, H, T, T)

        # Temporal distance normalized by the episode horizon tau.
        if positions is None:
            positions = torch.arange(T, device=Q.device).view(1, T).expand(B, T)
        pos_i = positions.unsqueeze(1)
        pos_j = positions.unsqueeze(2)
        d_time = (pos_j - pos_i).abs().float() / max(1, self.params.tau)
        d_time = d_time.unsqueeze(1).expand(B, H, T, T)

        # Directional cost: misalignment of each key with the episode direction u_dir.
        if u_dir is None:
            u_dir = torch.zeros(B, Dh, device=Q.device)
        Kn = F.normalize(K, dim=-1)                              # (B, H, T, Dh)
        u_n = F.normalize(u_dir, dim=-1).view(B, 1, 1, Dh)
        dir_cost = (1.0 - (Kn * u_n).sum(-1)).clamp_min(0.0)     # (B, H, T), per key j
        dir_cost = dir_cost.unsqueeze(2).expand(B, H, T, T)      # broadcast over queries i

        scores = dots - self.params.beta.abs() * d_geo_tok - self.params.gamma.abs() * (
            self.params.lambda_time.abs() * d_time + self.params.lambda_dir.abs() * dir_cost
        )
        return scores
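

# Illustrative (not part of the model): consuming LCAWrapper scores in a causal
# attention layer. The Q/K/V projections are assumed to come from the host Transformer;
# this sketch only shows masking + softmax over the modified scores.
def _example_lca_attention(lca: LCAWrapper, Q: torch.Tensor, K: torch.Tensor, V: torch.Tensor,
                           F_t: torch.Tensor, g_t: torch.Tensor) -> torch.Tensor:
    B, H, T, Dh = Q.shape
    scores = lca(Q, K, F_t, g_t)                                 # (B, H, T, T)
    causal = torch.tril(torch.ones(T, T, device=Q.device)).bool()
    scores = scores.masked_fill(~causal, float("-inf"))          # decoder-only mask
    attn = torch.softmax(scores, dim=-1)
    return torch.matmul(attn, V)                                 # (B, H, T, Dh)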
|
|
|
|
| |
| |
| |
| class NTIController(nn.Module): |
| def __init__(self, d: int, vocab_size: int, r_gamma: int = 32, offset_scale: float = 0.5, tau: int = 64): |
| super().__init__() |
| self.tau = tau |
| self.offset_scale = offset_scale |
| self.proj = nn.Sequential( |
| nn.Linear(d + r_gamma, 128), nn.GELU(), |
| nn.Linear(128, vocab_size) |
| ) |
|
|
| def forward(self, H_seg: torch.Tensor, Gamma_seg: torch.Tensor, |
| attn_entropy: Optional[torch.Tensor] = None, |
| path_dev: Optional[torch.Tensor] = None) -> torch.Tensor: |
| """Compute Δz episodic offset for a segment. |
| H_seg: (B, τ, d), Gamma_seg: (B, τ, rγ) |
| attn_entropy/path_dev: optional scalars per batch |
| Return Δz: (B, V) |
| """ |
        # attn_entropy / path_dev are accepted for interface compatibility but not used
        # in this reference implementation.
        h_bar = H_seg.mean(dim=1)
        g_bar = Gamma_seg.mean(dim=1)
        x = torch.cat([h_bar, g_bar], dim=-1)
        dz = self.proj(x) * self.offset_scale
        return dz
|
|
|
|
| |
| |
| |
| @dataclass |
| class NTCfg: |
| d: int = 2048 |
| vocab_size: int = 50000 |
| r_gamma: int = 32 |
| J: int = 256 |
| k: int = 16 |
| K: int = 16 |
| alpha: float = 0.05 |
| noise_eps: float = 1e-3 |
| kappa_thresh: float = 0.6 |
| nti_tau: int = 64 |
| nti_period: int = 16 |
| offset_scale: float = 0.5 |
|
|
|
|
| class NTransformerCoupler(nn.Module): |
| """Parallel PF path + couplings to augment any decoder‑only LM. |
| |
| Exposes step() for single‑step inference/training and segment_update() for NTI updates. |
| """ |
| def __init__(self, cfg: NTCfg): |
| super().__init__() |
| self.cfg = cfg |
| pf_cfg = PFConfig(J=cfg.J, k=cfg.k, K=cfg.K, alpha=cfg.alpha, noise_eps=cfg.noise_eps) |
| self.adapter_out = PFAdapterOut(d=cfg.d, J=cfg.J, k=cfg.k) |
| self.ime = PFIntrinsicMetricEngine(k=cfg.k, hidden=128, metric_eps=1e-4) |
| self.pf_core = PFFieldCore(cfg=pf_cfg) |
| self.introspect = PFIntrospection(d=cfg.d, k=cfg.k, r_gamma=cfg.r_gamma) |
| self.integrator = PFIntegrationMeter(J=cfg.J, kappa_thresh=cfg.kappa_thresh) |
| self.gate = LogitGate(vocab_size=cfg.vocab_size, r_gamma=cfg.r_gamma) |
| self.lca = LCAWrapper(LCAParams(beta=0.7, gamma=0.3, lambda_time=0.2, lambda_dir=0.3, tau=cfg.nti_tau)) |
| self.nti = NTIController(d=cfg.d, vocab_size=cfg.vocab_size, r_gamma=cfg.r_gamma, |
| offset_scale=cfg.offset_scale, tau=cfg.nti_tau) |
|
|
| def initial_state(self, batch_size: int, device: Optional[torch.device] = None) -> Dict[str, torch.Tensor]: |
| device = device or next(self.parameters()).device |
| F0 = torch.randn(batch_size, self.cfg.J, self.cfg.k, device=device) * 0.02 |
| |
| idx = knn_indices(F0, self.cfg.K) |
| A0 = build_adjacency(idx, self.cfg.J) |
| g0 = self.ime(F0) |
| return {"F": F0, "A": A0, "g": g0} |
|
|
| @torch.no_grad() |
| def rebuild_graph(self, F_t: torch.Tensor) -> torch.Tensor: |
| idx = knn_indices(F_t, self.cfg.K) |
| A = build_adjacency(idx, self.cfg.J) |
| return A |
|
|
| def step(self, state: Dict[str, torch.Tensor], h_t: torch.Tensor, |
| z_base_t: torch.Tensor, |
| Q: Optional[torch.Tensor] = None, K: Optional[torch.Tensor] = None, |
| positions: Optional[torch.Tensor] = None, |
| u_dir: Optional[torch.Tensor] = None) -> Tuple[Dict[str, torch.Tensor], torch.Tensor, Dict[str, torch.Tensor]]: |
| """Single decoding step coupling. |
| state: dict with F (B,J,k), A (B,J,J), g (B,k,k) |
| h_t: (B,d) token hidden; z_base_t: (B,V) |
| Q,K: attention tensors (B,H,T,dh) for optional LCA modulation; if None, gating only |
| Returns: new_state, z_final, logs |
| """ |
| F_t, A_t, g_t = state["F"], state["A"], state["g"] |
| F_tilde = self.adapter_out(h_t) |
| |
| F_next, pf_stats = self.pf_core(F_t, h_t, g_t, A_t, F_tilde) |
| |
| g_next = self.ime(F_next) |
| with torch.no_grad(): |
| A_next = self.rebuild_graph(F_next) |
| |
| |
| syn = self.integrator.synchrony(F_next, A_next) |
| conn = self.integrator.connectivity(A_next) |
| V, a, Gamma = self.introspect(F_next, h_t, syn, conn) |
| syn, conn, kappa, broadcast = self.integrator(F_next, A_next, V, a) |
| |
| z_final = self.gate(z_base_t, Gamma) |
| |
| lca_scores = None |
| if Q is not None and K is not None: |
| lca_scores = self.lca(Q, K, F_next, g_next, positions=positions, u_dir=u_dir) |
| new_state = {"F": F_next, "A": A_next, "g": g_next, |
| "V": V.detach(), "a": a.detach(), "Gamma": Gamma.detach(), |
| "kappa": kappa.detach(), "broadcast": broadcast.detach()} |
| logs = {"tv": pf_stats["tv"], "omega": pf_stats["omega"], |
| "syn": syn.mean().detach(), "conn": conn.mean().detach(), |
| "V": V.mean().detach(), "a": a.mean().detach(), "kappa": kappa.mean().detach()} |
| if lca_scores is not None: |
| logs["lca_min"] = lca_scores.min().detach() |
| logs["lca_max"] = lca_scores.max().detach() |
| return new_state, z_final, logs |
|
|
| def segment_update(self, H_seg: torch.Tensor, Gamma_seg: torch.Tensor, |
| attn_entropy: Optional[torch.Tensor] = None, |
| path_dev: Optional[torch.Tensor] = None) -> torch.Tensor: |
| """Every r steps, compute episodic Δz via NTI. |
| H_seg: (B, τ, d); Gamma_seg: (B, τ, rγ) |
| Return: Δz (B,V) to be added to subsequent logits (late‑fusion) |
| """ |
| dz = self.nti(H_seg, Gamma_seg, attn_entropy, path_dev) |
| return dz |
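

# Illustrative (not part of the model): a streaming decode loop around the coupler.
# The host LM is abstracted as `lm_step`, a hypothetical callable returning the current
# token hidden state h_t (B, d) and base logits z_t (B, V). The sketch also shows the
# intended role of cfg.nti_period: refresh the episodic NTI offset every nti_period steps.
def _example_streaming_decode(coupler: NTransformerCoupler, lm_step, steps: int) -> None:
    device = next(coupler.parameters()).device
    B = 2
    state = coupler.initial_state(B, device=device)
    dz = torch.zeros(B, coupler.cfg.vocab_size, device=device)
    H_hist, G_hist = [], []
    for t in range(steps):
        h_t, z_base_t = lm_step(t)                   # (B, d), (B, V) from the host LM
        state, z_t, _logs = coupler.step(state, h_t, z_base_t)
        z_t = z_t + dz                               # late-fusion episodic offset
        # ... sample/argmax from z_t and feed the token back to the host LM here ...
        H_hist.append(h_t)
        G_hist.append(state["Gamma"])
        if (t + 1) % coupler.cfg.nti_period == 0 and len(H_hist) >= coupler.cfg.nti_tau:
            H_seg = torch.stack(H_hist[-coupler.cfg.nti_tau:], dim=1)   # (B, tau, d)
            G_seg = torch.stack(G_hist[-coupler.cfg.nti_tau:], dim=1)   # (B, tau, r_gamma)
            dz = coupler.segment_update(H_seg, G_seg)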
|
|
|
|
| |
| |
| |
| class PFLosses(nn.Module): |
| def __init__(self, lambda_coh: float = 0.5, lambda_gauge: float = 0.5, |
| lambda_val: float = 0.2, lambda_self: float = 0.2, lambda_meta: float = 0.4): |
| super().__init__() |
| self.lambda_coh = lambda_coh |
| self.lambda_gauge = lambda_gauge |
| self.lambda_val = lambda_val |
| self.lambda_self = lambda_self |
| self.lambda_meta = lambda_meta |
|
|
| @staticmethod |
| def tv_loss(F_t: torch.Tensor, A: torch.Tensor) -> torch.Tensor: |
| Fi = F_t.unsqueeze(2) |
| Fj = F_t.unsqueeze(1) |
| diff = (Fi - Fj).pow(2).sum(-1).sqrt() |
| return (diff * A).mean() |
|
|
    @staticmethod
    def incoh_loss(H_t: torch.Tensor, F_t: torch.Tensor) -> torch.Tensor:
        """Incoherence between the pooled PF state and the token hidden state.
        Crude alignment proxy: compares against the first k components of H_t
        (a learned projection of H_t down to k dims would be preferable)."""
        F_pool = F_t.mean(dim=1)
        k = F_pool.shape[-1]
        cos = F.cosine_similarity(F_pool, H_t[..., :k], dim=-1)
        return (1.0 - cos).mean()
|
|
    @staticmethod
    def pathdev_loss() -> torch.Tensor:
        # Placeholder: the path-deviation term is not implemented in this reference.
        return torch.zeros(())
|
|
| def forward(self, H_t: torch.Tensor, F_t: torch.Tensor, A_t: torch.Tensor, |
| V_t: torch.Tensor, a_t: torch.Tensor, |
| V_target: Optional[torch.Tensor] = None, |
| a_target: Optional[torch.Tensor] = None, |
| meta_pos: Optional[Tuple[torch.Tensor, torch.Tensor]] = None, |
| meta_neg: Optional[Tuple[torch.Tensor, torch.Tensor]] = None) -> torch.Tensor: |
| |
| L_coh = self.tv_loss(F_t, A_t) |
| |
| L_gauge = self.incoh_loss(H_t, F_t) + self.pathdev_loss() |
| |
| if V_target is not None: |
| L_val = F.mse_loss(V_t, V_target) |
| else: |
| L_val = torch.zeros((), device=F_t.device) |
| |
| if a_target is not None: |
| L_self = F.binary_cross_entropy(a_t.clamp(1e-4, 1-1e-4), a_target) |
| else: |
| L_self = torch.zeros((), device=F_t.device) |
| |
| L_meta = torch.zeros((), device=F_t.device) |
| if (meta_pos is not None) and (meta_neg is not None): |
| F_pos1, F_pos2 = meta_pos |
| F_neg1, F_neg2 = meta_neg |
| |
| def pool(Fx): |
| return F.normalize(Fx.mean(dim=1), dim=-1) |
| pos = 1.0 - F.cosine_similarity(pool(F_pos1), pool(F_pos2)).mean() |
| neg = F.cosine_similarity(pool(F_neg1), pool(F_neg2)).mean() |
| margin = 0.3 |
| L_meta = F.relu(pos + neg - margin) |
| L = (self.lambda_coh * L_coh + self.lambda_gauge * L_gauge + |
| self.lambda_val * L_val + self.lambda_self * L_self + self.lambda_meta * L_meta) |
| return L |
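

# Illustrative (not part of the model): calling PFLosses with the optional targets and
# contrastive meta pairs. The pairing strategy (two PF snapshots of the "same" episode
# as positives, snapshots of different episodes as negatives) is an assumed convention.
def _example_pf_losses() -> None:
    B, J, k, d = 2, 16, 8, 32
    losses = PFLosses()
    H_t = torch.randn(B, d)
    F_t = torch.randn(B, J, k)
    A_t = build_adjacency(knn_indices(F_t, 4), J)
    V_t, a_t = torch.rand(B), torch.rand(B)
    meta_pos = (F_t, F_t + 0.01 * torch.randn_like(F_t))      # same-episode snapshots
    meta_neg = (F_t, torch.randn(B, J, k))                    # different-episode snapshots
    loss = losses(H_t, F_t, A_t, V_t, a_t,
                  V_target=torch.rand(B), a_target=torch.rand(B).round(),
                  meta_pos=meta_pos, meta_neg=meta_neg)
    print(float(loss))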
|
|
|
|
| |
| |
| |
| class DummyDecoderOnlyLM(nn.Module): |
| """Placeholder LM exposing hidden and logits for demonstration only. |
| Replace with your actual Transformer decoder (e.g., GPT‑like) and wire the coupler around it. |
| """ |
| def __init__(self, d: int, vocab_size: int): |
| super().__init__() |
| self.d = d |
| self.emb = nn.Embedding(vocab_size, d) |
| self.ff = nn.Sequential(nn.Linear(d, d), nn.GELU(), nn.Linear(d, d)) |
| self.head = nn.Linear(d, vocab_size) |
|
|
| def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: |
| H = self.emb(x) |
| H = self.ff(H) |
| logits = self.head(H) |
| return H, logits |
|
|
|
|
| class NTransformersModel(nn.Module): |
| """Full model wrapper: LM + N‑Transformers coupler. |
| This is a minimal training‑ready scaffold; extend as needed. |
| """ |
| def __init__(self, lm: nn.Module, coupler: NTransformerCoupler, losses: PFLosses): |
| super().__init__() |
| self.lm = lm |
| self.coupler = coupler |
| self.losses = losses |
|
|
| def forward(self, x: torch.Tensor, y: Optional[torch.Tensor] = None) -> Dict[str, torch.Tensor]: |
| B, T = x.shape |
| device = x.device |
| |
| state = self.coupler.initial_state(B, device=device) |
| H, logits_base = self.lm(x) |
| logits = torch.empty_like(logits_base) |
| Gamma_hist = [] |
| |
| for t in range(T): |
| h_t = H[:, t, :] |
| z_base_t = logits_base[:, t, :] |
| state, z_t, logs = self.coupler.step(state, h_t, z_base_t) |
| logits[:, t, :] = z_t |
| if "Gamma" in state: |
| Gamma_hist.append(state["Gamma"]) |
| |
| if len(Gamma_hist) >= self.coupler.cfg.nti_tau: |
| Gamma_seg = torch.stack(Gamma_hist[-self.coupler.cfg.nti_tau:], dim=1) |
| H_seg = H[:, -self.coupler.cfg.nti_tau:, :] |
| dz = self.coupler.segment_update(H_seg, Gamma_seg) |
| logits[:, -1, :] = logits[:, -1, :] + dz |
| out = {"logits": logits} |
| if y is not None: |
| |
| loss_llm = F.cross_entropy(logits[:, :-1, :].reshape(-1, logits.size(-1)), y[:, 1:].reshape(-1)) |
| |
| H_last = H[:, -1, :] |
| F_last, A_last = state["F"], state["A"] |
| V_last, a_last = state["V"], state["a"] |
| loss_pf = self.losses(H_last, F_last, A_last, V_last, a_last) |
| loss = loss_llm + loss_pf |
| out.update({"loss": loss, "loss_llm": loss_llm, "loss_pf": loss_pf}) |
| return out |
|
|
|
|
| |
| |
| |
| if __name__ == "__main__": |
| torch.manual_seed(42) |
| cfg = NTCfg(d=256, vocab_size=8192, J=64, k=8, K=8, nti_tau=16, nti_period=8) |
| lm = DummyDecoderOnlyLM(d=cfg.d, vocab_size=cfg.vocab_size) |
| coupler = NTransformerCoupler(cfg) |
| losses = PFLosses() |
| model = NTransformersModel(lm, coupler, losses) |
|
|
| B, T = 4, 24 |
| x = torch.randint(0, cfg.vocab_size, (B, T)) |
| y = x.clone() |
|
|
| out = model(x, y) |
| print({k: float(v) if torch.is_tensor(v) and v.dim()==0 else v.shape for k, v in out.items() if k.startswith('loss') or k=='logits'}) |
|
|