"""
=============================================================================
BENCHMARK v7: LEARNABLE-FREQUENCY NEURON (not MoE, not routing)
=============================================================================

THE INSIGHT:
    sin(ω·x) with ω → 0 gives sin(ω·x) ≈ ω·x, so the neuron BECOMES linear.
    sin(ω·x) with ω large gives rich periodic features.

    So instead of routing between branches, let the neuron learn its OWN
    frequency. One forward path. One computation. No gates. No branches.
    The neuron smoothly morphs between linear and periodic.

THE ARCHITECTURE:
    ω_i = softplus(W_freq · x + b_freq)_i           # per-neuron, input-dependent frequency
    y_i = (W_val · x)_i · sin(ω_i · (W_per · x)_i)  # multiplicative + learned-freq periodic
    y   = LN(y + W_val · x)                         # residual

WHY THIS IS NOT MoE:
    - MoE: discrete routing between separate expert networks
    - This: single continuous computation, no branches, no gating sigmoid
    - The "routing" is implicit in ω: when ω → 0, sin(ωx) ≈ ωx (linear)
    - No top-k selection, no load-balancing loss, no expert capacity

WHY THIS MIGHT SOLVE OOD:
    - On training data: ω learns task-appropriate frequencies
    - On OOD data: ω has never seen these inputs, but softplus(garbage) is
      still bounded below by 0
    - Key: softplus is smooth and grows only linearly in its pre-activation,
      so ω degrades gracefully instead of exploding the way a raw,
      unconstrained ω would

=============================================================================
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import time
import json

SEEDS = [0, 1, 2]


def set_seed(s):
    torch.manual_seed(s)
    np.random.seed(s)

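# ---------------------------------------------------------------------------
# Illustrative sanity check (not part of the benchmark; call it manually).
# It numerically verifies the two claims in the header docstring: that
# sin(omega*x) ~ omega*x when omega is small (so the neuron degenerates to a
# linear map), and that softplus is non-negative and only linearly growing,
# so omega cannot explode. A sketch of the argument, nothing more; the helper
# name is ours, not a benchmark component.
# ---------------------------------------------------------------------------
def _sanity_check_linearization():
    x = torch.linspace(-1, 1, 101)
    for omega in (1e-3, 1e-1, 10.0):
        # Taylor bound: |sin(wx) - wx| <= (wx)^3 / 6, tiny for small omega
        err = (torch.sin(omega * x) - omega * x).abs().max().item()
        print(f"omega={omega:g}: max |sin(omega*x) - omega*x| = {err:.2e}")
    # softplus(z) = log(1 + e^z): ~0 for very negative z, ~z for large z
    z = torch.tensor([-100.0, 0.0, 100.0])
    print("softplus:", F.softplus(z).tolist())  # approx [0.0, 0.693, 100.0]
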
class VanillaMLP(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, n_hidden):
        super().__init__()
        layers = []
        prev = in_dim
        for _ in range(n_hidden):
            layers.extend([nn.Linear(prev, hidden_dim), nn.ReLU()])
            prev = hidden_dim
        layers.append(nn.Linear(prev, out_dim))
        self.net = nn.Sequential(*layers)
    def forward(self, x): return self.net(x)

class SinGLULayer(nn.Module):
    def __init__(self, in_dim, out_dim, mid_dim, omega_0=30.0):
        super().__init__()
        self.W_gate = nn.Linear(in_dim, mid_dim, bias=False)
        self.W_val = nn.Linear(in_dim, mid_dim, bias=False)
        self.W_out = nn.Linear(mid_dim, out_dim, bias=True)
        self.omega_0 = omega_0
        self.ln = nn.LayerNorm(out_dim)
        with torch.no_grad():
            # SIREN-style init: scale the sine branch by 1/omega_0 so that
            # omega_0 * W_gate(x) starts in a well-behaved range
            bound = math.sqrt(6.0 / in_dim) / omega_0
            self.W_gate.weight.uniform_(-bound, bound)
            nn.init.xavier_uniform_(self.W_val.weight)
            nn.init.xavier_uniform_(self.W_out.weight)
    def forward(self, x):
        return self.ln(self.W_out(torch.sin(self.omega_0 * self.W_gate(x)) * self.W_val(x)))


class SinGLUNet(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, n_hidden, omega_0=30.0):
        super().__init__()
        mid = max(2, int(hidden_dim * 2 / 3))  # narrower mid keeps params comparable to an MLP
        layers = []
        prev = in_dim
        for _ in range(n_hidden):
            layers.append(SinGLULayer(prev, hidden_dim, mid, omega_0))
            prev = hidden_dim
        layers.append(nn.Linear(prev, out_dim))
        self.layers = nn.ModuleList(layers)
    def forward(self, x):
        for l in self.layers: x = l(x)
        return x

class HybridLayer(nn.Module):
    def __init__(self, in_dim, out_dim, mid_dim, omega_0=30.0):
        super().__init__()
        self.W1 = nn.Linear(in_dim, mid_dim, bias=False)
        self.W2 = nn.Linear(in_dim, mid_dim, bias=False)
        self.phase = nn.Parameter(torch.empty(mid_dim))
        self.W3 = nn.Linear(mid_dim, out_dim, bias=True)
        self.omega_0 = omega_0
        self.ln = nn.LayerNorm(out_dim)
        # project the residual only when input/output dims differ
        self.res = nn.Linear(in_dim, out_dim, bias=False) if in_dim != out_dim else nn.Identity()
        with torch.no_grad():
            nn.init.xavier_uniform_(self.W1.weight)
            bound = math.sqrt(6.0 / in_dim) / omega_0
            self.W2.weight.uniform_(-bound, bound)
            self.phase.uniform_(-math.pi, math.pi)
            nn.init.xavier_uniform_(self.W3.weight)
    def forward(self, x):
        return self.ln(self.W3(self.W1(x) * torch.sin(self.omega_0 * self.W2(x) + self.phase)) + self.res(x))


class HybridNet(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, n_hidden, omega_0=30.0):
        super().__init__()
        mid = max(2, int(hidden_dim * 0.55))
        layers = []
        prev = in_dim
        for _ in range(n_hidden):
            layers.append(HybridLayer(prev, hidden_dim, mid, omega_0))
            prev = hidden_dim
        layers.append(nn.Linear(prev, out_dim))
        self.layers = nn.ModuleList(layers)
    def forward(self, x):
        for l in self.layers: x = l(x)
        return x

class LearnableFreqLayer(nn.Module):
    """
    The key idea: frequency ω is NOT fixed. It's learned per-neuron, per-input.

        ω(x)     = softplus(W_freq · x + b_freq)       # input-dependent, always positive
        features = W_val · x                           # value features
        periodic = sin(ω(x) · W_per · x + phase)       # frequency-adapted periodic
        y        = LN(features ⊙ periodic + features)  # multiplicative + residual

    When ω → 0:   sin(ωx) ≈ ωx → linear behavior emerges naturally
    When ω large: rich periodic features

    Param budget:
        W_val(h×d) + W_per(h×d) + W_freq(h×d) + phase(h) + b_freq(h) + LN(2h)
        = 3 full matrices. Use ~h/√3 effective width, or a low-rank W_freq.

    Efficient version: W_freq is low-rank (h×r)(r×d) to save params.
    Then the total ≈ 2 full matrices + 1 low-rank, which fits nicely.
    """
    def __init__(self, in_dim, out_dim, omega_init=10.0, freq_rank=None):
        super().__init__()
        r = freq_rank or max(2, min(in_dim // 3, 8))

        self.W_val = nn.Linear(in_dim, out_dim, bias=True)

        self.W_per = nn.Linear(in_dim, out_dim, bias=False)
        self.phase = nn.Parameter(torch.empty(out_dim))

        # low-rank factorization of W_freq: in_dim -> r -> out_dim
        self.freq_down = nn.Linear(in_dim, r, bias=False)
        self.freq_up = nn.Linear(r, out_dim, bias=True)

        self.ln = nn.LayerNorm(out_dim)
        self.omega_init = omega_init

        with torch.no_grad():
            nn.init.xavier_uniform_(self.W_val.weight)
            bound = math.sqrt(6.0 / in_dim) / omega_init
            self.W_per.weight.uniform_(-bound, bound)
            self.phase.uniform_(-math.pi, math.pi)

            nn.init.xavier_uniform_(self.freq_down.weight)
            # zero weights + inverse-softplus bias: softplus(b) = omega_init,
            # so every neuron starts at exactly the requested frequency
            nn.init.zeros_(self.freq_up.weight)
            nn.init.constant_(self.freq_up.bias, math.log(math.exp(omega_init) - 1))

    def forward(self, x):
        # input-dependent frequency, guaranteed positive by softplus
        omega = F.softplus(self.freq_up(self.freq_down(x)))

        val = self.W_val(x)

        per_input = self.W_per(x)
        periodic = torch.sin(omega * per_input + self.phase)

        # multiplicative interaction plus residual, then LayerNorm
        return self.ln(val * periodic + val)

    def get_omega(self, x):
        """For analysis: get the learned frequencies."""
        with torch.no_grad():
            return F.softplus(self.freq_up(self.freq_down(x)))

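# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; the dims and helper name here are
# ours, not tied to any benchmark task). One LearnableFreqLayer maps
# (batch, in_dim) -> (batch, out_dim), and get_omega() exposes the per-neuron,
# input-dependent frequencies: all positive, and ~omega_init at init because
# freq_up starts with zero weights and an inverse-softplus bias.
# ---------------------------------------------------------------------------
def _freq_layer_demo():
    layer = LearnableFreqLayer(in_dim=4, out_dim=16, omega_init=10.0)
    x = torch.randn(8, 4)
    y = layer(x)                 # shape (8, 16)
    omega = layer.get_omega(x)   # shape (8, 16), all > 0
    print(y.shape, f"omega mean at init = {omega.mean().item():.2f}")  # ~10.0
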
class LearnableFreqNet(nn.Module):
    def __init__(self, in_dim, out_dim, hidden_dim, n_hidden, omega_init=10.0):
        super().__init__()
        layers = []
        prev = in_dim
        for _ in range(n_hidden):
            layers.append(LearnableFreqLayer(prev, hidden_dim, omega_init))
            prev = hidden_dim
        layers.append(nn.Linear(prev, out_dim))
        self.layers = nn.ModuleList(layers)

    def forward(self, x):
        for l in self.layers: x = l(x)
        return x

    def get_all_omegas(self, x):
        """Get ω values from all learnable-freq layers."""
        omegas = []
        h = x
        for l in self.layers:
            if isinstance(l, LearnableFreqLayer):
                omegas.append(l.get_omega(h))
            h = l(h)
        return omegas

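# ---------------------------------------------------------------------------
# Sketch of the ω-analysis pattern used in main() below (sizes arbitrary,
# helper name ours): get_all_omegas() returns one (batch, hidden_dim) tensor
# per hidden layer, which the benchmark summarizes into mean/min/max stats.
# ---------------------------------------------------------------------------
def _freq_net_demo():
    net = LearnableFreqNet(in_dim=2, out_dim=1, hidden_dim=32, n_hidden=2)
    x = torch.rand(16, 2) * 2 - 1
    for i, om in enumerate(net.get_all_omegas(x)):
        print(f"layer {i}: omega mean={om.mean():.2f} min={om.min():.2f} max={om.max():.2f}")
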
def count_params(m):
    return sum(p.numel() for p in m.parameters() if p.requires_grad)


def find_hidden(in_d, out_d, n_h, target_p, model_cls, **kw):
    # binary-search the hidden width whose param count is closest to target_p
    lo, hi, best_h = 2, 512, 2
    while lo <= hi:
        mid = (lo + hi) // 2
        p = count_params(model_cls(in_d, out_d, mid, n_h, **kw))
        if abs(p - target_p) < abs(count_params(model_cls(in_d, out_d, best_h, n_h, **kw)) - target_p):
            best_h = mid
        if p < target_p: lo = mid + 1
        else: hi = mid - 1
    return best_h

def train_reg(model, xtr, ytr, xte, yte, epochs, lr, bs=256):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=epochs)
    best = float('inf')
    n = len(xtr)
    for ep in range(epochs):
        model.train()
        perm = torch.randperm(n)
        for i in range(0, n, bs):
            idx = perm[i:i+bs]
            loss = F.mse_loss(model(xtr[idx]), ytr[idx])
            opt.zero_grad(); loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            opt.step()
        sch.step()
        if (ep+1) % max(1, epochs//10) == 0:
            model.eval()
            with torch.no_grad():
                best = min(best, F.mse_loss(model(xte), yte).item())
    model.eval()
    with torch.no_grad():
        best = min(best, F.mse_loss(model(xte), yte).item())
    return best


def train_clf(model, xtr, ytr, xte, yte, epochs, lr, bs=256):
    opt = torch.optim.Adam(model.parameters(), lr=lr)
    sch = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=epochs)
    best = 0
    n = len(xtr)
    for ep in range(epochs):
        model.train()
        perm = torch.randperm(n)
        for i in range(0, n, bs):
            idx = perm[i:i+bs]
            loss = F.cross_entropy(model(xtr[idx]), ytr[idx])
            opt.zero_grad(); loss.backward()
            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)
            opt.step()
        sch.step()
        if (ep+1) % max(1, epochs//10) == 0:
            model.eval()
            with torch.no_grad():
                best = max(best, (model(xte).argmax(1) == yte).float().mean().item())
    model.eval()
    with torch.no_grad():
        best = max(best, (model(xte).argmax(1) == yte).float().mean().item())
    return best

def data_complex(n=1000):
    x = torch.rand(n, 4)*2 - 1
    y = torch.exp(torch.sin(x[:,0]**2 + x[:,1]**2) + torch.sin(x[:,2]**2 + x[:,3]**2))
    return x, y.unsqueeze(1)


def data_nested(n=1000):
    x = torch.rand(n, 2)*2 - 1
    y = torch.sin(math.pi*(x[:,0]**2 + x[:,1]**2)) * torch.cos(3*math.pi*x[:,0]*x[:,1])
    return x, y.unsqueeze(1)


def data_spiral(n=1000):
    # two interleaved spirals, one per class, with Gaussian noise
    t = torch.linspace(0, 4*np.pi, n//2)
    r = torch.linspace(0.3, 2, n//2)
    x1 = torch.stack([r*torch.cos(t), r*torch.sin(t)], 1)
    x2 = torch.stack([r*torch.cos(t+np.pi), r*torch.sin(t+np.pi)], 1)
    x = torch.cat([x1, x2]) + torch.randn(n, 2)*0.05
    y = torch.cat([torch.zeros(n//2), torch.ones(n//2)]).long()
    p = torch.randperm(n); return x[p], y[p]


def data_checker(n=1000):
    x = torch.rand(n, 2)*2 - 1
    y = ((torch.sin(3*math.pi*x[:,0]) * torch.sin(3*math.pi*x[:,1])) > 0).long()
    return x, y


def data_highfreq(n=1000):
    x = torch.linspace(-1, 1, n).unsqueeze(1)
    y = torch.sin(20*x) + torch.sin(50*x) + 0.5*torch.sin(100*x)
    return x, y


def data_memorize(n=200):
    # pure noise: tests raw capacity / memorization, not generalization
    return torch.randn(n, 8), torch.randn(n, 4)


def data_ood_train(n=800):
    # training domain: x in [-1, 1]^2
    x = torch.rand(n, 2)*2 - 1
    y = torch.sin(3*math.pi*x[:,0]) * torch.cos(3*math.pi*x[:,1]) + x[:,0]*x[:,1]
    return x, y.unsqueeze(1)


def data_ood_test(n=300):
    # same function, shifted domain: x in [1, 2]^2
    x = torch.rand(n, 2) + 1
    y = torch.sin(3*math.pi*x[:,0]) * torch.cos(3*math.pi*x[:,1]) + x[:,0]*x[:,1]
    return x, y.unsqueeze(1)

def main():
    print("="*80)
    print(" BENCHMARK v7: LEARNABLE-FREQUENCY NEURON")
    print(" sin(omega(x) · Wx) where omega is input-dependent")
    print(" omega->0: linear | omega large: periodic | NO routing, NO MoE")
    print("="*80)

    N_H = 3
    # None entries are placeholders, filled with the per-task omega below
    models = {
        'Vanilla':   (VanillaMLP, {}),
        'SinGLU':    (SinGLUNet, {'omega_0': None}),
        'Hybrid':    (HybridNet, {'omega_0': None}),
        'LearnFreq': (LearnableFreqNet, {'omega_init': None}),
    }

    # (name, type, data_fn, in_dim, out_dim, param_budget, epochs, lr, omega, train_split)
    tasks = [
        ("Complex Fn (4D)", "reg", data_complex,  4, 1, 5000, 300, 1e-3, 30.0, 750),
        ("Nested Fn (2D)",  "reg", data_nested,   2, 1, 3000, 300, 1e-3, 20.0, 750),
        ("Spiral",          "clf", data_spiral,   2, 2, 3000, 250, 1e-3, 15.0, 700),
        ("Checkerboard",    "clf", data_checker,  2, 2, 3000, 250, 1e-3, 20.0, 700),
        ("High-Freq",       "reg", data_highfreq, 1, 1, 8000, 300, 1e-3, 60.0, 700),
        ("Memorization",    "reg", data_memorize, 8, 4, 5000, 400, 1e-3, 10.0, 200),
    ]

    all_results = {}
    omega_analysis = {}

    for tname, ttype, dfn, ind, outd, budget, epochs, lr, omega, split in tasks:
        print(f"\n{'─'*80}")
        print(f" {tname} | budget ~{budget:,}")
        print(f"{'─'*80}")

        hdims = {}
        for mn, (mc, mk) in models.items():
            kw = {k: (omega if v is None else v) for k, v in mk.items()}
            hdims[mn] = find_hidden(ind, outd, N_H, budget, mc, **kw)

        task_res = {}

        for mn, (mc, mk) in models.items():
            kw = {k: (omega if v is None else v) for k, v in mk.items()}
            h = hdims[mn]
            scores = []

            for seed in SEEDS:
                set_seed(seed)
                x, y = dfn()
                # split >= n means train == test (the memorization task)
                if split >= len(x): xtr, ytr, xte, yte = x, y, x, y
                else: xtr, ytr, xte, yte = x[:split], y[:split], x[split:], y[split:]

                set_seed(seed + 100)
                model = mc(ind, outd, h, N_H, **kw)

                if ttype == 'reg': s = train_reg(model, xtr, ytr, xte, yte, epochs, lr)
                else: s = train_clf(model, xtr, ytr, xte, yte, epochs, lr)
                scores.append(s)

                # record the learned frequencies once per task (last seed)
                if mn == 'LearnFreq' and seed == SEEDS[-1]:
                    model.eval()
                    with torch.no_grad():
                        omegas = model.get_all_omegas(xte[:100])
                        all_om = torch.cat([o.flatten() for o in omegas])
                        omega_analysis[tname] = {
                            'mean': all_om.mean().item(),
                            'std': all_om.std().item(),
                            'min': all_om.min().item(),
                            'max': all_om.max().item(),
                            'pct_low': (all_om < 1.0).float().mean().item(),
                            'pct_high': (all_om > 20.0).float().mean().item(),
                        }

            p = count_params(mc(ind, outd, h, N_H, **kw))
            task_res[mn] = {'mean': np.mean(scores), 'std': np.std(scores),
                            'scores': scores, 'params': p, 'hidden': h}

        is_reg = ttype == 'reg'
        metric = "MSE ↓" if is_reg else "Acc ↑"

        if is_reg: best_mn = min(task_res, key=lambda k: task_res[k]['mean'])
        else: best_mn = max(task_res, key=lambda k: task_res[k]['mean'])

        print(f"\n {'Model':<12} {'H':>4} {'Params':>7} {metric+' (mean±std)':>28}")
        print(f" {'─'*56}")
        for mn, r in task_res.items():
            m, s = r['mean'], r['std']
            if is_reg:
                ms = f"{m:.2e}±{s:.1e}" if m < 0.001 else f"{m:.4f}±{s:.4f}"
            else:
                ms = f"{m:.1%}±{s:.3f}"
            mark = " ✓" if mn == best_mn else ""
            print(f" {mn:<12} {r['hidden']:>4} {r['params']:>7,} {ms:>28}{mark}")

        print(f" → Winner: {best_mn}")

        if tname in omega_analysis:
            oa = omega_analysis[tname]
            print(f" → LearnFreq ω: mean={oa['mean']:.1f}, range=[{oa['min']:.1f}, {oa['max']:.1f}]"
                  f" | {oa['pct_low']:.0%} linear (ω<1) | {oa['pct_high']:.0%} periodic (ω>20)")

        all_results[tname] = task_res

    print(f"\n{'─'*80}")
    print(f" OOD: Train [-1,1] → Test [1,2]")
    print(f" Key test: does ω shrink on OOD (→ linear fallback)?")
    print(f"{'─'*80}")

    ood_res = {}
    ood_omega = {}

    for mn, (mc, mk) in models.items():
        kw = {k: (20.0 if v is None else v) for k, v in mk.items()}
        h = find_hidden(2, 1, N_H, 5000, mc, **kw)
        id_scores, ood_scores = [], []

        for seed in SEEDS:
            set_seed(seed)
            xtr, ytr = data_ood_train()
            # held-out in-distribution set: same function, fresh draws on [-1,1]^2
            set_seed(seed+50)
            xid = torch.rand(200, 2)*2 - 1
            yid = (torch.sin(3*math.pi*xid[:,0])*torch.cos(3*math.pi*xid[:,1]) + xid[:,0]*xid[:,1]).unsqueeze(1)
            set_seed(seed+50)
            xood, yood = data_ood_test()

            set_seed(seed+100)
            model = mc(2, 1, h, N_H, **kw)
            s_id = train_reg(model, xtr, ytr, xid, yid, 300, 1e-3)
            model.eval()
            with torch.no_grad():
                s_ood = F.mse_loss(model(xood), yood).item()
            id_scores.append(s_id); ood_scores.append(s_ood)

            if mn == 'LearnFreq' and seed == SEEDS[-1]:
                model.eval()
                with torch.no_grad():
                    om_id = torch.cat([o.flatten() for o in model.get_all_omegas(xid[:100])])
                    om_ood = torch.cat([o.flatten() for o in model.get_all_omegas(xood[:100])])
                    ood_omega = {
                        'id_mean': om_id.mean().item(), 'id_std': om_id.std().item(),
                        'ood_mean': om_ood.mean().item(), 'ood_std': om_ood.std().item(),
                    }

        p = count_params(mc(2, 1, h, N_H, **kw))
        ood_res[mn] = {
            'id_mean': np.mean(id_scores), 'ood_mean': np.mean(ood_scores),
            'id_std': np.std(id_scores), 'ood_std': np.std(ood_scores),
            'params': p,
            'degradation': np.mean(ood_scores)/max(np.mean(id_scores), 1e-10),
        }

    best_ood = min(ood_res, key=lambda k: ood_res[k]['ood_mean'])

    print(f"\n {'Model':<12} {'ID MSE':>14} {'OOD MSE':>14} {'Degrad.':>9}")
    print(f" {'─'*52}")
    for mn, r in ood_res.items():
        mark = " ✓" if mn == best_ood else ""
        print(f" {mn:<12} {r['id_mean']:>9.4f}±{r['id_std']:.3f}"
              f" {r['ood_mean']:>9.4f}±{r['ood_std']:.3f} {r['degradation']:>8.1f}x{mark}")
    print(f" → Best OOD: {best_ood}")

    if ood_omega:
        shift = ood_omega['ood_mean'] - ood_omega['id_mean']
        print(f"\n LearnFreq ω SHIFT:")
        print(f" In-distribution:     ω = {ood_omega['id_mean']:.2f} ± {ood_omega['id_std']:.2f}")
        print(f" Out-of-distribution: ω = {ood_omega['ood_mean']:.2f} ± {ood_omega['ood_std']:.2f}")
        if shift < -1:
            print(f" → ω DROPPED by {abs(shift):.1f} on OOD → automatic linear fallback! ✓")
        elif shift > 1:
            print(f" → ω INCREASED by {shift:.1f} on OOD → no fallback ✗")
        else:
            print(f" → ω shift = {shift:+.2f} (small)")

    all_results['OOD'] = {mn: {'mean': r['ood_mean'], 'std': r['ood_std']} for mn, r in ood_res.items()}

    print(f"\n{'─'*80}")
    print(f" WHAT FREQUENCIES DID THE NEURON LEARN?")
    print(f"{'─'*80}")

    print(f"\n {'Task':<22} {'Mean ω':>8} {'Range':>16} {'%Linear':>9} {'%Periodic':>10}")
    print(f" {'─'*68}")
    for tname, oa in omega_analysis.items():
        rng = f"[{oa['min']:.1f}, {oa['max']:.1f}]"
        print(f" {tname:<22} {oa['mean']:>8.1f} {rng:>16} {oa['pct_low']:>8.0%} {oa['pct_high']:>9.0%}")

    print(f"\n{'='*80}")
    print(f" GRAND SUMMARY")
    print(f"{'='*80}")

    win_counts = {k: 0 for k in models}

    print(f"\n {'Task':<20}", end="")
    for mn in models: print(f" {mn:>12}", end="")
    print(f" {'Winner':>10}")
    print(f" {'─'*72}")

    for tname, tr in all_results.items():
        scores = {k: v['mean'] for k, v in tr.items()}
        max_s = max(scores.values())
        # heuristic: accuracies live in (0.5, 1.0]; tiny positive values are MSE
        is_clf = max_s > 0.5 and max_s <= 1.0 and min(scores.values()) >= 0
        if min(scores.values()) < 0.001: is_clf = False

        if tname == 'OOD': winner = min(scores, key=scores.get)
        elif is_clf: winner = max(scores, key=scores.get)
        else: winner = min(scores, key=scores.get)
        win_counts[winner] += 1

        row = f" {tname:<20}"
        for mn in models:
            s = scores[mn]
            if is_clf: row += f" {s:>11.1%}"
            elif s < 0.001: row += f" {s:>11.2e}"
            else: row += f" {s:>11.4f}"
        row += f" {'->'+winner:>10}"
        print(row)

    print(f"\n {'─'*72}")
    for mn, c in sorted(win_counts.items(), key=lambda x: -x[1]):
        print(f" {mn:<14} {c} wins {'█'*c*3}")

| print(f""" |
| ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| β LEARNABLE-FREQUENCY NEURON: VERDICT β |
| β β |
| β NOT MoE β single forward path, no routing, no branch selection. β |
| β The frequency Ο itself is the learned parameter. β |
| β When Οβ0: sin(Οx)βΟx, neuron becomes linear automatically. β |
| β β |
| β Check the Ο analysis above: β |
| β β’ Different Ο for different tasks = it adapts β β |
| β β’ Ο shrinks on OOD = automatic linear fallback β β |
| β β’ Mix of linear + periodic neurons per layer = specialization β β |
| ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ |
| """) |

    # persist everything for later analysis
    save = {'tasks': {}, 'ood': {}, 'omega_analysis': omega_analysis, 'ood_omega': ood_omega}
    for tname, tr in all_results.items():
        save['tasks'][tname] = {
            mn: {'mean': float(r['mean']), 'std': float(r.get('std', 0)),
                 'scores': [float(s) for s in r.get('scores', [r['mean']])],
                 'params': r.get('params', 0), 'hidden': r.get('hidden', 0)}
            for mn, r in tr.items()
        }
    save['ood'] = {mn: {k: float(v) if isinstance(v, (float, np.floating)) else v
                        for k, v in r.items()} for mn, r in ood_res.items()}
    with open('/app/results_v7.json', 'w') as f:
        json.dump(save, f, indent=2, default=str)
    print(" Saved to /app/results_v7.json")


if __name__ == "__main__":
    main()