| """ |
| spade_unrolled.py — SPADE Unrolled (Algorithm Unrolling + Context Encoder) |
| ================================================================================ |
| |
| Replaces the fixed S-SPADE solver (v12) with a learned parameter predictor. |
| |
| Architecture |
| ------------ |
| |
| Input (limited audio frame + K context frames) |
| ↓ |
| SpectralFeatureExtractor |
| • log-mel spectrogram (n_mels=32) per frame |
| • short-time loudness proxy (RMS in dB) |
| → shape: (B, K+1, n_mels+1) |
| ↓ |
| ContextEncoder (causal GRU) |
| • 2-layer GRU, hidden_size=128 |
| • Only K previous frames are seen (strict causality) |
| → h_t: (B, 128) |
| ↓ |
| ParameterHead (linear → 5 outputs per frame) |
| • lambda_lf : soft-threshold for LF bins (≥ 0) |
| • lambda_hf : soft-threshold for HF bins (≥ 0) |
| • delta_factor: scales delta_db (UnrolledConfig.delta_factor_range, default [0.5, 1.5]) |
| • gmax_factor : scales max_gain_db (gmax_factor_range, default [0.2, 2.0]) |
| • eps_factor : scales convergence eps (eps_factor_range, default [0.5, 1.5]) |
| ↓ |
| UnrolledADMM (K_unroll fixed layers — default 4 in UnrolledConfig — fully differentiable) |
| Each layer: |
| 1. Analysis: z = frana(x, frame) — DCT / RDFT |
| 2. Soft-thresh: z̃ = S_λ(z + u) — stratified LF/HF |
| 3. Synthesis: Dv = frsyn(z̃ - u, frame, M) — reconstruction |
| 4. Projection: pDv = proj_Γ(Dv, yc, masks, g_max) |
| 5. Residual: z ← z̃ - u - frana(Dv - pDv, frame) |
| 6. Dual update: u ← u + z - z̃ |
| → x̂ = frsyn(z_K, frame, M) |
| ↓ |
| Output: restored audio frame (B, M) |
| |
| Key differences from v12 (classical SPADE) |
| ------------------------------------------- |
| • Hard thresholding H_k (L0) → differentiable soft thresholding S_λ (L1 proxy) |
| • Fixed hyperparameters → predicted per-frame by ContextEncoder |
| • Fixed iteration count → exactly K_unroll unrolled layers (no convergence loop) |
| • Global sparsity level k → independent LF/HF soft-threshold budgets |
| |
| Transform operators (GPU-compatible, differentiable) |
| ------------------------------------------------------ |
| The DCT-II / RDFT analysis-synthesis operators from spade_declip_v12 are |
| re-implemented in PyTorch so gradients flow through them. Numerically they |
| match scipy to float32 precision. |
| |
| Projection operator |
| ------------------- |
| proj_Γ is already differentiable (clamp + max/min). Gradients flow through |
| the Icp / Icm branches; Ir samples are pinned (zero gradient, correct). |
| |
| WOLA (Weighted Overlap-Add) integration |
| ---------------------------------------- |
| The model processes individual frames. The full WOLA loop lives in |
| SPADEUnrolledInference which wraps UnrolledSPADE with frame extraction + |
| accumulation. Training uses individual frames to allow per-sample gradient |
| computation without materialising the full signal in the graph. |
| |
| References |
| ---------- |
| [1] Gregor & LeCun, "Learning Fast Approximations of Sparse Coding", ICML 2010. |
| [2] Adler et al., "Learned Primal-Dual Reconstruction", IEEE TMI 2018. |
| [3] Kitić et al., "SPADE", LVA/ICA 2015 (arXiv:1506.01830). |
| [4] Záviška et al., "Revisiting SPADE", 2018 (arXiv:1807.03612). |
| """ |
|
|
| from __future__ import annotations |
|
|
| import math |
| from dataclasses import dataclass, field |
| from typing import Literal, Optional, Tuple |
|
|
| import numpy as np |
|
|
| try: |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| _TORCH_OK = True |
| except ImportError: |
| _TORCH_OK = False |
| raise ImportError("PyTorch is required for spade_unrolled.py (pip install torch)") |
|
|
|
|
| |
| |
| |
|
|
@dataclass
class UnrolledConfig:
    """All hyperparameters for the SPADE-Unrolled model."""

    # ---- Signal / transform frame -------------------------------------
    # Frame length M in samples; analysis dimension P is M (DCT) or 2M (RDFT).
    window_length: int = 2048
    # WOLA hop a in samples (75% overlap with the defaults above).
    hop_length: int = 512
    # Analysis/synthesis frame type used by frana/frsyn.
    frame: Literal["dct", "rdft"] = "rdft"
    sample_rate: int = 44100

    # ---- Unrolled solver ----------------------------------------------
    # Number of unrolled ADMM layers (fixed depth, no convergence loop).
    K_unroll: int = 4

    # ---- Context encoder ----------------------------------------------
    # Number of previous frames fed to the causal GRU.
    K_context: int = 8
    # Mel bands in SpectralFeatureExtractor (features per frame = n_mels + 1).
    n_mels: int = 32
    gru_hidden: int = 128
    gru_layers: int = 2

    # ---- Physical output ranges of the 5 predicted parameters ---------
    # Each sigmoid head output is affinely mapped into [lo, hi].
    # Soft-threshold budget for low-frequency bins.
    lambda_lf_range: Tuple[float, float] = (1e-3, 0.50)
    # Soft-threshold budget for high-frequency bins (much smaller: HF DCT
    # coefficients are small after per-frame normalisation).
    lambda_hf_range: Tuple[float, float] = (1e-3, 0.08)
    # Multiplier on base_delta_db.
    delta_factor_range: Tuple[float, float] = (0.5, 1.5)
    # Multiplier on base_max_gain_db (controls the projection gain bound).
    gmax_factor_range: Tuple[float, float] = (0.2, 2.0)
    # Multiplier on base_eps.
    eps_factor_range: Tuple[float, float] = (0.5, 1.5)

    # ---- LF/HF stratification -----------------------------------------
    # Bins below this frequency use lambda_lf; at/above use lambda_hf.
    lf_cutoff_hz: float = 8000.0

    # ---- Base (pre-factor) parameter values ---------------------------
    # Multiplied by the predicted *_factor outputs above.
    base_delta_db: float = 3.5
    base_max_gain_db: float = 6.0
    base_eps: float = 0.05

    # Ratio folded into the lambda_lf bias initialisation of the
    # ContextEncoder head (read there via getattr for backward compat).
    lf_delta_ratio: float = 0.286
|
|
|
|
| |
| |
| |
|
|
| def _dct2(x: torch.Tensor) -> torch.Tensor: |
| """Batched orthonormal DCT-II. x: (..., N) → (..., N). |
| Matches scipy.fft.dct(x, type=2, norm='ortho') to float32. |
| Makhoul (1980) FFT-based algorithm. |
| """ |
| N = x.shape[-1] |
| v = torch.cat([x[..., ::2], x[..., 1::2].flip(-1)], dim=-1) |
| V = torch.fft.fft(v.double(), dim=-1) |
| k = torch.arange(N, device=x.device, dtype=torch.float64) |
| tw = torch.exp(-1j * math.pi * k / (2.0 * N)) |
| C = (tw * V).real * math.sqrt(2.0 / N) |
| C = C.clone() |
| C[..., 0] /= math.sqrt(2.0) |
| return C.to(x.dtype) |
|
|
|
|
| def _idct2(X: torch.Tensor) -> torch.Tensor: |
| """Batched orthonormal IDCT-II. X: (..., N) → (..., N). |
| Inverse of _dct2. BUG-GPU-3 fix included. |
| """ |
| N = X.shape[-1] |
| C = X.double() * math.sqrt(N / 2.0) |
| C = C.clone() |
| C[..., 0] *= math.sqrt(2.0) |
| ipart = torch.zeros_like(C) |
| ipart[..., 1:] = -C.flip(-1)[..., :-1] |
| W = torch.view_as_complex(torch.stack([C, ipart], dim=-1)) |
| k = torch.arange(N, device=X.device, dtype=torch.float64) |
| V = W * torch.exp(1j * math.pi * k / (2.0 * N)) |
| v = torch.fft.ifft(V, dim=-1).real |
| half = (N + 1) // 2 |
| x = torch.empty_like(v) |
| x[..., ::2] = v[..., :half] |
| x[..., 1::2] = v[..., half:].flip(-1) |
| return x.to(X.dtype) |
|
|
|
|
def frana(x: torch.Tensor, frame: str) -> torch.Tensor:
    """Analysis operator A: (..., M) → (..., P). Differentiable.

    DCT frame : P = M — plain orthonormal DCT-II.
    RDFT frame: P = 2M — DCT of x stacked with the DCT of time-reversed x,
    each divided by sqrt(2) so the combined frame stays tight.
    """
    if frame == "dct":
        return _dct2(x)
    root2 = math.sqrt(2.0)
    cos_coeffs = _dct2(x) / root2
    sin_coeffs = _dct2(x.flip(-1)) / root2
    return torch.cat((cos_coeffs, sin_coeffs), dim=-1)
|
|
|
|
def frsyn(z: torch.Tensor, frame: str, M: int) -> torch.Tensor:
    """Synthesis operator D = A^H: (..., P) → (..., M). Differentiable.

    Adjoint of frana. For the RDFT frame the second half of z is passed
    through the IDCT and then time-reversed — flipping the OUTPUT is the
    adjoint of flipping the input in frana.
    """
    if frame == "dct":
        return _idct2(z)
    root2 = math.sqrt(2.0)
    first_half = _idct2(z[..., :M]) / root2
    second_half = _idct2(z[..., M:]).flip(-1) / root2
    return first_half + second_half
|
|
|
|
def build_lf_mask(M: int, frame: str, sr: int, lf_cutoff_hz: float,
                  device: torch.device) -> torch.Tensor:
    """Boolean LF-bin mask of shape (P,): True where the bin is below lf_cutoff_hz.

    DCT frame: P = M. RDFT frame: P = 2M, and the cutoff is mirrored onto
    the second (reversed-DCT) half of the coefficient vector.
    """
    n_bins = M if frame == "dct" else 2 * M
    lf = torch.zeros(n_bins, dtype=torch.bool, device=device)
    # Bin k corresponds to frequency k * sr / (2M); always keep ≥ 1 LF bin.
    cutoff_bin = int(math.ceil(lf_cutoff_hz * 2.0 * M / sr))
    cutoff_bin = max(1, min(cutoff_bin, M))
    lf[:cutoff_bin] = True
    if frame != "dct":
        lf[M:M + cutoff_bin] = True
    return lf
|
|
|
|
| |
| |
| |
|
|
def proj_gamma_torch(
    w: torch.Tensor,
    yc: torch.Tensor,
    Ir: torch.Tensor,
    Icp: torch.Tensor,
    Icm: torch.Tensor,
    g_max: float = float("inf"),
) -> torch.Tensor:
    """
    Differentiable projection onto the consistency set Γ.

    Reliable samples (Ir): pinned to yc. Gradient through these positions
    is zero by construction — the physically correct inductive bias:
    SPADE must be transparent on non-limited regions.
    Positive clipped (Icp): at least yc, at most yc * g_max (if finite).
    Negative clipped (Icm): at most yc, at least yc * g_max (if finite).
    """
    out = torch.where(Ir, yc, w.clone())
    finite_gain = math.isfinite(g_max)

    # --- positive clipped branch -------------------------------------
    if finite_gain:
        floor_p = yc * Icp.float()
        # Upper bound never drops below the lower bound.
        ceil_p = (floor_p * g_max).clamp(min=floor_p)
        bounded_p = torch.clamp(torch.maximum(out, floor_p),
                                min=floor_p, max=ceil_p)
        out = torch.where(Icp, bounded_p, out)
    else:
        out = torch.where(Icp, torch.maximum(out, yc), out)

    # --- negative clipped branch -------------------------------------
    if finite_gain:
        ceil_m = yc * Icm.float()
        # Lower bound never rises above the upper bound.
        floor_m = (ceil_m * g_max).clamp(max=ceil_m)
        bounded_m = torch.clamp(torch.minimum(out, ceil_m),
                                min=floor_m, max=ceil_m)
        out = torch.where(Icm, bounded_m, out)
    else:
        out = torch.where(Icm, torch.minimum(out, yc), out)

    return out
|
|
|
|
| |
| |
| |
|
|
def soft_thresh_stratified(
    z: torch.Tensor,
    u: torch.Tensor,
    lambda_lf: torch.Tensor,
    lambda_hf: torch.Tensor,
    lf_mask: torch.Tensor,
) -> torch.Tensor:
    """
    Soft-threshold S_λ(z + u) with separate LF/HF budgets.

        S_λ(x) = sign(x) * max(|x| − λ, 0)

    lf_mask selects which bins use lambda_lf (True) versus lambda_hf
    (False). Differentiable replacement for classical SPADE's hard H_k.
    """
    combined = z + u
    # Broadcast the (P,) mask over the leading batch dimensions.
    mask_shape = [1] * (combined.dim() - 1) + [-1]
    thresholds = torch.where(lf_mask.view(*mask_shape), lambda_lf, lambda_hf)
    shrunk = combined.abs() - thresholds
    return torch.sign(combined) * F.relu(shrunk)
|
|
|
|
| |
| |
| |
|
|
class SpectralFeatureExtractor(nn.Module):
    """
    Converts a raw audio frame (shape: B × M) into a feature vector
    suitable for the ContextEncoder.

    Features per frame:
        • log-mel spectrogram: n_mels values (shape of spectral envelope)
        • short-time loudness: 1 value (RMS in dB, proxy for LUFS)
        → total: n_mels + 1 features

    Implementation note:
        Uses a fixed (non-trained) triangular mel filterbank computed from
        the DCT-II power spectrum. Mel filters are registered as buffers so
        they move with the module to the correct device automatically.
    """

    def __init__(self, cfg: UnrolledConfig):
        super().__init__()
        self.M = cfg.window_length
        self.sr = cfg.sample_rate
        self.n_mels = cfg.n_mels
        # Analysis dimension of the transform frame (M for DCT, 2M for
        # RDFT). Not used by forward(); kept for introspection by callers.
        self.P = self.M if cfg.frame == "dct" else 2 * self.M

        # Buffer (not nn.Parameter): moved by .to(device)/.cuda(), saved in
        # state_dict, but never updated by the optimiser.
        mel_filters = self._build_mel_filterbank()
        self.register_buffer("mel_filters", mel_filters)

    def _build_mel_filterbank(self) -> torch.Tensor:
        """Triangular mel filterbank as a (n_mels, M) matrix over DCT bins."""
        def hz_to_mel(f):
            return 2595.0 * math.log10(1.0 + f / 700.0)

        def mel_to_hz(m):
            return 700.0 * (10.0 ** (m / 2595.0) - 1.0)

        M = self.M
        sr = self.sr
        n_mels = self.n_mels

        # n_mels + 2 edge points: filter m spans [pt_m, pt_{m+1}, pt_{m+2}],
        # spaced uniformly on the mel scale between 20 Hz and min(sr/2, 20 kHz).
        mel_lo = hz_to_mel(20.0)
        mel_hi = hz_to_mel(min(sr / 2.0, 20000.0))
        mel_pts = torch.linspace(mel_lo, mel_hi, n_mels + 2)
        hz_pts = torch.tensor([mel_to_hz(m.item()) for m in mel_pts])

        # DCT bin k corresponds to frequency k * sr / (2M).
        freqs = torch.arange(M, dtype=torch.float32) * sr / (2.0 * M)
        filters = torch.zeros(n_mels, M)

        for m in range(n_mels):
            f_lo = hz_pts[m].item()
            f_c = hz_pts[m + 1].item()
            f_hi = hz_pts[m + 2].item()
            # Rising slope of the triangle: f_lo → f_c.
            mask_r = (freqs >= f_lo) & (freqs <= f_c)
            if (f_c - f_lo) > 0:
                filters[m][mask_r] = (freqs[mask_r] - f_lo) / (f_c - f_lo)
            # Falling slope: f_c → f_hi.
            mask_f = (freqs > f_c) & (freqs <= f_hi)
            if (f_hi - f_c) > 0:
                filters[m][mask_f] = (f_hi - freqs[mask_f]) / (f_hi - f_c)

        # Area-normalise each filter so band energies are comparable
        # across narrow and wide triangles.
        area = filters.sum(dim=-1, keepdim=True).clamp(min=1e-8)
        return filters / area

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        x: (B, M) — raw audio frame (windowed or not)
        returns: (B, n_mels + 1) — spectral features
        """
        B, M = x.shape

        # Power spectrum from the orthonormal DCT-II (first M coefficients;
        # for the DCT frame this is the full coefficient vector).
        dct_coeff = _dct2(x.float())
        power_spec = dct_coeff[:, :M] ** 2
        mel_spec = torch.matmul(power_spec,
                                self.mel_filters.T)
        # Clamp before log to avoid -inf on silent frames.
        log_mel = torch.log(mel_spec.clamp(min=1e-10))

        # Loudness proxy: frame RMS in dB. Not true LUFS (no K-weighting
        # or gating), but monotone in perceived level.
        rms = x.pow(2).mean(dim=-1, keepdim=True).clamp(min=1e-10).sqrt()
        lufs = 20.0 * torch.log10(rms.clamp(min=1e-10))

        return torch.cat([log_mel, lufs], dim=-1)
|
|
|
|
| |
| |
| |
|
|
class ContextEncoder(nn.Module):
    """
    Causal GRU encoder that predicts per-frame SPADE parameters from
    the spectral context of K previous frames + the current frame.

    Input:  (B, K_context+1, n_feats) — spectral features, last position = current
    Output: (B, 5) — [lambda_lf, lambda_hf, delta_factor, gmax_factor, eps_factor],
            each mapped into its configured physical range.

    Architecture:
        input linear projection → multi-layer GRU → last hidden state → head

    Causality:
        The GRU processes the sequence [frame_{t-K}, …, frame_{t-1}, frame_t]
        in forward order. Only the hidden state at the LAST position (frame_t)
        is used to predict parameters for frame_t. No future frames are seen.

    Parameter ~count (defaults: n_feats=33, hidden=128, 2 layers):
        input_proj: 64 * (n_feats+1)            ≈  2 K
        GRU layer 1: 3 * 128 * (64+128+1)       ≈ 74 K
        GRU layer 2: 3 * 128 * (128+128+1)      ≈ 98 K
        head: 128 → 64 → 5                      ≈  8 K
        Total: ~182 K (target ≤ 200 K)
    """

    def __init__(self, cfg: UnrolledConfig):
        super().__init__()
        self.cfg = cfg
        n_feats = cfg.n_mels + 1  # n_mels log-mel bands + 1 loudness value
        proj_dim = 64

        self.input_proj = nn.Sequential(
            nn.Linear(n_feats, proj_dim),
            nn.LayerNorm(proj_dim),
            nn.GELU(),
        )
        self.gru = nn.GRU(
            input_size=proj_dim,
            hidden_size=cfg.gru_hidden,
            num_layers=cfg.gru_layers,
            batch_first=True,
            # nn.GRU warns if dropout > 0 with a single layer.
            dropout=0.1 if cfg.gru_layers > 1 else 0.0,
        )
        self.head = nn.Sequential(
            nn.Linear(cfg.gru_hidden, 64),
            nn.GELU(),
            nn.Linear(64, 5),
        )

        # Bias initialisation of the final linear layer: steers the sigmoid
        # outputs (see _scale_outputs) to sensible starting points instead
        # of sigmoid(0) = 0.5 mid-range.
        # getattr guards against older configs without lf_delta_ratio.
        lf_ratio = getattr(cfg, "lf_delta_ratio", 1.0)
        lf_bias_offset = math.log(max(lf_ratio, 1e-3))
        with torch.no_grad():
            # Output 0 (lambda_lf): base logit -1.4 shifted by log(lf_ratio).
            self.head[-1].bias[0] = -1.4 + lf_bias_offset
            # Output 1 (lambda_hf): sigmoid(-0.54) ≈ 0.37 of lambda_hf_range.
            self.head[-1].bias[1] = -0.54

    def forward(self, feat_seq: torch.Tensor) -> torch.Tensor:
        """
        feat_seq: (B, K_context+1, n_feats) — spectral features, ordered t-K … t
        returns:  (B, 5) — physical parameter values
        """
        B, T, _ = feat_seq.shape
        projected = self.input_proj(feat_seq)
        gru_out, _ = self.gru(projected)
        # Strict causality: only the hidden state at the current frame is used.
        h_t = gru_out[:, -1, :]

        raw = self.head(h_t)
        params = self._scale_outputs(raw)
        return params

    def _scale_outputs(self, raw: torch.Tensor) -> torch.Tensor:
        """Apply sigmoid + affine rescaling to map raw logits → physical ranges."""
        s = torch.sigmoid(raw)

        def rescale(x_01, lo, hi):
            # Map a value in [0, 1] into [lo, hi].
            return lo + (hi - lo) * x_01

        cfg = self.cfg
        lambda_lf = rescale(s[:, 0], *cfg.lambda_lf_range)
        lambda_hf = rescale(s[:, 1], *cfg.lambda_hf_range)
        delta_factor = rescale(s[:, 2], *cfg.delta_factor_range)
        gmax_factor = rescale(s[:, 3], *cfg.gmax_factor_range)
        eps_factor = rescale(s[:, 4], *cfg.eps_factor_range)

        return torch.stack([lambda_lf, lambda_hf, delta_factor,
                            gmax_factor, eps_factor], dim=-1)
|
|
|
|
| |
| |
| |
|
|
class UnrolledADMM(nn.Module):
    """
    K_unroll unrolled S-SPADE ADMM layers with differentiable soft thresholding.

    Each layer follows the S-SPADE update equations from [4] eq.(12),
    with hard thresholding H_k replaced by stratified soft thresholding S_λ:

        z̄^(l) = S_{λ_LF, λ_HF}( z^(l-1) + u^(l-1) )   ← stratified soft thresh
        v^(l)  = z̄^(l) - u^(l-1)
        Dv     = frsyn(v^(l), frame, M)
        pDv    = proj_Γ(Dv, yc, masks, g_max)
        z^(l)  = v^(l) - frana(Dv - pDv, frame)
        u^(l)  = u^(l-1) + z^(l) - z̄^(l)              ← dual update

    The frame parameters (lambda_lf, lambda_hf, g_max) are computed ONCE before
    the loop from the ContextEncoder output and held constant across all layers
    for the current frame.

    Learnable per-layer scalings
    ----------------------------
    Following Gregor & LeCun (2010), a learnable scale per layer for both
    the threshold and the dual step:
        layer_lf_scale[l]   : multiplied on lambda_lf before thresholding
        layer_hf_scale[l]   : multiplied on lambda_hf before thresholding
        layer_dual_scale[l] : multiplied on the dual update magnitude

    Threshold scales are initialised on a coarse-to-fine schedule
    (linspace 1.5 → 0.3 across layers: coarser thresholding early, finer
    late); dual scales start at 1.0. All are learned jointly with the
    encoder. Total learnable params here: 3 × K_unroll scalars (negligible).
    """

    def __init__(self, cfg: UnrolledConfig):
        super().__init__()
        self.cfg = cfg
        self.M = cfg.window_length   # frame length (samples)
        self.frame = cfg.frame       # "dct" or "rdft"
        self.K = cfg.K_unroll        # fixed number of unrolled layers

        # Coarse-to-fine per-layer threshold schedule; dual step starts at 1.
        n = self.K
        init_scales = torch.linspace(1.5, 0.3, n)
        self.layer_lf_scale = nn.Parameter(init_scales.clone())
        self.layer_hf_scale = nn.Parameter(init_scales.clone())
        self.layer_dual_scale = nn.Parameter(torch.ones(n))

    def forward(
        self,
        yc_w: torch.Tensor,
        Ir: torch.Tensor,
        Icp: torch.Tensor,
        Icm: torch.Tensor,
        lambda_lf: torch.Tensor,
        lambda_hf: torch.Tensor,
        g_max: torch.Tensor,
        lf_mask: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """
        Parameters
        ----------
        yc_w       : (B, M)  windowed limited frame
        Ir/Icp/Icm : (B, M)  reliable / positive-clipped / negative-clipped masks
        lambda_lf, lambda_hf, g_max : (B,) per-sample parameters
        lf_mask    : (P,)    boolean LF-bin mask (see build_lf_mask)

        Returns
        -------
        x_hat     : (B, M) — restored frame (final layer)
        z_thresh  : (B, P) — thresholded coefficients at final layer
                    (used for sparsity loss: L1 penalty)
        x_hat_mid : (B, M) | None — restored frame at K//2 layer
                    (used for deep supervision auxiliary loss)
        """
        B, M = yc_w.shape
        P = M if self.frame == "dct" else 2 * M

        # Per-frame amplitude normalisation so the predicted λ ranges are
        # amplitude-independent. frame_scale is detached: the scale itself
        # carries no gradient. Solver runs in float64 for numerical
        # stability; outputs are cast back to float32.
        yc_d = yc_w.double()
        z_init = frana(yc_d, self.frame)
        frame_scale = z_init.abs().amax(dim=-1, keepdim=True).clamp(min=1e-8).detach()
        yc_d_norm = yc_d / frame_scale

        # ADMM state: primal coefficients zi, scaled dual ui (starts at 0).
        zi = frana(yc_d_norm, self.frame)
        ui = torch.zeros_like(zi)

        # (B,) → (B, 1) so thresholds broadcast over the P coefficient bins.
        lam_lf = lambda_lf.unsqueeze(-1).double()
        lam_hf = lambda_hf.unsqueeze(-1).double()

        Ir_d = Ir.bool()
        Icp_d = Icp.bool()
        Icm_d = Icm.bool()
        yc_norm_d = yc_d_norm

        # Tap point for deep supervision (after K//2 layers).
        mid_layer = self.K // 2
        x_hat_mid: Optional[torch.Tensor] = None
        zb_last = torch.zeros_like(zi)

        for l in range(self.K):
            # clamp(min=0.1) keeps the learned threshold scales positive.
            scale_lf = self.layer_lf_scale[l].double().clamp(min=0.1)
            scale_hf = self.layer_hf_scale[l].double().clamp(min=0.1)
            scale_dual = self.layer_dual_scale[l].double()

            # 1. Stratified soft-thresholding of z + u.
            zb = soft_thresh_stratified(
                zi, ui,
                lam_lf * scale_lf,
                lam_hf * scale_hf,
                lf_mask,
            )
            zb_last = zb

            # 2. Synthesise the current primal estimate.
            v_c = zb - ui
            Dv = frsyn(v_c, self.frame, M)

            # 3. Projection onto Γ (inlined variant of proj_gamma_torch
            #    with a per-sample tensor g_max). Reliable samples pinned.
            pDv = Dv.clone()
            pDv = torch.where(Ir_d, yc_norm_d, pDv)

            # Positive clipped: at least yc, at most yc * g_max (g_max ≥ 1).
            lo_p = yc_norm_d
            hi_p = lo_p * g_max.unsqueeze(-1).double().clamp(min=1.0)
            pDv = torch.where(Icp_d, torch.clamp(torch.maximum(pDv, lo_p),
                                                 min=lo_p, max=hi_p), pDv)

            # Negative clipped: at most yc, at least yc * g_max (yc < 0 here,
            # so yc * g_max ≤ yc; the extra minimum is a safety net).
            up_m = yc_norm_d
            lo_m = up_m * g_max.unsqueeze(-1).double().clamp(min=1.0)
            lo_m_c = torch.minimum(lo_m, up_m)
            pDv = torch.where(Icm_d, torch.clamp(torch.minimum(pDv, up_m),
                                                 min=lo_m_c, max=up_m), pDv)

            # 4. Primal residual step.
            zi = v_c - frana(Dv - pDv, self.frame)

            # 5. Dual update with learnable per-layer step size.
            ui = ui + (zi - zb) * scale_dual

            # Mid-layer reconstruction for deep supervision.
            if l == mid_layer - 1:
                x_mid_norm = frsyn(zi, self.frame, M)
                x_hat_mid = (x_mid_norm * frame_scale).float()

        # Final synthesis; undo the per-frame normalisation.
        x_hat_norm = frsyn(zi, self.frame, M)
        x_hat = (x_hat_norm * frame_scale).float()

        # De-normalised thresholded coefficients for the sparsity loss.
        z_thresh = (zb_last * frame_scale).float()

        return x_hat, z_thresh, x_hat_mid
|
|
|
|
| |
| |
| |
|
|
class SPADEUnrolled(nn.Module):
    """
    Full SPADE-Unrolled model.

    Combines:
        1. SpectralFeatureExtractor — raw frames → spectral features
        2. ContextEncoder           — spectral context → per-frame SPADE params
        3. UnrolledADMM             — K differentiable ADMM layers

    Forward pass (single-frame mode for training):
        • Takes a batch of (limited frame, K context frames, clipping masks)
        • Returns the restored frame and the predicted parameters (for logging)

    Inference mode (WOLA loop):
        • Use SPADEUnrolledInference wrapper to process full signals
    """

    def __init__(self, cfg: UnrolledConfig):
        super().__init__()
        self.cfg = cfg

        self.feature_extractor = SpectralFeatureExtractor(cfg)
        self.context_encoder = ContextEncoder(cfg)
        self.unrolled_admm = UnrolledADMM(cfg)

        # Lazily-built LF/HF bin mask, cached per device. A plain attribute
        # (not a buffer): it is cheap to rebuild and re-created on device
        # change in _get_lf_mask.
        self._lf_mask: Optional[torch.Tensor] = None

    def _get_lf_mask(self, device: torch.device) -> torch.Tensor:
        # Rebuild only on first use or after the model moved devices.
        if self._lf_mask is None or self._lf_mask.device != device:
            self._lf_mask = build_lf_mask(
                self.cfg.window_length, self.cfg.frame,
                self.cfg.sample_rate, self.cfg.lf_cutoff_hz,
                device,
            )
        return self._lf_mask

    def forward(
        self,
        yc_w: torch.Tensor,
        ctx_frames: torch.Tensor,
        Ir: torch.Tensor,
        Icp: torch.Tensor,
        Icm: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, Optional[torch.Tensor]]:
        """
        Parameters
        ----------
        yc_w       : (B, M)    windowed limited frame
        ctx_frames : (B, K, M) K previous (windowed) frames, oldest first
        Ir/Icp/Icm : (B, M)    clipping masks

        Returns
        -------
        x_hat    : (B, M) — restored frame (float32)
        params   : (B, 5) — predicted per-frame parameters
        z_thresh : (B, P) — thresholded coefficients (for sparsity loss)
        x_hat_mid: (B, M)|None — mid-layer reconstruction (for deep supervision)
        """
        B = yc_w.shape[0]
        device = yc_w.device

        # Features of the current (limited) frame.
        feat_curr = self.feature_extractor(yc_w)

        # Features of the K context frames, flattened into one batched pass.
        B, K, M = ctx_frames.shape
        ctx_flat = ctx_frames.reshape(B * K, M)
        feat_ctx_flat = self.feature_extractor(ctx_flat)
        feat_ctx = feat_ctx_flat.reshape(B, K, -1)

        # Causal sequence order: [t-K, …, t-1, t].
        feat_seq = torch.cat([feat_ctx, feat_curr.unsqueeze(1)], dim=1)

        params = self.context_encoder(feat_seq)

        lambda_lf = params[:, 0]
        lambda_hf = params[:, 1]
        # NOTE: delta_factor is extracted but not consumed by the solver
        # below; it is exposed to callers via `params` (logging/losses).
        # params[:, 4] (eps_factor) is likewise only exposed via `params`.
        delta_factor = params[:, 2]
        gmax_factor = params[:, 3]

        # Per-sample projection gain bound: dB → linear amplitude.
        g_max_db = self.cfg.base_max_gain_db * gmax_factor
        g_max = 10.0 ** (g_max_db / 20.0)

        lf_mask = self._get_lf_mask(device)

        x_hat, z_thresh, x_hat_mid = self.unrolled_admm(
            yc_w=yc_w,
            Ir=Ir, Icp=Icp, Icm=Icm,
            lambda_lf=lambda_lf,
            lambda_hf=lambda_hf,
            g_max=g_max,
            lf_mask=lf_mask,
        )

        return x_hat, params, z_thresh, x_hat_mid

    def parameter_count(self) -> int:
        """Total trainable parameters."""
        return sum(p.numel() for p in self.parameters() if p.requires_grad)
|
|
|
|
| |
| |
| |
|
|
class SPADEUnrolledLoss(nn.Module):
    """
    Composite loss for SPADE-Unrolled training.

    Components (weights show the constructor defaults)
    --------------------------------------------------
    1. Mask MSE        (w_mask=2.0)      MSE on Icp|Icm — recovery region only.
    2. Transparency    (w_transp=0.1)    MSE on Ir — non-limited must be unchanged.
    3. STFT            (w_stft=0.05)     Multi-scale magnitude L1 (wins 256/512/1024).
    4. LF coeff MSE    (w_lf_coeff=2.0)  PRIMARY LF loss on residual DCT coeffs.
    5. LF energy       (w_lf_energy=0.5) One-sided RMS under-recovery penalty.
    6. Over-recovery   (w_over=0.3)      Penalises LF energy > GT + 3 dB.
    7. λ reg           (w_reg=5.0)       Anti-saturation: L2 from target centres.
       Centres: λ_lf=0.034, λ_hf=0.03 — calibrated for M=2048, sr=44100.
       IMPORTANT: λ_hf_center was previously 0.10 (calibrated for M=1024).
       For M=2048, HF DCT coefficients post-normalisation are 0.01–0.05;
       λ_hf=0.10 zeroed all of them → dsdr_high < −1.8 dB every epoch.
       λ_hf_center=0.03 allows real HF transient content to pass through.
    7b. g_fac floor    (w_gfac_floor=3.0) ReLU(floor − g_fac)^2 penalty.
       Blocks the attenuative shortcut: without this, g_fac collapses toward
       the range lower bound, making g_max so small that the solver
       attenuates rather than declips (observed: dsdr_high/mid both < 0 from
       epoch 1, worsening steadily to −1.2 / −0.7 dB by ep22). The floor is
       set by `gfac_floor` (default 0.5 → g_max ≥ 3 dB with
       base_max_gain_db=6; raising it to 0.85 → g_max ≥ 5.1 dB blocks the
       shortcut more aggressively).
    8. Sparsity        (w_sparsity=0.5)  L1 of thresholded coefficients z_thresh.
       Penalises passing too many coefficients: forces λ to actually zero
       things out. Combined with the λ-reg this creates a stable equilibrium
       where the ADMM does real sparse solving rather than degenerating to
       an identity mapping.
    9. Deep supervision (w_ds=0.15)      Auxiliary mask+LF loss at the
       mid-layer (K//2) reconstruction. Directly injects gradient into the
       GRU, preventing vanishing gradients across K unrolled ADMM layers.
    """

    def __init__(
        self,
        w_mask: float = 2.0,
        w_transp: float = 0.1,
        w_stft: float = 0.05,
        w_lf_coeff: float = 2.0,
        w_lf_energy: float = 0.5,
        w_over: float = 0.3,
        w_reg: float = 5.0,
        w_sparsity: float = 0.5,
        w_ds: float = 0.15,
        w_gfac_floor: float = 3.0,
        gfac_floor: float = 0.5,
        sample_rate: int = 44100,
        lf_cutoff_hz: float = 500.0,
        lambda_lf_center: float = 0.034,
        # See item 7 in the class docstring for the calibration rationale
        # behind this default.
        lambda_hf_center: float = 0.03,
    ):
        super().__init__()
        self.w_mask = w_mask
        self.w_transp = w_transp
        self.w_stft = w_stft
        self.w_lf_coeff = w_lf_coeff
        self.w_lf_energy = w_lf_energy
        self.w_over = w_over
        self.w_reg = w_reg
        self.w_sparsity = w_sparsity
        self.w_ds = w_ds
        self.w_gfac_floor = w_gfac_floor
        self.gfac_floor = gfac_floor
        self.sr = sample_rate
        self.lf_cutoff = lf_cutoff_hz
        self.lf_center = lambda_lf_center
        self.hf_center = lambda_hf_center
        # Window sizes for the multi-scale STFT term.
        self.stft_wins = [256, 512, 1024]

    def _frame_loss(
        self,
        x_hat: torch.Tensor,
        x_clean: torch.Tensor,
        yc_w: torch.Tensor,
        Ir: torch.Tensor,
        Icp: torch.Tensor,
        Icm: torch.Tensor,
    ) -> Tuple[torch.Tensor, dict]:
        """Compute mask + transparency + STFT + LF losses for one frame estimate.

        Returns the weighted sum of components 1–6 plus a dict of each
        component's scalar value (for logging).
        """
        B, M = x_hat.shape
        losses = {}

        mask_active = (Icp | Icm).float()
        mask_ir = Ir.float()
        # clamp(min=1) guards against division by zero on frames with no
        # active (or no reliable) samples.
        n_active = mask_active.sum(dim=-1).clamp(min=1)
        n_ir = mask_ir.sum(dim=-1).clamp(min=1)

        # 1. Recovery-region MSE, normalised per sample by its active count.
        sq_err_active = ((x_hat - x_clean) ** 2) * mask_active
        loss_mask = (sq_err_active.sum(dim=-1) / n_active).mean()
        losses["mask"] = loss_mask.item()

        # 2. Transparency MSE on the reliable region.
        sq_err_ir = ((x_hat - x_clean) ** 2) * mask_ir
        loss_transp = (sq_err_ir.sum(dim=-1) / n_ir).mean()
        losses["transp"] = loss_transp.item()

        # 3. Multi-scale STFT magnitude L1.
        loss_stft = x_hat.new_zeros(1)
        for win in self.stft_wins:
            hop = win // 4
            wnd = torch.hann_window(win, device=x_hat.device)
            # Defaults bind the loop variables so the closure is safe.
            def _stft(x, _w=wnd, _win=win, _hop=hop):
                return torch.stft(x, n_fft=_win, hop_length=_hop,
                                  win_length=_win, window=_w, return_complex=True)
            loss_stft = loss_stft + F.l1_loss(_stft(x_hat.float()).abs(),
                                              _stft(x_clean.float()).abs())
        loss_stft = loss_stft / len(self.stft_wins)
        losses["stft"] = loss_stft.item()

        # 4. LF residual-coefficient MSE: compare the *added* signal
        #    (x − yc_w) in the first k_cut DCT bins.
        k_cut = int(math.ceil(self.lf_cutoff * 2.0 * M / self.sr))
        k_cut = max(1, min(k_cut, M))
        dct_res_hat = _dct2(x_hat.float() - yc_w.float())[:, :k_cut]
        dct_res_clean = _dct2(x_clean.float() - yc_w.float())[:, :k_cut]
        loss_lf_coeff = F.mse_loss(dct_res_hat, dct_res_clean)
        losses["lf_coeff"] = loss_lf_coeff.item()

        # 5. One-sided LF RMS under-recovery penalty.
        dct_hat = _dct2(x_hat.float())[:, :k_cut]
        dct_clean = _dct2(x_clean.float())[:, :k_cut]
        rms_lf_hat = dct_hat.pow(2).mean(dim=-1).clamp(min=1e-10).sqrt()
        rms_lf_clean = dct_clean.pow(2).mean(dim=-1).clamp(min=1e-10).sqrt()
        loss_lf_energy = F.relu(rms_lf_clean - rms_lf_hat).pow(2).mean()
        losses["lf_energy"] = loss_lf_energy.item()

        # 6. Over-recovery: penalise LF RMS exceeding ground truth + 3 dB.
        loss_over = F.relu(rms_lf_hat - rms_lf_clean * 10.0 ** (3.0/20.0)).pow(2).mean()
        losses["over"] = loss_over.item()

        total = (self.w_mask * loss_mask
                 + self.w_transp * loss_transp
                 + self.w_stft * loss_stft.squeeze()
                 + self.w_lf_coeff * loss_lf_coeff
                 + self.w_lf_energy * loss_lf_energy
                 + self.w_over * loss_over)
        return total, losses

    def forward(
        self,
        x_hat: torch.Tensor,
        x_clean: torch.Tensor,
        yc_w: torch.Tensor,
        Ir: torch.Tensor,
        Icp: torch.Tensor,
        Icm: torch.Tensor,
        params: Optional[torch.Tensor] = None,
        z_thresh: Optional[torch.Tensor] = None,
        x_hat_mid: Optional[torch.Tensor] = None,
    ) -> Tuple[torch.Tensor, dict]:
        """Return (total weighted loss, dict of per-component scalars)."""
        losses = {}

        # Components 1–6 on the final reconstruction.
        total, frame_losses = self._frame_loss(x_hat, x_clean, yc_w, Ir, Icp, Icm)
        losses.update(frame_losses)

        # 7. λ anti-saturation regulariser (only if params were supplied).
        loss_reg = x_hat.new_zeros(1)
        if params is not None and self.w_reg > 0:
            loss_reg = ((params[:, 0] - self.lf_center).pow(2).mean() +
                        (params[:, 1] - self.hf_center).pow(2).mean())
        losses["reg"] = loss_reg.item()
        total = total + self.w_reg * loss_reg.squeeze()

        # 7b. g_fac floor penalty (blocks the attenuative shortcut).
        loss_gfac_floor = x_hat.new_zeros(1)
        if params is not None and self.w_gfac_floor > 0:
            g_fac = params[:, 3]
            loss_gfac_floor = F.relu(self.gfac_floor - g_fac).pow(2).mean()
        losses["gfac_floor"] = loss_gfac_floor.item()
        total = total + self.w_gfac_floor * loss_gfac_floor.squeeze()

        # 8. Sparsity: L1 of the final thresholded coefficients.
        loss_sparsity = x_hat.new_zeros(1)
        if z_thresh is not None and self.w_sparsity > 0:
            loss_sparsity = z_thresh.abs().mean()
        losses["sparsity"] = loss_sparsity.item()
        total = total + self.w_sparsity * loss_sparsity.squeeze()

        # 9. Deep supervision on the mid-layer reconstruction.
        loss_ds = x_hat.new_zeros(1)
        if x_hat_mid is not None and self.w_ds > 0:
            ds_total, _ = self._frame_loss(x_hat_mid, x_clean, yc_w, Ir, Icp, Icm)
            loss_ds = ds_total
        losses["ds"] = loss_ds.item()
        total = total + self.w_ds * loss_ds.squeeze()

        losses["total"] = total.item()
        return total, losses
|
|
|
|
|
|
| |
| |
| |
|
|
class SPADEUnrolledInference:
    """
    Wraps SPADEUnrolled to process a full audio signal via WOLA
    (sqrt-Hann analysis window, overlap-add with window-energy
    normalisation).

    Equivalent to _declip_mono_gpu but calls the learned model instead of
    the classical SPADE solver. Used at test time only (not differentiable
    end-to-end because of the frame-level sliding window).

    Usage
    -----
        model = SPADEUnrolled(cfg)
        model.load_state_dict(...)
        model.eval()

        infer = SPADEUnrolledInference(model, delta_db=2.5, device="cuda")
        x_hat = infer.process(y_limited, sample_rate=44100)
    """

    def __init__(
        self,
        model: SPADEUnrolled,
        delta_db: float = 2.5,
        max_gain_db: float = 6.0,
        device: str = "cuda",
        batch_frames: int = 256,
    ):
        # delta_db sets the limiting-detection threshold below the observed
        # ceiling. max_gain_db is kept for API parity (the model predicts
        # its own g_max). batch_frames is currently unused (reserved for
        # future batched-frame processing).
        self.model = model.to(device)
        self.model.eval()
        self.cfg = model.cfg
        self.delta_db = delta_db
        self.max_gain_db = max_gain_db
        self.device = device
        self.batch_frames = batch_frames

    @torch.no_grad()
    def process(self, y_limited: np.ndarray, sample_rate: int = 44100) -> np.ndarray:
        """
        Restore a limited signal, channel by channel.

        y_limited : (N,) or (N, C) — limited audio
        returns   : (N,) or (N, C) — restored audio

        Raises
        ------
        ImportError if spade_declip_v12 (mask computation) is not importable.
        """
        from scipy.signal.windows import hann as _hann
        try:
            from spade_declip_v12 import _compute_masks, _dilate_masks_soft
        except ImportError:
            raise ImportError("spade_declip_v12.py must be in the Python path")

        mono = y_limited.ndim == 1
        if mono:
            y_limited = y_limited[:, None]
        _, C = y_limited.shape
        outputs = []

        for ch in range(C):
            yc = y_limited[:, ch].astype(np.float64)
            # Remove DC so the ceiling/threshold estimate is symmetric;
            # it is restored on every exit path below.
            dc = float(np.mean(yc))
            yc -= dc

            ceiling = float(np.max(np.abs(yc)))
            thresh = ceiling * (10.0 ** (-self.delta_db / 20.0))
            if thresh <= 0:
                # Constant (or silent) channel: nothing to declip.
                # BUG FIX: re-add the DC offset removed above — previously a
                # constant non-zero channel was returned as all zeros.
                outputs.append(yc + dc)
                continue

            masks_obj = _compute_masks(yc, thresh)

            M = self.cfg.window_length
            a = self.cfg.hop_length
            N = int(np.ceil(len(yc) / a))
            # sqrt-Hann used for both analysis and synthesis (WOLA).
            win = np.sqrt(_hann(M, sym=False))

            out_buf = np.zeros(len(yc) + M)
            norm_buf = np.zeros(len(yc) + M)
            L = len(yc)

            # Rolling buffer of the K most recent windowed frames fed to the
            # ContextEncoder (zeros before the signal starts).
            K_ctx = self.cfg.K_context
            ctx_buf = np.zeros((K_ctx, M), dtype=np.float32)

            for i in range(N):
                idx1 = i * a
                idx2 = min(idx1 + M, L)
                seg_len = idx2 - idx1

                # Zero-pad the tail frame to a full window of M samples.
                yc_frame = np.zeros(M)
                yc_frame[:seg_len] = yc[idx1:idx2]
                win_frame = yc_frame * win

                # Fast path: no sample in this frame reaches the limiting
                # threshold — pass it through unmodified.
                frame_peak = np.max(np.abs(yc[idx1:idx2]))
                if frame_peak < thresh:
                    out_buf[idx1:idx1 + M] += win_frame * win
                    norm_buf[idx1:idx1 + M] += win ** 2
                    ctx_buf = np.roll(ctx_buf, -1, axis=0)
                    ctx_buf[-1] = win_frame.astype(np.float32)
                    continue

                # Per-frame slices of the global clipping masks.
                Ir_f = masks_obj.Ir[idx1:idx2]
                Icp_f = masks_obj.Icp[idx1:idx2]
                Icm_f = masks_obj.Icm[idx1:idx2]

                # Pad masks to M; padded samples are marked reliable so the
                # model leaves them untouched.
                Ir_p = np.zeros(M, dtype=bool); Ir_p[:seg_len] = Ir_f
                Icp_p = np.zeros(M, dtype=bool); Icp_p[:seg_len] = Icp_f
                Icm_p = np.zeros(M, dtype=bool); Icm_p[:seg_len] = Icm_f
                Ir_p[seg_len:] = True

                def _t(arr, dtype=torch.float32):
                    # (M,) numpy → (1, M) tensor on the target device.
                    return torch.tensor(arr, dtype=dtype,
                                        device=self.device).unsqueeze(0)

                yc_t = _t(win_frame.astype(np.float32))
                ctx_t = torch.tensor(ctx_buf, dtype=torch.float32,
                                     device=self.device).unsqueeze(0)
                Ir_t = _t(Ir_p, dtype=torch.bool)
                Icp_t = _t(Icp_p, dtype=torch.bool)
                Icm_t = _t(Icm_p, dtype=torch.bool)

                x_hat_t, _, _, _ = self.model(yc_t, ctx_t, Ir_t, Icp_t, Icm_t)

                x_hat = x_hat_t.squeeze(0).cpu().numpy()

                out_buf[idx1:idx1 + M] += x_hat * win
                norm_buf[idx1:idx1 + M] += win ** 2

                # Context always tracks the OBSERVED (limited) frames, not
                # the reconstructions, matching training conditions.
                ctx_buf = np.roll(ctx_buf, -1, axis=0)
                ctx_buf[-1] = win_frame.astype(np.float32)

            # WOLA normalisation; guard against unvisited (zero) positions.
            safe_norm = np.where(norm_buf > 1e-8, norm_buf, 1.0)
            recovered = out_buf / safe_norm
            recovered = recovered[:L] + dc
            outputs.append(recovered)

        result = np.column_stack(outputs)
        return result[:, 0] if mono else result
|
|
|
|
|
|
| |
| |
| |
|
|
class HybridSPADEInference:
    """
    Two-band hybrid delimiting: classical v11 S-SPADE above the crossover,
    learned unrolled reconstruction below it.

    Per-channel pipeline
    --------------------
    1. Linkwitz-Riley split at ``crossover_hz`` (default 8000 Hz) using the
       phase-perfect Butterworth HP = x − LP construction from v11
       ``_lr_split``, so lf + hf == x exactly (no energy loss or leakage).
    2. HF band (≥ crossover_hz): ``spade_declip_v11._sspade_batch_gpu`` (GPU)
       or ``spade_declip_v11.tight_sspade`` (CPU). The algorithm is
       BYTE-FOR-BYTE the v11 one: hard thresholding H_k with progressive
       relaxation (k starts at hf_s, grows by hf_s every hf_r iterations,
       up to hf_max_iter).
    3. LF band (< crossover_hz): ``SPADEUnrolledInference.process()`` — the
       ContextEncoder predicts per-frame lambda_lf, g_max and delta from the
       K previous frames; UnrolledADMM applies K_unroll differentiable
       soft-threshold layers.
    4. Output = lf_recovered + hf_recovered.

    Why the 8 kHz crossover
    -----------------------
    v11 S-SPADE recovers HF transients (cymbal snap, hi-hat attack) well:
    DCT coefficients above 8 kHz are sparse, so hard thresholding with a
    small k finds them reliably. Below 8 kHz (kick body, bass fundamental)
    v11 under-recovers because the true sparsity level k is
    content-dependent and poorly set by the fixed s/r schedule, and because
    tonal/sustain content is not globally sparse — H_k wastes budget zeroing
    low-energy HF coefficients instead of recovering LF energy. The learned
    model addresses both via adaptive lambda_lf and g_max.

    Parameters
    ----------
    model            : trained SPADEUnrolled (loaded from checkpoint)
    crossover_hz     : LR crossover frequency (default 8000 Hz)
    lf_delta_db      : LF-band mask-detection threshold (dB below ceiling)
    lf_max_gain_db   : gain cap for LF-band recovery
    lf_release_ms    : LF-band mask dilation (limiter release smear)
    hf_delta_db      : HF-band mask-detection threshold
    hf_s             : v11 sparsity step (k starts at hf_s, grows by hf_s)
    hf_r             : v11 relaxation period (k incremented every hf_r iters)
    hf_eps           : v11 convergence threshold
    hf_max_iter      : v11 max iterations per frame
    hf_max_gain_db   : v11 ratio-aware gain cap for the HF band
    hf_release_ms    : v11 mask dilation for the HF band
    hf_window_length : v11 WOLA window for the HF band (default 2048)
    hf_hop_length    : v11 WOLA hop for the HF band (default 512)
    device           : 'cuda' | 'cpu' | 'auto'
    batch_frames     : GPU batch size for SPADEUnrolled LF processing
    """

    def __init__(
        self,
        model: "SPADEUnrolled",
        crossover_hz: float = 8000.0,
        lf_delta_db: float = 1.5,
        lf_max_gain_db: float = 6.0,
        lf_release_ms: float = 0.0,
        hf_delta_db: float = 1.5,
        hf_s: int = 1,
        hf_r: int = 1,
        hf_eps: float = 0.05,
        hf_max_iter: int = 500,
        hf_max_gain_db: float = 6.0,
        hf_release_ms: float = 0.0,
        hf_window_length: int = 2048,
        hf_hop_length: int = 512,
        device: str = "auto",
        batch_frames: int = 256,
    ):
        # Resolve 'auto' to a concrete backend string before anything uses it.
        if device == "auto":
            try:
                import torch as _torch_probe
                device = "cuda" if _torch_probe.cuda.is_available() else "cpu"
            except ImportError:
                device = "cpu"

        self.model = model.to(device)
        self.model.eval()
        self.cfg = model.cfg
        self.device = device

        # Crossover setting.
        self.crossover_hz = crossover_hz

        # LF (learned-model) settings.
        self.lf_delta_db = lf_delta_db
        self.lf_max_gain_db = lf_max_gain_db
        self.lf_release_ms = lf_release_ms

        # HF (v11 S-SPADE) settings.
        self.hf_delta_db = hf_delta_db
        self.hf_s = hf_s
        self.hf_r = hf_r
        self.hf_eps = hf_eps
        self.hf_max_iter = hf_max_iter
        self.hf_max_gain_db = hf_max_gain_db
        self.hf_release_ms = hf_release_ms
        self.hf_window_length = hf_window_length
        self.hf_hop_length = hf_hop_length
        self.batch_frames = batch_frames

        # Learned LF-band engine (shares model and device with this object).
        self._lf_infer = SPADEUnrolledInference(
            model,
            delta_db=lf_delta_db,
            max_gain_db=lf_max_gain_db,
            device=device,
            batch_frames=batch_frames,
        )

    @staticmethod
    def _lr_split(
        x: np.ndarray,
        crossover_hz: float,
        sr: int,
    ) -> "Tuple[np.ndarray, np.ndarray]":
        """
        Zero-phase Linkwitz-Riley band split; returns (lp, hp) with
        lp + hp == x exactly. Identical to spade_declip_v11._lr_split.
        """
        from scipy.signal import butter, sosfiltfilt
        # Clamp the cutoff into the filter's valid (0, Nyquist) range.
        cutoff = float(np.clip(crossover_hz, 1.0, sr / 2.0 - 1.0))
        low = sosfiltfilt(
            butter(2, cutoff, btype="low", fs=sr, output="sos"), x
        )
        # Defining HP as the residual guarantees perfect reconstruction.
        return low, x - low

    def _process_hf_band(
        self,
        hf_mono: np.ndarray,
        sr: int,
    ) -> np.ndarray:
        """
        Declip the HF band with the unmodified v11 S-SPADE solver.
        spade_declip_v11 is imported lazily, so it is only required
        when inference actually runs.
        """
        try:
            from spade_declip_v11 import (
                declip as _v11_declip,
                DeclipParams as _V11Params,
            )
        except ImportError:
            raise ImportError(
                "spade_declip_v11.py must be in the Python path for HF processing."
            )

        on_gpu = self.device != "cpu"
        v11_params = _V11Params(
            algo="sspade",
            frame=self.cfg.frame,
            mode="soft",
            delta_db=self.hf_delta_db,
            window_length=self.hf_window_length,
            hop_length=self.hf_hop_length,
            s=self.hf_s,
            r=self.hf_r,
            eps=self.hf_eps,
            max_iter=self.hf_max_iter,
            max_gain_db=self.hf_max_gain_db,
            release_ms=self.hf_release_ms,
            sample_rate=sr,
            use_gpu=on_gpu,
            gpu_device=self.device if on_gpu else "auto",
            show_progress=False,
            verbose=False,
        )
        restored, _ = _v11_declip(hf_mono, v11_params)
        return restored

    @torch.no_grad()
    def process(
        self,
        y_limited: np.ndarray,
        sample_rate: int = 44100,
    ) -> np.ndarray:
        """
        y_limited : (N,) or (N, C) — limited audio at any sample rate
        returns   : (N,) or (N, C) — hybrid-recovered audio

        Per channel: LR crossover split at self.crossover_hz, v11 S-SPADE on
        the HF band (unchanged algorithm), SPADEUnrolledInference on the LF
        band, then the sum of the two recovered bands.
        """
        was_mono = y_limited.ndim == 1
        if was_mono:
            y_limited = y_limited[:, None]
        n_channels = y_limited.shape[1]

        restored_channels = []
        for ch in range(n_channels):
            chan = y_limited[:, ch].astype(np.float64)

            # 1. crossover split (lf + hf == chan exactly)
            lf_band, hf_band = self._lr_split(chan, self.crossover_hz, sample_rate)

            # 2. HF: classical v11 solver
            hf_rec = self._process_hf_band(hf_band.astype(np.float64), sample_rate)

            # 3. LF: learned unrolled solver
            lf_rec = self._lf_infer.process(
                lf_band.astype(np.float32), sample_rate
            )

            # 4. recombine, trimming to the shorter band
            n = min(len(lf_rec), len(hf_rec))
            restored_channels.append(lf_rec[:n].astype(np.float64) + hf_rec[:n])

        stacked = np.column_stack(restored_channels)
        return stacked[:, 0] if was_mono else stacked
|
|
|
|
| |
| |
| |
|
|
def build_model(cfg: Optional[UnrolledConfig] = None) -> SPADEUnrolled:
    """
    Create a SPADEUnrolled model and report its trainable-parameter count.

    Parameters
    ----------
    cfg : optional UnrolledConfig; a default-constructed config is used
          when omitted.

    Returns
    -------
    The freshly constructed SPADEUnrolled instance.
    """
    if cfg is None:
        cfg = UnrolledConfig()
    net = SPADEUnrolled(cfg)
    n = net.parameter_count()
    print(f"[SPADEUnrolled] Built model: {n:,} trainable parameters")
    return net
|
|
|
|
| |
| |
| |
|
|
def _smoke_test():
    """Exercise one forward and one backward pass on random data to check
    output shapes, dtypes, parameter ranges, loss, and gradient flow."""
    print("=" * 60)
    print("SPADE Unrolled — Smoke Test")
    print("=" * 60)

    # Small config so the test runs in well under a second.
    cfg = UnrolledConfig(
        window_length=512,
        hop_length=128,
        K_unroll=4,
        K_context=4,
        n_mels=16,
        gru_hidden=64,
        gru_layers=1,
    )
    model = build_model(cfg)
    model.eval()

    batch = 4
    frame_len = cfg.window_length
    n_ctx = cfg.K_context

    # Synthetic limited frame + context with a hard clip level of 0.3.
    y_lim = torch.randn(batch, frame_len) * 0.5
    context = torch.randn(batch, n_ctx, frame_len) * 0.5
    clip_level = 0.3
    mask_r = y_lim.abs() < clip_level
    mask_cp = y_lim >= clip_level
    mask_cm = y_lim <= -clip_level

    with torch.no_grad():
        x_hat, params, z_thresh, x_hat_mid = model(
            y_lim, context, mask_r, mask_cp, mask_cm
        )

    print(f" Input yc: {tuple(y_lim.shape)} dtype={y_lim.dtype}")
    print(f" Output x_hat: {tuple(x_hat.shape)} dtype={x_hat.dtype}")
    print(f" Params: {tuple(params.shape)} dtype={params.dtype}")
    print(" Param ranges:")
    print(f" lambda_lf ∈ [{params[:,0].min():.4f}, {params[:,0].max():.4f}]")
    print(f" lambda_hf ∈ [{params[:,1].min():.4f}, {params[:,1].max():.4f}]")
    print(f" delta_fac ∈ [{params[:,2].min():.4f}, {params[:,2].max():.4f}]")
    print(f" gmax_fac ∈ [{params[:,3].min():.4f}, {params[:,3].max():.4f}]")
    print(f" eps_fac ∈ [{params[:,4].min():.4f}, {params[:,4].max():.4f}]")

    # Loss check against a noisy pseudo-clean target.
    x_clean = y_lim + torch.randn_like(y_lim) * 0.1
    loss_fn = SPADEUnrolledLoss()
    loss, details = loss_fn(x_hat, x_clean, y_lim, mask_r, mask_cp, mask_cm)
    print(f"\n Loss: {loss.item():.6f}")
    for name, val in details.items():
        print(f" {name:12s}: {val:.6f}")

    # Gradient-flow check: backward in train mode, then sample grad norms.
    model.train()
    x_hat2, _, z2, xm2 = model(y_lim, context, mask_r, mask_cp, mask_cm)
    loss2, _ = loss_fn(
        x_hat2, x_clean, y_lim, mask_r, mask_cp, mask_cm,
        z_thresh=z2, x_hat_mid=xm2,
    )
    loss2.backward()
    grad_norms = {}
    for pname, param in model.named_parameters():
        if param.grad is not None:
            grad_norms[pname] = param.grad.norm().item()
    print(f"\n Gradient norms (sample):")
    for pname, gnorm in list(grad_norms.items())[:6]:
        print(f" {pname:40s}: {gnorm:.6f}")

    print("\n ✓ Smoke test passed.")
|
|
|
|
# Script entry point: running this file directly executes the smoke test.
if __name__ == "__main__":
    _smoke_test()
|
|