# mymodel/run_smart_sweep_old2.py
# (Hugging Face viewer metadata removed — was: "simone00's picture /
#  Add files using upload-large-folder tool / 17d4058 verified";
#  kept here as a comment so the file remains valid Python.)
"""
run_smart_sweep.py — S-SPADE · Bayesian parameter search (v2)
===================================================================
PIPELINE GROUND-TRUTH (Case 1 — threshold-based limiter)
---------------------------------------------------------
Il limiter sintetico è threshold-based:
- Originale normalizzato a 0 dBFS peak
- Limiter: attua solo sui picchi sopra la soglia → output max peak ≈ −threshold_db
- Il CORPO del segnale (loudness percepita) rimane invariato per definizione
- NON si applica nessun gain al segnale limitato dopo il processing
Allineamento per il calcolo residual:
Originale e limited sono già sulla stessa scala (loudness uguale, picchi diversi).
Nessuna normalizzazione LUFS / RMS necessaria o corretta.
GT_res = original_0dBFS − limited (scale identiche)
res_iter = spade_output − limited (idem)
Entrambi vengono poi normalizzati a RESIDUAL_DBFS peak SOLO per rendere
comparabili file con diversi livelli assoluti — non altera la logica.
Metrica ideale:
GT_res ≡ res_iter → cosine_sim = 1.0 → differenza = −∞ dB
Ottimizzatore: Optuna TPE (Bayesian) + MedianPruner
Storage: SQLite (riprendibile con --resume)
Corpus: tutti i drum sample in Kicks / Snares / Perc / Tops
DIPENDENZE
----------
pip install numpy scipy soundfile optuna rich
(pyloudnorm NON necessario)
USO
---
python run_smart_sweep.py # 200 trial
python run_smart_sweep.py --trials 50 # test rapido
python run_smart_sweep.py --resume # riprende da DB
python run_smart_sweep.py --report # solo risultati
python run_smart_sweep.py --base-dir /path/SPADE # cartella custom
"""
import argparse
import logging
import sys
import time
import warnings
from pathlib import Path
from typing import Dict, List, Optional
import numpy as np
import scipy.signal as sig
import soundfile as sf
logging.getLogger("optuna").setLevel(logging.WARNING)
# ── optuna ───────────────────────────────────────────────────────────────────
try:
import optuna
from optuna.samplers import TPESampler
from optuna.pruners import MedianPruner
_HAS_OPTUNA = True
except ImportError:
_HAS_OPTUNA = False
warnings.warn("optuna non trovato — pip install optuna")
# ── rich ─────────────────────────────────────────────────────────────────────
try:
from rich.console import Console
from rich.table import Table
_console = Console()
_HAS_RICH = True
except ImportError:
_HAS_RICH = False
_console = None
# ── spade_declip ─────────────────────────────────────────────────────────────
try:
from spade_declip_v12 import declip, DeclipParams
_HAS_SPADE = True
except ImportError:
_HAS_SPADE = False
warnings.warn("spade_declip_v12.py non trovato")
# =============================================================================
# CONFIG
# =============================================================================
# Sub-folders of the base dir scanned by build_corpus for drum samples.
DRUM_DIRS = ["Kicks", "Snares", "Perc", "Tops"]
# ── Synthetic limiter ─────────────────────────────────────────────────────────
# Case 1: threshold-based.
# Original @ 0 dBFS peak → limiter acts only on peaks > threshold →
# output max peak ≈ −LIMITER_THRESHOLD_DB dBFS, loudness unchanged.
# The limited signal gets NO gain applied after processing.
LIMITER_THRESHOLD_DB = 3.0 # dB below the ceiling (positive)
LIMITER_RELEASE_MS = 80.0 # release time of the synthetic limiter (ms)
# attack = 1 sample → true brickwall
# Residual normalization — ONLY for cross-file comparability.
# Scales both GT and iter residuals identically, so it does not alter the comparison.
RESIDUAL_DBFS = -3.0
# ── Background pink noise ─────────────────────────────────────────────────────
# Simulates a musical background underneath the drum transient.
# It is mixed into the sample (already at 0 dBFS peak) BEFORE the limiter.
# This guarantees that:
# - the limiter acts on a realistic drum + music-background signal
# - SPADE receives the same mix and must work under realistic conditions
# - GT_res = (drum+noise) − limiter(drum+noise) reflects the real situation
# Level relative to the drum sample's peak. −20 dB = background well below
# the transient, audible but not dominant (like a kick over a drum loop).
PINK_NOISE_LEVEL_DB = -20.0 # dB rel. to the drum peak (negative = below)
# Optuna
STUDY_NAME = "spade_smart_v2_thr3db"
OUT_CSV = "smart_sweep_results.csv"
# FIXED SPADE solver parameters (invariant across all trials)
FIXED_SOLVER = dict(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    s = 1,
    r = 1,
    n_jobs = 1,
    verbose = False,
    show_progress = False,
    use_gpu = True,
    # multiband and macro_expand live in the search space
)
# Multiband crossover (fixed for comparability across trials)
# 250 Hz separates: LF = kick body/punch | HF = transient/attack
BAND_CROSSOVER_HZ = 250.0
# =============================================================================
# HELPERS
# =============================================================================
def ensure_2d(a: np.ndarray) -> np.ndarray:
    """Return *a* as a 2-D (N, C) array; a 1-D input gains a channel axis."""
    if a.ndim == 1:
        return a[:, None]
    return a
def normalize_to_0dBFS(a: np.ndarray) -> np.ndarray:
    """Scale to 0 dBFS peak — used only on the original as a common reference.

    Near-silent input (peak <= 1e-12) is returned unchanged to avoid
    division blow-up.
    """
    peak = np.max(np.abs(a))
    if peak > 1e-12:
        return a / peak
    return a
def normalize_peak(a: np.ndarray, target_dbfs: float) -> np.ndarray:
    """
    Scale *a* so its peak sits at ``target_dbfs`` dBFS.

    Used ONLY on the residuals for cross-file comparability; it does not
    alter the comparison logic because GT and iter residuals are scaled
    identically. Near-silent input is returned unchanged.
    """
    peak = np.max(np.abs(a))
    if peak <= 1e-12:
        return a
    target_lin = 10 ** (target_dbfs / 20.0)
    return a * (target_lin / peak)
def generate_pink_noise(n_samples: int, n_channels: int, rng: np.random.Generator) -> np.ndarray:
    """
    Generate pink (1/f) noise by IIR-filtering white noise.

    The fixed coefficients implement a low-order pinking filter that
    approximates a 1/f spectral density. (NOTE(review): the original
    comment attributed these to a "Voss-McCartney 5-pole IIR"; Voss-
    McCartney is actually a random-addition algorithm and the denominator
    here has 3 poles — the coefficients look like a Kellet-style economy
    pinking filter, to be confirmed.)

    Returns an (n_samples, n_channels) float array with each channel
    normalized to unit RMS; the final mix level is applied later via
    PINK_NOISE_LEVEL_DB in mix_pink_noise.
    """
    # Stable IIR approximation of a 1/f magnitude response.
    num = np.array([0.049922035, -0.095993537, 0.050612699, -0.004408786])
    den = np.array([1.0, -2.494956002, 2.017265875, -0.522189400])
    channels = []
    for _ in range(n_channels):
        shaped = sig.lfilter(num, den, rng.standard_normal(n_samples))
        channel_rms = np.sqrt(np.mean(shaped ** 2))
        channels.append(shaped / (channel_rms + 1e-12))  # unit RMS per channel
    return np.stack(channels, axis=1)
def mix_pink_noise(
    audio_0dBFS: np.ndarray,
    sr: int,
    level_db: float,
    rng: np.random.Generator,
) -> np.ndarray:
    """
    Mix pink noise into the signal at a level relative to its peak.

    level_db < 0 → the noise sits below the drum's peak (e.g. −20 dB).
    The noise spans the whole sample; a stereo sample gets independent
    noise per channel (decorrelated, like a real musical background).
    The output may exceed 0 dBFS by a fraction of a dB: that is fine, the
    limiter that follows brings it back under the threshold.
    """
    buf = ensure_2d(audio_0dBFS)
    n_samples, n_channels = buf.shape
    background = generate_pink_noise(n_samples, n_channels, rng)  # unit RMS per channel
    # Absolute linear gain placing the noise level_db below the drum peak.
    drum_peak = np.max(np.abs(buf))
    noise_gain = drum_peak * (10 ** (level_db / 20.0))
    blended = buf + background * noise_gain
    # NOT normalized here: the 0 dBFS normalization happens in build_corpus
    # right after, on the whole mix (drum + noise), before anything else.
    if audio_0dBFS.ndim == 1:
        return blended[:, 0]
    return blended
# =============================================================================
# LIMITER SINTETICO (Case 1 — threshold-based, brickwall, 1-campione attack)
# =============================================================================
def apply_brickwall_limiter(
    audio_0dBFS: np.ndarray,
    sr: int,
    threshold_db: float = LIMITER_THRESHOLD_DB,
    release_ms: float = LIMITER_RELEASE_MS,
) -> np.ndarray:
    """
    Threshold-based brickwall limiter.

    Input : audio_0dBFS — already at 0 dBFS peak, shape (N,) or (N, C)
    Output: limited signal, same shape — NOT boosted, NOT clipped

    Gain envelope:
        |x[n]| > threshold_lin → target_gain = threshold_lin / |x[n]|
        otherwise              → target_gain = 1.0
    Attack : instantaneous (1 sample, true brickwall)
    Release: exponential with time constant release_ms
    No post-processing: the output's max peak ≈ −threshold_db dBFS and the
    perceived loudness is unchanged versus the input.
    """
    ceiling = 10 ** (-abs(threshold_db) / 20.0)
    # One-pole release coefficient; the max() guard avoids division by zero.
    coef = np.exp(-1.0 / max(release_ms * sr / 1000.0, 1e-9))
    data = ensure_2d(audio_0dBFS).copy()
    n_samples, n_channels = data.shape
    limited = np.empty_like(data)
    for ch_idx in range(n_channels):
        samples = data[:, ch_idx]
        gain = 1.0
        envelope = np.empty(n_samples)
        for i in range(n_samples):
            level = abs(samples[i])
            wanted = ceiling / level if level > ceiling else 1.0
            # instant attack when the gain drops, exponential release when it rises
            if wanted < gain:
                gain = wanted
            else:
                gain = coef * gain + (1.0 - coef) * wanted
            envelope[i] = gain
        limited[:, ch_idx] = samples * envelope
    # Return the same shape as the input.
    return limited[:, 0] if audio_0dBFS.ndim == 1 else limited
# =============================================================================
# COSINE SIMILARITY TF
# =============================================================================
def cosine_sim_tf(
gt: np.ndarray,
est: np.ndarray,
sr: int,
win_samples: int = 1024,
hop_samples: int = 256,
n_bands: int = 12,
) -> float:
"""
Similarità coseno media su micro-finestre tempo-frequenziali.
Input: entrambi già a RESIDUAL_DBFS peak.
Output: scalare in [0, 1]. Target ideale = 1.0.
"""
L = min(gt.shape[0], est.shape[0])
g = (gt[:L, 0] if gt.ndim == 2 else gt[:L]).copy()
e = (est[:L, 0] if est.ndim == 2 else est[:L]).copy()
win = min(win_samples, max(32, L // 4))
hop = min(hop_samples, win // 2)
if L < win or win < 32:
denom = np.linalg.norm(g) * np.linalg.norm(e) + 1e-12
return float(np.dot(g, e) / denom)
_, _, Zg = sig.stft(g, fs=sr, window="hann",
nperseg=win, noverlap=win - hop,
boundary=None, padded=False)
_, _, Ze = sig.stft(e, fs=sr, window="hann",
nperseg=win, noverlap=win - hop,
boundary=None, padded=False)
n_freqs, n_frames = Zg.shape
if n_frames == 0:
return float(np.dot(g, e) / (np.linalg.norm(g) * np.linalg.norm(e) + 1e-12))
edges = np.unique(np.round(
np.logspace(0, np.log10(max(n_freqs, 2)), min(n_bands, n_freqs) + 1)
).astype(int))
edges = np.clip(edges, 0, n_freqs)
sims = []
for i in range(len(edges) - 1):
f0, f1 = int(edges[i]), int(edges[i + 1])
if f1 <= f0:
continue
Mg = np.abs(Zg[f0:f1, :])
Me = np.abs(Ze[f0:f1, :])
dot = np.sum(Mg * Me, axis=0)
norm_g = np.sqrt(np.sum(Mg ** 2, axis=0)) + 1e-12
norm_e = np.sqrt(np.sum(Me ** 2, axis=0)) + 1e-12
sims.extend((dot / (norm_g * norm_e)).tolist())
return float(np.mean(sims)) if sims else 0.0
# =============================================================================
# CORPUS
# =============================================================================
def build_corpus(base_dir: Path, max_files: Optional[int] = None) -> List[Dict]:
    """
    Build the evaluation corpus from the drum-sample folders.

    For each drum sample:
      1. Load and normalize to 0 dBFS peak (common cross-file reference)
      2. Mix in pink noise at PINK_NOISE_LEVEL_DB rel. to the peak
         (the mix happens in float and may temporarily exceed 0 dBFS)
      3. Normalize the mix (drum + noise) to 0 dBFS peak — common reference
         before the rest of the pipeline
      4. Apply the synthetic limiter to the normalized (drum + noise) → limited
      5. GT_res_raw = (drum + noise) − limited (same scale, no gain)
      6. Discard files where the limiter never engages
      7. Normalize GT_res to RESIDUAL_DBFS (cross-file comparability only)

    The noise is reproducible: each file uses a deterministic seed derived
    from its position in the scan order. That scan index also counts files
    that are later skipped, so ``enumerate()`` over the returned corpus
    would NOT reproduce it — the seed actually used is therefore stored in
    each item as "noise_seed" so consumers (e.g. the debug export) can
    regenerate the exact same mix.

    Parameters
    ----------
    base_dir  : root folder containing the DRUM_DIRS sub-folders
    max_files : optional cap on the number of corpus items

    Returns
    -------
    List of dicts with keys "file", "sr", "limited" (SPADE input),
    "gt_res" (target residual) and "noise_seed" (pink-noise RNG seed).
    """
    corpus = []
    extensions = {".wav", ".flac", ".aif", ".aiff"}
    file_index = 0  # deterministic seed source for the pink noise
    for folder in DRUM_DIRS:
        d = base_dir / folder
        if not d.exists():
            print(f" [WARN] Cartella non trovata: {d}")
            continue
        for f in sorted(d.glob("*")):
            if f.suffix.lower() not in extensions:
                continue
            try:
                audio, sr = sf.read(str(f), always_2d=True)
                audio = audio.astype(float)
            except Exception as exc:
                print(f" [WARN] {f.name}: {exc}")
                continue
            if audio.shape[0] < 64:
                continue
            # 1. 0 dBFS peak
            orig = normalize_to_0dBFS(audio)
            # 2. Pink-noise mix — deterministic seed for reproducibility.
            #    Recorded per item so the mix can be regenerated exactly.
            noise_seed = file_index
            rng = np.random.default_rng(seed=noise_seed)
            orig_with_noise = ensure_2d(mix_pink_noise(orig, sr,
                                                       PINK_NOISE_LEVEL_DB, rng))
            file_index += 1
            # 3. Normalize the mix to 0 dBFS peak — common reference before
            #    the rest of the pipeline. The float mix may have exceeded
            #    0 dBFS; this normalization removes the issue pre-limiter.
            orig_with_noise = ensure_2d(normalize_to_0dBFS(orig_with_noise))
            # 4. Synthetic limiter on (drum + noise) @0dBFS — no gain after
            limited = ensure_2d(apply_brickwall_limiter(orig_with_noise, sr))
            # 5. Raw residual — same scale, zero adjustments
            gt_res_raw = orig_with_noise - limited
            # 6. Check that the limiter actually engaged
            if np.max(np.abs(gt_res_raw)) < 1e-6:
                print(f" [SKIP] {f.name} — picco sotto la soglia, limiter inattivo")
                continue
            # 7. Normalize to RESIDUAL_DBFS — cross-file comparability only
            gt_res = normalize_peak(gt_res_raw, RESIDUAL_DBFS)
            corpus.append({
                "file"       : f.name,
                "sr"         : sr,
                "limited"    : limited,     # SPADE input = drum + noise + limiter
                "gt_res"     : gt_res,      # target residual
                "noise_seed" : noise_seed,  # regenerates the exact noise mix
            })
            if max_files and len(corpus) >= max_files:
                return corpus
    return corpus
# =============================================================================
# VALUTAZIONE SINGOLO FILE
# =============================================================================
def evaluate_one(item: Dict, params: dict) -> Optional[float]:
    """
    Run SPADE on the limited signal, build its residual, compare to GT.

    ``params`` holds pure SPADE parameters plus high-level flags:
        multiband    (bool)  -- split LF/HF, process separately
        macro_expand (bool)  -- envelope pre-pass for LF body recovery
        macro_ratio  (float) -- expansion ratio (1.0 = bypass)
        lf_delta_db  (float) -- delta_db for the LF band (<= BAND_CROSSOVER_HZ);
                                the standard delta_db is used for the HF band
        lf_cutoff_hz (float) -- v12: Hz below which LF bins are reserved (0 = off)
        lf_k_min     (int)   -- v12: guaranteed LF slots per ADMM iteration

    Returns the cosine-similarity score, or None when SPADE raises.
    """
    try:
        sr = item["sr"]
        limited = item["limited"].copy()
        gt_res = item["gt_res"]
        # Pop the high-level flags (not direct DeclipParams fields) from a
        # copy so the caller's dict is never mutated.
        solver_params = dict(params)
        multiband = solver_params.pop("multiband", False)
        macro_expand = solver_params.pop("macro_expand", False)
        macro_ratio = solver_params.pop("macro_ratio", 1.0)
        lf_delta_db = solver_params.pop("lf_delta_db",
                                        solver_params.get("delta_db", 1.5))
        # v12 stratified-thresholding params stay in solver_params and are
        # forwarded straight to DeclipParams.
        extra_kw = dict(
            multiband        = multiband,
            macro_expand     = macro_expand,
            macro_ratio      = macro_ratio if macro_expand else 1.0,
            macro_release_ms = 200.0,
            macro_attack_ms  = 10.0,
        )
        if multiband:
            extra_kw["band_crossovers"] = (BAND_CROSSOVER_HZ,)
            extra_kw["band_delta_db"] = (lf_delta_db, solver_params["delta_db"])
        cfg = DeclipParams(sample_rate=sr, **FIXED_SOLVER,
                           **solver_params, **extra_kw)
        fixed, _ = declip(limited, cfg)
        fixed_2d = ensure_2d(fixed)
        # Generated residual — same scale as the input, no gain applied.
        res_raw = fixed_2d - limited
        res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
        return cosine_sim_tf(gt_res, res_iter, sr)
    except Exception as exc:
        warnings.warn(f"evaluate_one ({item['file']}): {exc}")
        return None
# =============================================================================
# OBIETTIVO OPTUNA
# =============================================================================
def make_objective(corpus: List[Dict]):
    """Return an Optuna objective function closed over the evaluation corpus."""
    def objective(trial: "optuna.Trial") -> float:
        # ── Core parameters ───────────────────────────────────────────────
        delta_db = trial.suggest_float("delta_db", 1.5, 3.5, step=0.05)
        win_exp = trial.suggest_int("win_exp", 9, 11)
        window = 2 ** win_exp
        hop_div = trial.suggest_categorical("hop_div", [4, 8])
        hop = window // hop_div
        release_ms = trial.suggest_float("release_ms", 10.0, 200.0, step=5.0)
        max_gain_db = trial.suggest_float("max_gain_db", 2.0, 12.0, step=0.5)
        eps = trial.suggest_categorical("eps", [0.03, 0.05, 0.1])
        max_iter = trial.suggest_categorical("max_iter", [250, 500, 1000])
        # ── Multiband + macro expand ──────────────────────────────────────
        # STATIC SPACE: lf_delta_db and macro_ratio are ALWAYS sampled by
        # the TPE (fixed space) and used only conditionally at runtime.
        # This avoids the RandomSampler fallback that degraded multivariate
        # TPE performance with dynamic spaces.
        multiband = trial.suggest_categorical("multiband", [False, True])
        macro_expand = trial.suggest_categorical("macro_expand", [False, True])
        lf_delta_db = trial.suggest_float("lf_delta_db", 0.5, 2.0, step=0.05)
        macro_ratio = trial.suggest_float("macro_ratio", 1.1, 2.0, step=0.05)
        # ── v12: frequency-stratified thresholding ────────────────────────
        # lf_cutoff_hz: Hz threshold splitting "guaranteed LF" bins from HF.
        # lf_k_min: how many of those bins are guaranteed per ADMM
        # iteration; either at 0 disables the feature (identical to v11).
        # The TPE learns on its own when enabling it pays off.
        lf_cutoff_hz = trial.suggest_categorical("lf_cutoff_hz", [0.0, 500.0, 1000.0, 2000.0])
        lf_k_min = trial.suggest_int("lf_k_min", 0, 16)
        # If multiband=False, lf_delta_db is ignored in evaluate_one;
        # if macro_expand=False, macro_ratio is ignored in evaluate_one.
        trial_params = dict(
            delta_db      = delta_db,
            window_length = window,
            hop_length    = hop,
            release_ms    = release_ms,
            max_gain_db   = max_gain_db,
            eps           = eps,
            max_iter      = max_iter,
            # high-level flags (extracted in evaluate_one, not passed raw)
            multiband     = multiband,
            lf_delta_db   = lf_delta_db,
            macro_expand  = macro_expand,
            macro_ratio   = macro_ratio,
            # v12: forwarded straight to DeclipParams
            lf_cutoff_hz  = lf_cutoff_hz,
            lf_k_min      = lf_k_min,
        )
        # ── Per-trial shuffle with a reproducible seed ────────────────────
        # Each trial visits the corpus in a different order so tail files
        # are not systematically skipped by the pruner (which fires at the
        # midpoint) and the optimizer develops no fixed-order bias. The
        # seed is deterministic (trial.number) → reproducible with --resume.
        order = np.random.default_rng(trial.number).permutation(len(corpus)).tolist()
        midpoint = len(corpus) // 2
        scores = []
        for step, idx in enumerate(order):
            # fresh dict per call so evaluate_one cannot mutate trial_params
            result = evaluate_one(corpus[idx], dict(trial_params))
            if result is not None:
                scores.append(result)
            if step == midpoint and scores:
                trial.report(float(np.mean(scores)), step=step)
                if trial.should_prune():
                    raise optuna.TrialPruned()
        if not scores:
            return 0.0
        mean_score = float(np.mean(scores))
        trial.report(mean_score, step=len(corpus))
        return mean_score
    return objective
# =============================================================================
# REPORT + CSV
# =============================================================================
def print_report(study: "optuna.Study", top_n: int = 20):
    """
    Print a ranked table of completed trials plus the best configuration.

    Renders a rich table when rich is available, otherwise a plain-text
    table with the same columns. Only COMPLETE trials are ranked; pruned
    trials are counted for the summary line. Ends with a ready-to-paste
    DeclipParams snippet built from the best trial's parameters.
    """
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value or 0, reverse=True,
    )
    if not trials:
        print("Nessun trial completato.")
        return
    if _HAS_RICH:
        _console.rule("[bold cyan]RISULTATI SWEEP BAYESIANO[/]")
        tbl = Table(show_header=True, header_style="bold cyan", show_lines=False)
        for col, w in [("#",4),("score",9),("ddb",6),("LFd",5),("win",6),
                       ("hop",4),("rel",6),("gain",6),("eps",5),("iter",5),
                       ("MB",3),("ME",3),("MR",5),("LFcut",6),("LFk",4)]:
            tbl.add_column(col, justify="right", width=w)
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            # window/hop are stored as exponent + divisor in the search space
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            # highlight the winner and the top-3
            sty = "bold green" if rank == 1 else ("yellow" if rank <= 3 else "")
            tbl.add_row(
                str(rank), f"{t.value:.5f}",
                f"{p['delta_db']:.2f}",
                f"{p.get('lf_delta_db', p['delta_db']):.2f}",
                str(win), str(hop),
                f"{p['release_ms']:.0f}", f"{p['max_gain_db']:.1f}",
                str(p['eps']), str(p['max_iter']),
                mb, me, f"{p.get('macro_ratio', 1.0):.2f}",
                f"{lfc:.0f}", str(lfk),
                style=sty,
            )
        _console.print(tbl)
    else:
        # Plain-text fallback mirroring the rich-table columns.
        hdr = (f"{'#':>3} {'score':>8} {'ddb':>5} {'LFd':>5} {'win':>5}"
               f" {'hop':>4} {'rel':>6} {'gain':>5} {'eps':>5} {'iter':>5}"
               f" {'MB':>3} {'ME':>3} {'MR':>5} {'LFcut':>6} {'LFk':>4}")
        print(hdr); print("-" * len(hdr))
        for rank, t in enumerate(trials[:top_n], 1):
            p = t.params
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            mb = "Y" if p.get("multiband") else "n"
            me = "Y" if p.get("macro_expand") else "n"
            lfc = p.get("lf_cutoff_hz", 0.0)
            lfk = p.get("lf_k_min", 0)
            print(f"{rank:>3} {t.value:>8.5f} {p['delta_db']:>5.2f}"
                  f" {p.get('lf_delta_db', p['delta_db']):>5.2f} {win:>5}"
                  f" {hop:>4} {p['release_ms']:>6.0f} {p['max_gain_db']:>5.1f}"
                  f" {str(p['eps']):>5} {p['max_iter']:>5}"
                  f" {mb:>3} {me:>3} {p.get('macro_ratio', 1.0):>5.2f}"
                  f" {lfc:>6.0f} {lfk:>4}")
    best = trials[0]
    p = best.params
    win = 2 ** p["win_exp"]
    hop = win // p["hop_div"]
    n_pruned = sum(1 for t in study.trials
                   if t.state == optuna.trial.TrialState.PRUNED)
    print("\n" + "═" * 60)
    print("CONFIG OTTIMALE")
    print("═" * 60)
    # Ready-to-paste DeclipParams snippet for the best trial.
    print(f"""
params = DeclipParams(
    algo = "sspade",
    frame = "rdft",
    mode = "soft",
    delta_db = {p['delta_db']:.2f},
    window_length = {win},
    hop_length = {hop},
    release_ms = {p['release_ms']:.1f},
    max_gain_db = {p['max_gain_db']:.1f},
    eps = {p['eps']},
    max_iter = {p['max_iter']},
    sample_rate = sr,
    multiband = {p.get('multiband', False)},
    band_crossovers = ({BAND_CROSSOVER_HZ},),
    band_delta_db = ({p.get('lf_delta_db', p['delta_db']):.2f}, {p['delta_db']:.2f}),
    macro_expand = {p.get('macro_expand', False)},
    macro_ratio = {p.get('macro_ratio', 1.0):.2f},
    lf_cutoff_hz = {p.get('lf_cutoff_hz', 0.0):.1f}, # v12
    lf_k_min = {p.get('lf_k_min', 0)}, # v12
    n_jobs = -1,
    show_progress = True,
)""")
    print(f"\n→ Best score : {best.value:.5f}")
    print(f" Trials done : {len(trials)}")
    print(f" Pruned : {n_pruned}")
# =============================================================================
# DEBUG EXPORT
# =============================================================================
# SPADE parameters used for the debug export (best known from the previous
# grid sweep). If an Optuna DB exists with completed trials, these are
# replaced by that study's best parameters.
DEBUG_PARAMS = dict(
    delta_db = 1.5,
    window_length = 1024,
    hop_length = 256,
    release_ms = 100.0,
    max_gain_db = 6.0,
    eps = 0.05,
    max_iter = 500,
)
def _pk_dbfs(a: np.ndarray) -> float:
    """Peak level of *a* in dBFS; −999.0 sentinel for (near-)silence."""
    peak = float(np.max(np.abs(a)))
    if peak <= 1e-12:
        return -999.0
    return 20.0 * np.log10(peak)
def _rms_dbfs(a: np.ndarray) -> float:
    """RMS level of *a* in dBFS; −999.0 sentinel for (near-)silence."""
    rms = float(np.sqrt(np.mean(a.astype(float) ** 2)))
    if rms <= 1e-12:
        return -999.0
    return 20.0 * np.log10(rms)
def _write_wav(path: Path, audio: np.ndarray, sr: int) -> None:
    """Write a float32 WAV without clipping; warn when the peak exceeds 1.0."""
    data = ensure_2d(audio).astype(np.float32)
    peak = float(np.max(np.abs(data)))
    if peak > 1.0:
        print(f" [WARN] {path.name}: peak={peak:.4f} > 1.0 "
              f"(+{20*np.log10(peak):.2f} dBFS) — float32, non clippato")
    sf.write(str(path), data, sr, subtype="FLOAT")
def debug_export(
corpus: list,
base_dir: Path,
out_dir: Path,
n_files: int,
spade_params: dict,
) -> None:
"""
Esporta WAV di debug per i primi n_files item del corpus.
Per ogni file vengono scritti 6 WAV float32:
01_orig_with_noise drum + pink noise, normalizzato a 0 dBFS peak
(segnale prima del limiter)
02_limited uscita del limiter sintetico (input a SPADE)
03_gt_residual orig_with_noise - limited, @RESIDUAL_DBFS peak
04_spade_output uscita SPADE (float32, puo' superare 0 dBFS)
05_res_iter spade_output - limited, @RESIDUAL_DBFS peak
06_diff_residuals gt_residual - res_iter
ideale = silenzio = -inf dB
Stampa una tabella con peak dBFS e RMS dBFS per ogni traccia.
Livelli ATTESI:
01 peak = 0.00 dBFS (normalizzato)
02 peak ~ -LIMITER_THRESHOLD_DB dBFS (es. -1.5 dBFS)
03 peak = RESIDUAL_DBFS (es. -3.0 dBFS)
04 peak puo' essere > 0 dBFS (transiente recuperato)
05 peak = RESIDUAL_DBFS (es. -3.0 dBFS)
06 peak << 0 dBFS (piu' basso = SPADE piu' vicino al GT)
"""
out_dir.mkdir(parents=True, exist_ok=True)
items = corpus[:n_files]
col_w = max(len(it["file"]) for it in items) + 2
HDR = (f" {'file':<{col_w}} {'traccia':<22}"
f" {'peak dBFS':>10} {'RMS dBFS':>9} note")
SEP = " " + "-" * (len(HDR) - 2)
print()
if _HAS_RICH:
_console.rule("[bold cyan]DEBUG EXPORT[/]")
else:
print("=" * 65)
print("DEBUG EXPORT")
print("=" * 65)
print(f" Output dir : {out_dir}")
print(f" SPADE params : delta_db={spade_params['delta_db']}"
f" win={spade_params['window_length']}"
f" hop={spade_params['hop_length']}"
f" rel={spade_params['release_ms']}ms"
f" gain={spade_params['max_gain_db']}dB")
print(f" File esportati: {len(items)}")
print()
print(f" Livelli attesi:")
print(f" 01_orig_with_noise : ~ 0.00 dBFS (normalizzato prima del limiter)")
print(f" 02_limited : ~ {-LIMITER_THRESHOLD_DB:+.2f} dBFS (uscita limiter)")
print(f" 03_gt_residual : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
print(f" 04_spade_output : > 0 dBFS possibile (transiente recuperato)")
print(f" 05_res_iter : = {RESIDUAL_DBFS:+.2f} dBFS (normalizzato)")
print(f" 06_diff_residuals : << 0 dBFS (piu' basso = pipeline piu' corretta)")
print()
print(HDR)
diff_peaks = []
for file_index, item in enumerate(items):
sr = item["sr"]
limited = item["limited"].copy()
gt_res = item["gt_res"]
stem = Path(item["file"]).stem
# ── Ricostruisci orig_with_noise ──────────────────────────────────
# Riesegue la stessa pipeline di build_corpus con il seed identico
orig_with_noise = None
for folder in DRUM_DIRS:
candidate = base_dir / folder / item["file"]
if candidate.exists():
try:
raw, _ = sf.read(str(candidate), always_2d=True)
raw = raw.astype(float)
rng = np.random.default_rng(seed=file_index)
orig_0 = normalize_to_0dBFS(raw)
mixed = ensure_2d(mix_pink_noise(orig_0, sr,
PINK_NOISE_LEVEL_DB, rng))
orig_with_noise = ensure_2d(normalize_to_0dBFS(mixed))
except Exception:
pass
break
if orig_with_noise is None:
# Fallback: ricostruiamo da limited + gt_res (approssimazione)
gt_scale = 10 ** (RESIDUAL_DBFS / 20.0) # peak di gt_res
lim_peak = 10 ** (-LIMITER_THRESHOLD_DB / 20.0) # peak atteso del limited
gt_raw = gt_res * (lim_peak / (gt_scale + 1e-12))
orig_with_noise = ensure_2d(normalize_to_0dBFS(limited + gt_raw))
# ── Esegui SPADE ──────────────────────────────────────────────────
try:
p = DeclipParams(sample_rate=sr, **FIXED_SOLVER, **spade_params)
fixed, _ = declip(limited.copy(), p)
fixed_2d = ensure_2d(fixed)
except Exception as exc:
print(f" [ERRORE SPADE] {item['file']}: {exc}")
continue
# ── Residual iterazione (scala RAW, senza normalizzazione) ───────────
# IMPORTANTE: il diff deve avvenire sulla scala comune PRIMA di
# normalizzare i due residual, altrimenti la normalizzazione
# indipendente rimuove l'informazione di ampiezza relativa.
#
# gt_res e res_raw sono entrambi derivati dallo stesso limited →
# hanno la stessa scala di riferimento.
# gt_res e' gia' stato normalizzato a RESIDUAL_DBFS in build_corpus;
# dobbiamo riportarlo alla scala raw per il confronto.
#
# Scala comune: usiamo il peak del limited come riferimento.
# limited peak ≈ 10^(-LIMITER_THRESHOLD_DB/20) → scala assoluta nota.
res_raw = fixed_2d - limited # residual SPADE in scala assoluta
# gt_res_raw: ricostruiamo dalla scala normalizzata
# gt_res = gt_res_raw / peak(gt_res_raw) * 10^(RESIDUAL_DBFS/20)
# → gt_res_raw = gt_res * peak(gt_res_raw) / 10^(RESIDUAL_DBFS/20)
# Poiche' peak(gt_res_raw) non e' salvato, lo stimiamo:
# gt_res_raw ≈ orig_with_noise - limited (ricostruito)
gt_res_raw_approx = ensure_2d(orig_with_noise) - limited
L = min(gt_res_raw_approx.shape[0], res_raw.shape[0])
# ── Diff sulla scala comune (raw, non normalizzata) ───────────────
diff_raw = gt_res_raw_approx[:L] - res_raw[:L]
# ── Cosine similarity temporale (scalare, sul canale L) ──────────
g_flat = gt_res_raw_approx[:L, 0] if gt_res_raw_approx.ndim == 2 else gt_res_raw_approx[:L]
e_flat = res_raw[:L, 0] if res_raw.ndim == 2 else res_raw[:L]
cos_sim_td = float(
np.dot(g_flat, e_flat) /
(np.linalg.norm(g_flat) * np.linalg.norm(e_flat) + 1e-12)
)
# ── Stima floor teorico del diff dovuto al rumore rosa ────────────
# Il limiter attenue anche i picchi del rumore rosa → quella parte
# sta nel GT_res ma NON in res_iter (SPADE non la recupera).
# Stimiamo quanto rumore e' nel GT_res come proxy del floor.
noise_gain_lin = 10 ** (PINK_NOISE_LEVEL_DB / 20.0)
# Ampiezza del rumore rispetto al limited: noise_gain ≈ fraction
# del GT_res che e' irrecuperabile da SPADE.
noise_floor_db = 20 * np.log10(noise_gain_lin + 1e-12) + RESIDUAL_DBFS
# In pratica: diff non puo' essere < noise_floor per costruzione.
# ── diff dBFS relativo al GT_res (SNR-like) ───────────────────────
diff_rms_db = _rms_dbfs(diff_raw[:L])
gt_rms_db = _rms_dbfs(gt_res_raw_approx[:L])
# diff_vs_gt: quanto e' grande il diff rispetto al GT (0 dB = diff = GT)
diff_vs_gt_db = diff_rms_db - gt_rms_db # piu' negativo = meglio
# Normalizza per l'export WAV
res_iter = normalize_peak(res_raw, RESIDUAL_DBFS)
diff_norm = normalize_peak(diff_raw, RESIDUAL_DBFS) if np.max(np.abs(diff_raw)) > 1e-12 else diff_raw
diff_peaks.append((diff_vs_gt_db, cos_sim_td, diff_rms_db, gt_rms_db))
# ── Definizione tracce ────────────────────────────────────────────
tracks = [
("01_orig_with_noise",
orig_with_noise,
f"drum+noise @0dBFS (input pipeline)"),
("02_limited",
limited,
f"uscita limiter (input SPADE) atteso: ~{-LIMITER_THRESHOLD_DB:+.2f}dBFS"),
("03_gt_residual",
gt_res,
f"GT residual @{RESIDUAL_DBFS:.0f}dBFS (include noise attenuation)"),
("04_spade_output",
fixed_2d,
f"SPADE output (float32, puo' >0dBFS)"),
("05_res_iter",
res_iter,
f"residual SPADE @{RESIDUAL_DBFS:.0f}dBFS (solo componente sparsa)"),
("06_diff_residuals",
diff_norm,
f"GT - iter @{RESIDUAL_DBFS:.0f}dBFS "
f"cos_sim={cos_sim_td:.3f} diff/GT={diff_vs_gt_db:+.1f}dB "
f"noise_floor≈{noise_floor_db:+.1f}dB"),
]
# ── Soglia realistica per il diff ─────────────────────────────────
# Il diff non puo' essere < noise_floor per costruzione del corpus.
# Calibriamo la soglia [OK] a noise_floor + 6 dB (margine).
ok_threshold = noise_floor_db + 6.0 # tipicamente attorno a -17 dBFS
warn_threshold = ok_threshold + 10.0 # tutto sopra e' davvero anomalo
# ── Stampa tabella + scrivi WAV ───────────────────────────────────
print(SEP)
for track_name, audio, note in tracks:
pk = _pk_dbfs(audio)
rms = _rms_dbfs(audio)
flag = ""
if track_name == "06_diff_residuals":
if diff_vs_gt_db < -12: flag = "[OK] buona convergenza"
elif diff_vs_gt_db < -6: flag = "[~] convergenza parziale"
else: flag = "[WARN] diff elevato rispetto al GT"
row = (f" {item['file']:<{col_w}} {track_name:<22}"
f" {pk:>+10.2f} {rms:>+9.2f} {note} {flag}")
if _HAS_RICH:
color = ("green" if "[OK]" in flag else
"yellow" if "[~]" in flag else
"red" if "[WARN]" in flag else "")
colored_row = row.replace(flag, f"[{color or 'dim'}]{flag}[/]") if flag else row
_console.print(colored_row)
else:
print(row)
wav_path = out_dir / f"{stem}__{track_name}.wav"
_write_wav(wav_path, audio, sr)
# ── Analisi spettrale per banda: LF vs HF ─────────────────────────
# Risponde alla domanda: quanto residual c'e' nelle basse frequenze,
# e quanto ne recupera SPADE?
#
# Bands:
# Sub-bass : 20 – 80 Hz (fondamentale kick, body)
# Bass : 80 – 250 Hz (corpo kick, coda)
# Low-mid : 250 – 800 Hz (presenza)
# High-mid : 800 – 4000 Hz (attacco, click)
# High : 4k – 20k Hz (aria, snap)
#
# Per ogni banda misura:
# GT_energy = energia del GT residual (quanto il limiter ha tolto)
# iter_energy = energia recuperata da SPADE
# recovery % = iter_energy / GT_energy × 100
def band_energy(audio_2d, sr, f_lo, f_hi):
    """RMS energy in dB of the band-pass [f_lo, f_hi] Hz.

    Parameters
    ----------
    audio_2d : ndarray, (N,) or (N, channels) — only channel 0 is analyzed
    sr       : int   — sample rate in Hz
    f_lo     : float — lower band edge, Hz
    f_hi     : float — upper band edge, Hz

    Returns
    -------
    float — RMS level (dBFS) of the filtered signal, or -999.0 as a
    sentinel when the band is empty or the signal is too short to filter.
    """
    mono = audio_2d[:, 0] if audio_2d.ndim == 2 else audio_2d
    # Butterworth bandpass (or lowpass when the low edge is near DC)
    nyq = sr / 2.0
    lo = max(f_lo / nyq, 1e-4)
    hi = min(f_hi / nyq, 0.9999)
    if lo >= hi:
        return -999.0
    if lo < 1e-3:
        b, a = sig.butter(4, hi, btype="low")
    else:
        b, a = sig.butter(4, [lo, hi], btype="band")
    # filtfilt requires len(signal) > padlen (default 3 * filter length,
    # ~27 samples for an order-4 bandpass).  The old fixed guard (N < 8)
    # let shorter signals through and crashed with ValueError; guard with
    # the actual padding requirement instead.
    if len(mono) <= 3 * max(len(a), len(b)):
        return -999.0
    filtered = sig.filtfilt(b, a, mono)
    return _rms_dbfs(filtered)
BANDS = [
("Sub-bass ", 20, 80),
("Bass ", 80, 250),
("Low-mid ", 250, 800),
("High-mid ", 800, 4000),
("High ", 4000, 20000),
]
gt_mono = gt_res[:, 0] if gt_res.ndim == 2 else gt_res
ri_mono = res_iter[:, 0] if res_iter.ndim == 2 else res_iter
# Normalizza GT e iter sulla stessa scala (rimuovi la normalizzazione
# a RESIDUAL_DBFS per confrontare energie assolute)
gt_raw_for_bands = gt_res_raw_approx
iter_raw_for_bands = res_raw
print()
band_hdr = f" {'banda':<12} {'GT_res RMS':>10} {'SPADE rec RMS':>13} {'recovery':>9} {'limitato?'}"
print(f" Analisi spettrale per banda — {item['file']}")
print(f" {'─'*75}")
print(band_hdr)
print(f" {'─'*75}")
for bname, f_lo, f_hi in BANDS:
gt_db = band_energy(gt_raw_for_bands, sr, f_lo, f_hi)
iter_db = band_energy(iter_raw_for_bands, sr, f_lo, f_hi)
if gt_db < -60:
recovery_str = " — (silenzio)"
flag_b = ""
else:
diff_b = iter_db - gt_db # positivo = SPADE supera GT (overrecovery)
# recovery: 0 dB diff = recupero perfetto, molto negativo = sotto-recupero
if diff_b > -3:
flag_b = "OK"
elif diff_b > -9:
flag_b = "~ parziale"
else:
flag_b = "!! sotto-recupero"
recovery_str = f"{diff_b:>+7.1f} dB {flag_b}"
line = f" {bname:<12} {gt_db:>+10.1f} {iter_db:>+13.1f} {recovery_str}"
if _HAS_RICH:
color = "green" if "OK" in recovery_str else (
"yellow" if "~" in recovery_str else (
"red" if "!!" in recovery_str else "dim"))
_console.print(f"[{color}]{line}[/]")
else:
print(line)
print()
print(SEP)
print()
if diff_peaks:
vs_gt_vals = [d[0] for d in diff_peaks]
cos_vals = [d[1] for d in diff_peaks]
avg_vs_gt = float(np.mean(vs_gt_vals))
best_vs_gt = float(np.min(vs_gt_vals))
worst_vs_gt = float(np.max(vs_gt_vals))
avg_cos = float(np.mean(cos_vals))
noise_floor_db = 20 * np.log10(10 ** (PINK_NOISE_LEVEL_DB / 20.0) + 1e-12) + RESIDUAL_DBFS
print(f" RIEPILOGO 06_diff_residuals:")
print(f" diff/GT_rms media : {avg_vs_gt:>+7.2f} dB (0 dB = diff grande quanto GT)")
print(f" diff/GT_rms migliore: {best_vs_gt:>+7.2f} dB")
print(f" diff/GT_rms peggiore: {worst_vs_gt:>+7.2f} dB")
print(f" cos_sim TD media : {avg_cos:>8.4f} (1.0 = identici)")
print()
print(f" NOTA IMPORTANTE:")
print(f" Il rumore rosa ({PINK_NOISE_LEVEL_DB} dB) fa parte del GT_res ma")
print(f" NON puo' essere recuperato da SPADE (non e' sparso).")
print(f" Floor teorico del diff: ≈ {noise_floor_db:+.1f} dBFS — questo e' il")
print(f" limite fisico massimo raggiungibile con questo corpus.")
print(f" Un diff/GT < -6 dB indica buona convergenza di SPADE.")
print()
if worst_vs_gt < -12:
verdict = "OK Convergenza eccellente — SPADE recupera bene i transienti"
elif worst_vs_gt < -6:
verdict = "~ Convergenza buona — residuo compatibile con il noise floor"
else:
verdict = "INFO diff dominato dal rumore rosa — comportamento atteso e corretto"
print(f" Verdetto: {verdict}")
print(f"\n WAV scritti in : {out_dir}/")
print(f" Formato : float32, nessun clipping (usa un editor che supporta >0dBFS)")
print(f" Nomenclatura : <stem>__<N>_<traccia>.wav")
def save_csv(study: "optuna.Study"):
    """Export every COMPLETE trial of *study* to OUT_CSV, best score first.

    Columns mirror the search space; window/hop are reconstructed from the
    stored exponent/divisor parameters (`win_exp`, `hop_div`).
    """
    import csv
    # Sort by objective value, descending.  A trial whose value is None
    # (should not happen for COMPLETE trials, but be defensive) sorts last.
    # The previous key `t.value or 0` mapped None AND 0.0 to the same rank
    # and would have placed a None-valued trial above any negative score.
    trials = sorted(
        [t for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE],
        key=lambda t: t.value if t.value is not None else float("-inf"),
        reverse=True,
    )
    # newline="" is required by the csv module; explicit utf-8 keeps the
    # file portable regardless of the platform's default encoding.
    with open(OUT_CSV, "w", newline="", encoding="utf-8") as f:
        w = csv.writer(f)
        w.writerow(["rank", "score", "delta_db", "lf_delta_db",
                    "window_length", "hop_length", "release_ms", "max_gain_db",
                    "eps", "max_iter", "multiband", "macro_expand", "macro_ratio"])
        for rank, t in enumerate(trials, 1):
            p = t.params
            # win/hop are stored as exponent/divisor in the search space.
            win = 2 ** p["win_exp"]
            hop = win // p["hop_div"]
            w.writerow([
                rank, round(t.value if t.value is not None else 0.0, 6),
                p["delta_db"],
                round(p.get("lf_delta_db", p["delta_db"]), 2),
                win, hop,
                p["release_ms"], p["max_gain_db"], p["eps"], p["max_iter"],
                int(p.get("multiband", False)),
                int(p.get("macro_expand", False)),
                round(p.get("macro_ratio", 1.0), 2),
            ])
    print(f"\n 📄 CSV: {OUT_CSV}")
# =============================================================================
# MAIN
# =============================================================================
def parse_args():
    """Parse the command line for the smart sweep and return the namespace."""
    parser = argparse.ArgumentParser(description="Smart Bayesian sweep per S-SPADE v2")
    # (flag, kwargs) table keeps the whole CLI definition in one place.
    options = [
        ("--trials", dict(type=int, default=200,
                          help="Numero di trial Optuna (default: 200)")),
        ("--resume", dict(action="store_true",
                          help="Carica lo study esistente e aggiunge trial")),
        ("--report", dict(action="store_true",
                          help="Solo report (nessun nuovo trial)")),
        ("--base-dir", dict(type=str, default=".",
                            help="Cartella radice con Kicks/Snares/Perc/Tops")),
        ("--corpus-size", dict(type=int, default=None,
                               help="Limita il corpus a N file (None = tutti)")),
        ("--top", dict(type=int, default=20,
                       help="Quanti trial mostrare nel ranking (default: 20)")),
        ("--no-prune", dict(action="store_true",
                            help="Disabilita MedianPruner (più lento ma completo)")),
        ("--debug-export", dict(action="store_true",
                                help="Esporta WAV di debug per i primi N file del corpus (no sweep)")),
        ("--debug-dir", dict(type=str, default="debug_export",
                             help="Cartella output WAV di debug (default: debug_export)")),
        ("--debug-n", dict(type=int, default=10,
                           help="Quanti file esportare in debug (default: 10)")),
    ]
    for flag, kwargs in options:
        parser.add_argument(flag, **kwargs)
    return parser.parse_args()
def main():
    """CLI entry point: dependency check, corpus build, Optuna sweep, report.

    Dispatches on the parsed flags: --report only prints/exports existing
    results, --debug-export writes debug WAVs with the best known params,
    otherwise it runs `args.trials` new Optuna trials with a progress
    display chosen at runtime (rich -> tqdm -> plain fallback).
    """
    args = parse_args()
    # Fail early if a hard dependency is missing (optuna itself, or the
    # SPADE implementation module expected next to this script).
    missing = []
    if not _HAS_OPTUNA: missing.append("optuna")
    if not _HAS_SPADE: missing.append("spade_declip_v11.py (nella stessa dir)")
    if missing:
        # Only pip-installable names go on the "pip install" line; entries
        # ending with ")" are local-file hints listed separately below it.
        pip = [m for m in missing if not m.endswith(")")]
        sys.exit("Mancante:\n pip install " + " ".join(pip)
                 + ("\n " + "\n ".join(m for m in missing if m.endswith(")")) if any(m.endswith(")") for m in missing) else ""))
    base_dir = Path(args.base_dir).resolve()
    # SQLite storage makes the study resumable across runs (--resume).
    storage = f"sqlite:///{STUDY_NAME}.db"
    # Fixed seed => reproducible TPE suggestions; multivariate=True lets the
    # sampler model parameter interactions jointly.
    sampler = TPESampler(seed=42, multivariate=True, warn_independent_sampling=False)
    pruner = (MedianPruner(n_startup_trials=10, n_warmup_steps=3)
              if not args.no_prune else optuna.pruners.NopPruner())
    if args.report:
        # Report-only mode: load the existing study, print + export, done.
        try:
            study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
                                      sampler=sampler, pruner=pruner)
        except Exception:
            sys.exit(f"Nessuno study trovato in {STUDY_NAME}.db")
        print_report(study, top_n=args.top)
        save_csv(study)
        return
    # ── Debug export ──────────────────────────────────────────────────────────
    if args.debug_export:
        # Use the best trial's parameters if a study DB exists, otherwise
        # fall back to the hard-coded DEBUG_PARAMS defaults.
        spade_params = dict(DEBUG_PARAMS)
        try:
            study = optuna.load_study(study_name=STUDY_NAME, storage=storage,
                                      sampler=sampler, pruner=pruner)
            completed = [t for t in study.trials
                         if t.state == optuna.trial.TrialState.COMPLETE]
            if completed:
                best_t = max(completed, key=lambda t: t.value or 0)
                p = best_t.params
                # win/hop are stored as exponent/divisor in the search space.
                win = 2 ** p["win_exp"]
                hop = win // p["hop_div"]
                spade_params = dict(
                    delta_db = p["delta_db"],
                    window_length = win,
                    hop_length = hop,
                    release_ms = p["release_ms"],
                    max_gain_db = p["max_gain_db"],
                    eps = p["eps"],
                    max_iter = p["max_iter"],
                )
                print(f" [DEBUG] Usando best trial #{best_t.number}"
                      f" (score={best_t.value:.5f}) dal DB.")
        except Exception:
            print(f" [DEBUG] DB non trovato — uso DEBUG_PARAMS di default.")
        # Build the corpus (capped at debug_n files for speed).
        corpus = build_corpus(base_dir, max_files=args.debug_n)
        if not corpus:
            sys.exit("Corpus vuoto. Controlla --base-dir.")
        debug_export(
            corpus = corpus,
            base_dir = base_dir,
            out_dir = Path(args.debug_dir),
            n_files = args.debug_n,
            spade_params = spade_params,
        )
        return
    # ── Corpus ───────────────────────────────────────────────────────────────
    print("\n" + "=" * 65)
    print("CORPUS + LIMITER SINTETICO (Case 1 — threshold-based)")
    print("=" * 65)
    print(f" Base dir : {base_dir}")
    print(f" Threshold : −{LIMITER_THRESHOLD_DB} dBFS")
    print(f" Release : {LIMITER_RELEASE_MS} ms")
    print(f" Level align: NESSUNO — loudness invariata per costruzione")
    print(f" Rumore rosa: {PINK_NOISE_LEVEL_DB} dB rel. peak "
          f"(simula sottofondo musicale sotto il transiente)")
    corpus = build_corpus(base_dir, max_files=args.corpus_size)
    if not corpus:
        sys.exit("Corpus vuoto. Controlla --base-dir e le cartelle.")
    print(f"\n ✓ {len(corpus)} file nel corpus\n")
    # Column width sized to the longest file name for aligned listing.
    col_w = max(len(item["file"]) for item in corpus) + 2
    for item in corpus:
        rms = float(np.sqrt(np.mean(item["gt_res"] ** 2)))
        peak = float(np.max(np.abs(item["gt_res"])))
        print(f" {item['file']:<{col_w}} sr={item['sr']} "
              f"GT rms={rms:.4f} peak={peak:.4f}")
    # ── Study ─────────────────────────────────────────────────────────────────
    print(f"\n{'='*65}")
    print(f"OTTIMIZZAZIONE BAYESIANA — {args.trials} trial")
    print(f"TPE (multivariate) + MedianPruner | storage: {STUDY_NAME}.db")
    print(f"{'='*65}\n")
    # load_if_exists=True makes a plain re-run behave like --resume.
    study = optuna.create_study(
        study_name = STUDY_NAME,
        storage = storage,
        sampler = sampler,
        pruner = pruner,
        direction = "maximize",
        load_if_exists = True,
    )
    # ── Progress bar (rich → tqdm → plain fallback) ───────────────────────────
    try:
        from rich.progress import (
            Progress, BarColumn, TextColumn,
            TimeElapsedColumn, TimeRemainingColumn, MofNCompleteColumn,
        )
        _has_rich_progress = True
    except ImportError:
        _has_rich_progress = False
    try:
        import tqdm as _tqdm_mod
        _has_tqdm = True
    except ImportError:
        _has_tqdm = False
    # Shared state mutated by the per-trial callback.
    # Pre-populated with trials already in the DB (--resume case),
    # so the progress bar shows correct counts from the very start.
    _existing_complete = [t for t in study.trials
                          if t.state == optuna.trial.TrialState.COMPLETE]
    _existing_pruned = [t for t in study.trials
                        if t.state == optuna.trial.TrialState.PRUNED]
    if _existing_complete:
        _best_existing = max(_existing_complete, key=lambda t: t.value or 0)
        _init_best = _best_existing.value or 0.0
        _init_best_p = dict(_best_existing.params)
        _init_last = _init_best
    else:
        _init_best, _init_best_p, _init_last = float("-inf"), {}, float("-inf")
    _state = {
        "done": len(_existing_complete),      # COMPLETE trials so far
        "pruned": len(_existing_pruned),      # PRUNED trials so far
        "best": _init_best,                   # best objective value seen
        "best_p": _init_best_p,               # params of the best trial
        "last": _init_last,                   # value of the latest trial
        "t0": time.time(),                    # wall-clock start (plain ETA)
        "n_total": len(_existing_complete) + len(_existing_pruned) + args.trials,
    }
    def _fmt_best(state: dict) -> str:
        """Compact one-line summary of the current best trial's parameters."""
        # NOTE(review): this helper does not appear to be referenced anywhere
        # in main() — presumably leftover from an earlier progress layout.
        bp = state["best_p"]
        if not bp:
            return "—"
        win = 2 ** bp.get("win_exp", 10)
        hop = win // bp.get("hop_div", 4)
        return (f"δ={bp.get('delta_db',0):.2f} "
                f"win={win} hop={hop} "
                f"rel={bp.get('release_ms',0):.0f}ms "
                f"gain={bp.get('max_gain_db',0):.1f}dB")
    # ── Rich progress bar ─────────────────────────────────────────────────────
    if _has_rich_progress:
        progress = Progress(
            TextColumn("[bold cyan]Trial[/] [cyan]{task.completed}/{task.total}[/]"),
            BarColumn(bar_width=32),
            MofNCompleteColumn(),
            TextColumn(" score [green]{task.fields[last]:.5f}[/]"),
            TextColumn(" best [bold green]{task.fields[best]:.5f}[/]"),
            TextColumn(" [dim]pruned {task.fields[pruned]}[/]"),
            TimeElapsedColumn(),
            TextColumn("ETA"),
            TimeRemainingColumn(),
            refresh_per_second=4,
            transient=False,
        )
        task_id = None  # created inside the context manager below
        def on_trial_end(study, trial):
            # Optuna callback: fold the finished trial into _state and
            # refresh the progress-bar fields.
            fin = (trial.state == optuna.trial.TrialState.COMPLETE)
            prn = (trial.state == optuna.trial.TrialState.PRUNED)
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
            elif prn:
                _state["pruned"] += 1
            progress.update(
                task_id,
                advance = 1,
                last = _state["last"],
                best = max(_state["best"], 0.0),
                pruned = _state["pruned"],
            )
        t0 = time.time()
        try:
            with progress:
                task_id = progress.add_task(
                    "sweep",
                    total = _state["n_total"],
                    completed = _state["done"] + _state["pruned"],
                    last = max(_state["last"], 0.0),
                    best = max(_state["best"], 0.0),
                    pruned = _state["pruned"],
                )
                study.optimize(
                    make_objective(corpus),
                    n_trials = args.trials,
                    callbacks = [on_trial_end],
                    show_progress_bar = False,
                )
        except KeyboardInterrupt:
            # Ctrl-C: the SQLite storage already holds all finished trials.
            print("\n[!] Interrotto — risultati parziali salvati.")
    # ── tqdm fallback ─────────────────────────────────────────────────────────
    elif _has_tqdm:
        # NOTE(review): redundant import — tqdm is already bound above as
        # _tqdm_mod; kept as-is to avoid any behavior change.
        import tqdm
        _already = _state["done"] + _state["pruned"]
        pbar = tqdm.tqdm(
            total = _state["n_total"],
            initial = _already,
            unit = "trial",
            bar_format = "{l_bar}{bar}| {n}/{total} [{elapsed}<{remaining}]",
        )
        if _already > 0:
            # Seed the postfix so resumed runs show sensible numbers at once.
            pbar.set_postfix(
                score = f"{max(_state['last'], 0.0):.5f}",
                best = f"{max(_state['best'], 0.0):.5f}",
                pruned = _state["pruned"],
            )
        def on_trial_end(study, trial):
            # Same bookkeeping as the rich branch, rendered through tqdm.
            fin = trial.state == optuna.trial.TrialState.COMPLETE
            prn = trial.state == optuna.trial.TrialState.PRUNED
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
            elif prn:
                _state["pruned"] += 1
            pbar.update(1)
            pbar.set_postfix(
                score = f"{_state['last']:.5f}",
                best = f"{_state['best']:.5f}",
                pruned = _state["pruned"],
            )
        t0 = time.time()
        try:
            study.optimize(
                make_objective(corpus),
                n_trials = args.trials,
                callbacks = [on_trial_end],
                show_progress_bar = False,
            )
        except KeyboardInterrupt:
            print("\n[!] Interrotto — risultati parziali salvati.")
        finally:
            pbar.close()
    # ── Plain fallback ────────────────────────────────────────────────────────
    else:
        def on_trial_end(study, trial):
            # Text-only progress line, redrawn in place via '\r'.
            fin = trial.state == optuna.trial.TrialState.COMPLETE
            prn = trial.state == optuna.trial.TrialState.PRUNED
            if fin:
                _state["done"] += 1
                _state["last"] = trial.value or 0.0
                if _state["last"] > _state["best"]:
                    _state["best"] = _state["last"]
                    _state["best_p"] = dict(study.best_params)
                elapsed = time.time() - _state["t0"]
                done_tot = _state["done"] + _state["pruned"]
                # Naive linear ETA from the average per-trial time so far.
                eta_s = (elapsed / done_tot) * (_state["n_total"] - done_tot) if done_tot else 0
                is_best = abs(_state["last"] - _state["best"]) < 1e-9
                bar_n = int(32 * done_tot / max(_state["n_total"], 1))
                bar = "█" * bar_n + "░" * (32 - bar_n)
                print(f"\r[{bar}] {done_tot}/{_state['n_total']}"
                      f" {'★' if is_best else ' '}score={_state['last']:.5f}"
                      f" best={_state['best']:.5f}"
                      f" pruned={_state['pruned']}"
                      f" ETA {eta_s/60:.1f}min ", end="", flush=True)
            elif prn:
                # NOTE(review): pruned trials only bump the counter — the
                # line is not redrawn until the next COMPLETE trial.
                _state["pruned"] += 1
        t0 = time.time()
        try:
            study.optimize(
                make_objective(corpus),
                n_trials = args.trials,
                callbacks = [on_trial_end],
                show_progress_bar = False,
            )
        except KeyboardInterrupt:
            print("\n[!] Interrotto — risultati parziali salvati.")
        print()  # newline after the '\r' progress line
    # Final summary recomputed from the study itself (authoritative counts).
    elapsed = time.time() - t0
    n_done = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.COMPLETE)
    n_prune = sum(1 for t in study.trials if t.state == optuna.trial.TrialState.PRUNED)
    print(f"\n Completati: {n_done} | Pruned: {n_prune}"
          f" | Tempo totale: {elapsed/60:.1f} min"
          f" | Media: {elapsed/max(n_done+n_prune,1):.1f} s/trial")
    print_report(study, top_n=args.top)
    save_csv(study)
    print("\nDone.")
# Standard script guard: run the sweep only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()