Spaces:
Sleeping
Sleeping
| """Stochastic noise model for the LHC simulator. | |
| All randomness is funneled through a single seeded ``numpy.Generator`` so | |
| episodes are reproducible. The methods are physics-flavoured: Poisson event | |
| counts, Gaussian-smeared masses, log-normal cross-sections, false discovery | |
| helpers, and quality degradation. | |
| """ | |
| from __future__ import annotations | |
| from typing import List | |
| import numpy as np | |
class NoiseModel:
    """Centralised noise generator for the CERN simulator.

    Every sampling method draws from one seeded ``numpy.Generator`` so an
    entire episode is reproducible from a single integer seed.
    """

    def __init__(self, seed: int = 42):
        # One shared Generator: all randomness funnels through ``self.rng``
        # so reseeding reproduces an episode exactly.
        self.rng = np.random.default_rng(seed)

    def reseed(self, seed: int) -> None:
        """Reset the generator to a fresh, deterministic state."""
        self.rng = np.random.default_rng(seed)

    # ── counting / Poisson statistics ───────────────────────────────── 
    def poisson(self, lam: float) -> int:
        """Poisson draw with rate ``lam``; negative rates clamp to 0 (yield 0)."""
        return int(self.rng.poisson(max(lam, 0.0)))

    def signal_yield(
        self,
        cross_section_fb: float,
        luminosity_fb: float,
        branching: float,
        efficiency: float,
        trigger_efficiency: float,
    ) -> int:
        """Expected signal events ~ σ × L × BR × ε_reco × ε_trig + Poisson noise.

        BR = branching ratio of the decay channel.
        ε_reco = channel reconstruction efficiency.
        ε_trig = trigger acceptance.
        """
        mu = cross_section_fb * luminosity_fb * branching * efficiency * trigger_efficiency
        return self.poisson(mu)

    def background_yield(
        self,
        baseline_per_fb: float,
        luminosity_fb: float,
        qcd_strength: float,
        trigger_efficiency: float,
    ) -> int:
        """Expected background events scale linearly with luminosity."""
        mu = baseline_per_fb * luminosity_fb * qcd_strength * trigger_efficiency
        return self.poisson(mu)

    # ── mass smearing ────────────────────────────────────────────────── 
    def smear_mass(
        self,
        true_mass_gev: float,
        resolution_gev: float,
        scale_offset_gev: float = 0.0,
    ) -> float:
        """Single-event mass: Gaussian smearing plus an energy-scale offset."""
        return float(self.rng.normal(true_mass_gev + scale_offset_gev, resolution_gev))

    def fit_mass_estimate(
        self,
        true_mass_gev: float,
        n_signal: int,
        resolution_gev: float,
        scale_offset_gev: float,
    ) -> float:
        """Fitted mass ≈ true mass + Gaussian error scaling like 1/√N_signal."""
        n_eff = max(n_signal, 1)  # guard N=0: keep sigma finite
        sigma = resolution_gev / np.sqrt(n_eff)
        return float(self.rng.normal(true_mass_gev + scale_offset_gev, sigma))

    def fit_mass_uncertainty(
        self,
        n_signal: int,
        resolution_gev: float,
    ) -> float:
        """Statistical mass uncertainty from a peak with N_signal events."""
        n_eff = max(n_signal, 1)
        return float(resolution_gev / np.sqrt(n_eff))

    # ── significance ─────────────────────────────────────────────────── 
    def asimov_significance(
        self,
        n_signal: int,
        n_background: int,
        nuisance_inflation: float = 0.0,
    ) -> float:
        """Asymptotic Asimov-style significance Z = √(2[(s+b) ln(1+s/b) - s]).

        A small nuisance_inflation term in [0,1] shrinks Z to mimic systematic
        penalties when calibration / systematics studies are skipped.
        """
        if n_background <= 0:
            return 0.0  # formula diverges at b=0; treat as no sensitivity
        s = float(n_signal)
        b = float(n_background)
        if s <= 0:
            return 0.0
        term = (s + b) * np.log(1.0 + s / b) - s
        # max(..., 0) clamps tiny negative values from floating-point rounding.
        z = float(np.sqrt(max(2.0 * term, 0.0)))
        return float(z * (1.0 - nuisance_inflation))

    # ── helpers ───────────────────────────────────────────────────────── 
    def coin_flip(self, p: float) -> bool:
        """Bernoulli trial: True with probability ``p``."""
        return bool(self.rng.random() < p)

    def jitter(self, mean: float, sigma: float) -> float:
        """Gaussian sample around ``mean`` with width ``sigma``."""
        return float(self.rng.normal(mean, sigma))

    def quality_degradation(self, base_quality: float, factors: List[float]) -> float:
        """Multiply quality by each degradation factor, add noise, clip to [0, 1]."""
        q = base_quality
        for f in factors:
            q *= f
        return float(np.clip(q + self.rng.normal(0, 0.02), 0.0, 1.0))

    def sample_qc_metric(
        self, mean: float, std: float, clip_lo: float = 0.0, clip_hi: float = 1.0
    ) -> float:
        """Gaussian QC metric clipped to [clip_lo, clip_hi]."""
        return float(np.clip(self.rng.normal(mean, std), clip_lo, clip_hi))

    @staticmethod
    def _as_probabilities(weights: np.ndarray) -> np.ndarray:
        """Normalise non-negative bin weights into a probability vector.

        Falls back to a uniform distribution when the weights are all
        numerically zero (e.g. a signal peak far outside the histogram
        window). Without this, ``multinomial`` receives an unnormalised
        near-zero vector and silently dumps every event into the last bin.
        """
        total = float(weights.sum())
        if total <= 1e-9:
            return np.full(weights.shape, 1.0 / weights.size)
        return weights / total

    def histogram(
        self,
        n_signal: int,
        n_background: int,
        true_mass_gev: float,
        resolution_gev: float,
        window_lo_gev: float,
        window_hi_gev: float,
        n_bins: int = 40,
        background_alpha: float = -2.5,
    ) -> List[int]:
        """Generate a coarse invariant-mass histogram.

        Signal is Gaussian around the true mass with width = resolution;
        background is a falling power-law with index ``background_alpha``.
        Returns per-bin integer counts (length ``n_bins``).
        """
        if window_hi_gev <= window_lo_gev:
            return [0] * n_bins  # degenerate window → empty histogram
        edges = np.linspace(window_lo_gev, window_hi_gev, n_bins + 1)
        centers = 0.5 * (edges[:-1] + edges[1:])
        # Unnormalised bin weights; resolution floored at 1 MeV to avoid /0,
        # centers clipped to ≥ 1 GeV so the power law stays finite.
        sig_pdf = np.exp(-0.5 * ((centers - true_mass_gev) / max(resolution_gev, 1e-3)) ** 2)
        bg_pdf = np.power(np.clip(centers, 1.0, None), background_alpha)
        # BUG FIX: the old ``pdf /= max(pdf.sum(), 1e-9)`` left the weights
        # unnormalised whenever the sum underflowed (peak outside window),
        # so multinomial piled (almost) all events into the final bin.
        sig_counts = self.rng.multinomial(max(n_signal, 0), self._as_probabilities(sig_pdf))
        bg_counts = self.rng.multinomial(max(n_background, 0), self._as_probabilities(bg_pdf))
        return (sig_counts + bg_counts).astype(int).tolist()