Upload agents/sensor_agent.py with huggingface_hub
Browse files- agents/sensor_agent.py +218 -373
agents/sensor_agent.py
CHANGED
|
@@ -1,384 +1,229 @@
|
|
| 1 |
-
"""
|
| 2 |
-
FORENSIQ β Sensor Characteristics Agent
|
| 3 |
-
Analyzes sensor physics violations:
|
| 4 |
-
- PRNU (Photo-Response Non-Uniformity) noise residual analysis
|
| 5 |
-
- Noise structure (Poisson-Gaussian model fit)
|
| 6 |
-
- Bayer demosaicing artifact detection
|
| 7 |
-
"""
|
| 8 |
-
|
| 9 |
import numpy as np
|
| 10 |
from PIL import Image
|
| 11 |
-
from scipy.ndimage import gaussian_filter, uniform_filter
|
| 12 |
from scipy.signal import convolve2d
|
| 13 |
-
from dataclasses import dataclass
|
| 14 |
from typing import Dict, Any
|
| 15 |
-
|
| 16 |
from agents.optical_agent import AgentEvidence
|
| 17 |
|
|
|
|
|
|
|
| 18 |
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
"""
|
| 22 |
-
Extract noise residual fingerprint.
|
| 23 |
-
Real cameras leave a unique PRNU pattern; AI images have uniform or random noise.
|
| 24 |
-
Inconsistent local noise variance β splicing / AI generation.
|
| 25 |
-
"""
|
| 26 |
-
rgb = np.array(img.convert("RGB")).astype(np.float64)
|
| 27 |
-
|
| 28 |
-
noise_residuals = []
|
| 29 |
for c in range(3):
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
note = "Consistent sensor noise pattern with correlated channels (real camera)"
|
| 61 |
-
elif uniformity < 0.4:
|
| 62 |
-
score = 0.5
|
| 63 |
-
note = "Inconsistent noise regions suggest splicing or AI generation"
|
| 64 |
-
elif avg_corr < 0.1:
|
| 65 |
-
score = 0.4
|
| 66 |
-
note = "Uncorrelated channel noise (atypical for real cameras)"
|
| 67 |
-
else:
|
| 68 |
-
score = 0.1
|
| 69 |
-
note = "Moderate noise consistency"
|
| 70 |
-
|
| 71 |
-
return {
|
| 72 |
-
"test": "PRNU Noise Residual",
|
| 73 |
-
"noise_uniformity": round(uniformity, 4),
|
| 74 |
-
"noise_mean": round(noise_mean, 4),
|
| 75 |
-
"rg_correlation": round(rg_corr, 4),
|
| 76 |
-
"rb_correlation": round(rb_corr, 4),
|
| 77 |
-
"score": score,
|
| 78 |
-
"note": note,
|
| 79 |
-
"noise_map": noise_energy,
|
| 80 |
-
}
|
| 81 |
-
|
| 82 |
-
|
| 83 |
-
# βββ Noise Structure (Poisson-Gaussian Model) ββββββββββββββββββββββ
|
| 84 |
-
def analyze_noise_structure(img: Image.Image) -> Dict[str, Any]:
|
| 85 |
-
"""
|
| 86 |
-
Real sensor noise follows ΟΒ² = ΟΒ²_read + kΒ·I (Poisson-Gaussian).
|
| 87 |
-
AI images lack this physical noise model.
|
| 88 |
-
"""
|
| 89 |
-
rgb = np.array(img.convert("RGB")).astype(np.float64)
|
| 90 |
-
gray = np.mean(rgb, axis=-1)
|
| 91 |
-
|
| 92 |
-
# Compute local mean and local variance in blocks
|
| 93 |
-
block_size = 16
|
| 94 |
-
h, w = gray.shape
|
| 95 |
-
h_crop, w_crop = (h // block_size) * block_size, (w // block_size) * block_size
|
| 96 |
-
gray = gray[:h_crop, :w_crop]
|
| 97 |
-
|
| 98 |
-
intensities = []
|
| 99 |
-
variances = []
|
| 100 |
-
|
| 101 |
-
for i in range(0, h_crop, block_size):
|
| 102 |
-
for j in range(0, w_crop, block_size):
|
| 103 |
-
block = gray[i:i + block_size, j:j + block_size]
|
| 104 |
-
intensities.append(float(np.mean(block)))
|
| 105 |
-
variances.append(float(np.var(block)))
|
| 106 |
-
|
| 107 |
-
intensities = np.array(intensities)
|
| 108 |
-
variances = np.array(variances)
|
| 109 |
-
|
| 110 |
-
# Filter out extreme blocks
|
| 111 |
-
valid = (intensities > 10) & (intensities < 245) & (variances > 0)
|
| 112 |
-
if np.sum(valid) < 20:
|
| 113 |
-
return {
|
| 114 |
-
"test": "Noise Structure (Poisson-Gaussian)",
|
| 115 |
-
"score": 0.0,
|
| 116 |
-
"note": "Insufficient data for noise model fitting",
|
| 117 |
-
}
|
| 118 |
-
|
| 119 |
-
I = intensities[valid]
|
| 120 |
-
V = variances[valid]
|
| 121 |
-
|
| 122 |
-
# Fit linear model: V = a + b*I (Poisson-Gaussian)
|
| 123 |
try:
|
| 124 |
-
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
|
| 146 |
-
|
| 147 |
-
|
| 148 |
-
}
|
| 149 |
-
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
def
|
| 153 |
-
|
| 154 |
-
|
| 155 |
-
|
| 156 |
-
|
| 157 |
-
""
|
| 158 |
-
|
| 159 |
-
|
| 160 |
-
|
| 161 |
-
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
|
| 171 |
-
|
| 172 |
-
|
| 173 |
-
|
| 174 |
-
|
| 175 |
-
|
| 176 |
-
|
| 177 |
-
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
|
| 182 |
-
|
| 183 |
-
|
| 184 |
-
|
| 185 |
-
|
| 186 |
-
|
| 187 |
-
|
| 188 |
-
|
| 189 |
-
|
| 190 |
-
|
| 191 |
-
|
| 192 |
-
|
| 193 |
-
|
| 194 |
-
|
| 195 |
-
|
| 196 |
-
|
| 197 |
-
|
| 198 |
-
}
|
| 199 |
-
|
| 200 |
-
|
| 201 |
-
|
| 202 |
-
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
-
|
| 209 |
-
|
| 210 |
-
|
| 211 |
-
|
| 212 |
-
|
| 213 |
-
|
| 214 |
-
|
| 215 |
-
fft_rg = np.abs(np.fft.fftshift(np.fft.fft2(rg)))
|
| 216 |
-
fft_bg = np.abs(np.fft.fftshift(np.fft.fft2(bg)))
|
| 217 |
-
|
| 218 |
-
h, w = fft_rg.shape
|
| 219 |
-
cy, cx = h // 2, w // 2
|
| 220 |
-
|
| 221 |
-
# Check for Bayer CFA signature: peaks at (N/2, 0), (0, N/2), (N/2, N/2)
|
| 222 |
-
nyquist_energy_rg = float(
|
| 223 |
-
fft_rg[cy, 0] + fft_rg[0, cx] + fft_rg[0, 0]
|
| 224 |
-
) / 3
|
| 225 |
-
center_energy_rg = float(np.mean(fft_rg[cy - 5:cy + 5, cx - 5:cx + 5]))
|
| 226 |
-
cfa_ratio = nyquist_energy_rg / (center_energy_rg + 1e-9)
|
| 227 |
-
|
| 228 |
-
if cfa_ratio > 1.5:
|
| 229 |
-
score = -0.3
|
| 230 |
-
note = f"CFA interpolation traces detected (ratio={cfa_ratio:.2f}, real camera)"
|
| 231 |
-
elif cfa_ratio < 0.5:
|
| 232 |
-
score = 0.3
|
| 233 |
-
note = f"No CFA traces (ratio={cfa_ratio:.2f}, possible AI generation)"
|
| 234 |
-
else:
|
| 235 |
-
score = 0.0
|
| 236 |
-
note = f"Ambiguous CFA analysis (ratio={cfa_ratio:.2f})"
|
| 237 |
-
|
| 238 |
-
return {
|
| 239 |
-
"test": "CFA Pattern Verification",
|
| 240 |
-
"cfa_nyquist_ratio": round(cfa_ratio, 4),
|
| 241 |
-
"score": score,
|
| 242 |
-
"note": note,
|
| 243 |
-
}
|
| 244 |
-
|
| 245 |
-
|
| 246 |
-
# βββ Hot/Dead Pixel Analysis ββββββββββββββββββββββββββββββββββββββββ
|
| 247 |
-
def analyze_hot_dead_pixels(img: Image.Image) -> Dict[str, Any]:
|
| 248 |
-
"""
|
| 249 |
-
Real sensors have hot (stuck bright) and dead (stuck dark) pixels.
|
| 250 |
-
AI images lack these sensor defects entirely.
|
| 251 |
-
"""
|
| 252 |
-
gray = np.array(img.convert("L")).astype(np.float64)
|
| 253 |
-
h, w = gray.shape
|
| 254 |
-
|
| 255 |
-
# Local median filter
|
| 256 |
-
from scipy.ndimage import median_filter
|
| 257 |
-
med = median_filter(gray, size=5)
|
| 258 |
-
|
| 259 |
-
diff = np.abs(gray - med)
|
| 260 |
-
|
| 261 |
-
# Hot pixels: much brighter than neighbors
|
| 262 |
-
hot_threshold = np.percentile(diff, 99.9)
|
| 263 |
-
hot_pixels = int(np.sum(diff > hot_threshold))
|
| 264 |
-
|
| 265 |
-
# Dead pixels: much darker than neighbors AND very low absolute value
|
| 266 |
-
dark_mask = (gray < 5) & (diff > hot_threshold * 0.5)
|
| 267 |
-
dead_pixels = int(np.sum(dark_mask))
|
| 268 |
-
|
| 269 |
-
total_defects = hot_pixels + dead_pixels
|
| 270 |
-
defect_rate = total_defects / (h * w)
|
| 271 |
-
|
| 272 |
-
# Real cameras: typically 0.001%-0.01% defective pixels
|
| 273 |
-
if 0.00001 < defect_rate < 0.001:
|
| 274 |
-
score = -0.2
|
| 275 |
-
note = f"Sensor defects detected ({total_defects} pixels, rate={defect_rate:.6f}, real camera)"
|
| 276 |
-
elif defect_rate < 0.000001:
|
| 277 |
-
score = 0.2
|
| 278 |
-
note = f"No sensor defects ({total_defects} pixels, possible AI generation)"
|
| 279 |
-
else:
|
| 280 |
-
score = 0.0
|
| 281 |
-
note = f"Defect rate={defect_rate:.6f} ({total_defects} pixels)"
|
| 282 |
-
|
| 283 |
-
return {
|
| 284 |
-
"test": "Hot/Dead Pixel Analysis",
|
| 285 |
-
"hot_pixels": hot_pixels,
|
| 286 |
-
"dead_pixels": dead_pixels,
|
| 287 |
-
"defect_rate": round(defect_rate, 8),
|
| 288 |
-
"score": score,
|
| 289 |
-
"note": note,
|
| 290 |
-
}
|
| 291 |
-
|
| 292 |
-
|
| 293 |
-
# βββ JPEG Quantization Table Analysis βββββββββββββββββββββββββββββββ
|
| 294 |
-
def analyze_jpeg_quantization(img: Image.Image) -> Dict[str, Any]:
|
| 295 |
-
"""
|
| 296 |
-
Real JPEG images have specific quantization tables from camera firmware.
|
| 297 |
-
AI-generated images saved as JPEG have generic tables.
|
| 298 |
-
Double-compressed images show quantization table mismatches.
|
| 299 |
-
"""
|
| 300 |
try:
|
| 301 |
-
|
| 302 |
-
if
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
|
| 307 |
-
|
| 308 |
-
|
| 309 |
-
|
| 310 |
-
|
| 311 |
-
|
| 312 |
-
|
| 313 |
-
|
| 314 |
-
|
| 315 |
-
|
| 316 |
-
|
| 317 |
-
|
| 318 |
-
|
| 319 |
-
|
| 320 |
-
|
| 321 |
-
|
| 322 |
-
|
| 323 |
-
|
| 324 |
-
|
| 325 |
-
|
| 326 |
-
|
| 327 |
-
|
| 328 |
-
|
| 329 |
-
|
| 330 |
-
|
| 331 |
-
|
| 332 |
-
|
| 333 |
-
|
| 334 |
-
|
| 335 |
-
|
| 336 |
-
|
| 337 |
-
|
| 338 |
-
|
| 339 |
-
|
| 340 |
-
|
| 341 |
-
|
| 342 |
-
|
| 343 |
-
|
| 344 |
-
|
| 345 |
-
|
| 346 |
-
|
| 347 |
-
|
| 348 |
-
|
| 349 |
-
|
| 350 |
-
|
| 351 |
-
|
| 352 |
-
|
| 353 |
-
|
| 354 |
-
|
| 355 |
-
|
| 356 |
-
|
| 357 |
-
|
| 358 |
-
|
| 359 |
-
|
| 360 |
-
|
| 361 |
-
|
| 362 |
-
|
| 363 |
-
|
| 364 |
-
|
| 365 |
-
|
| 366 |
-
|
| 367 |
-
|
| 368 |
-
|
| 369 |
-
|
| 370 |
-
|
| 371 |
-
|
| 372 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
for f in findings:
|
| 374 |
-
if f.get("note"):
|
| 375 |
-
|
| 376 |
-
|
| 377 |
-
return AgentEvidence(
|
| 378 |
-
agent_name="Sensor Characteristics Agent",
|
| 379 |
-
violation_score=np.clip(avg_score, -1, 1),
|
| 380 |
-
confidence=confidence,
|
| 381 |
-
failure_prob=max(0.0, 1.0 - len(scores) / 6),
|
| 382 |
-
rationale=rationale,
|
| 383 |
-
sub_findings=findings,
|
| 384 |
-
)
|
|
|
|
| 1 |
+
"""FORENSIQ β Sensor Characteristics Agent (18 features)"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
import numpy as np
|
| 3 |
from PIL import Image
|
| 4 |
+
from scipy.ndimage import gaussian_filter, uniform_filter, median_filter, label
|
| 5 |
from scipy.signal import convolve2d
|
|
|
|
| 6 |
from typing import Dict, Any
|
|
|
|
| 7 |
from agents.optical_agent import AgentEvidence
|
| 8 |
|
| 9 |
+
def _g(img): return np.array(img.convert("L")).astype(np.float64)
|
| 10 |
+
def _rgb(img): return np.array(img.convert("RGB")).astype(np.float64)
|
| 11 |
|
| 12 |
+
def s01_prnu_uniformity(img):
    """Measure spatial uniformity of the PRNU-style noise residual.

    A real sensor leaves a noise floor whose local energy is roughly even
    across the frame; patchy residual energy hints at splicing or generation.
    """
    rgb = _rgb(img)
    residuals = []
    for channel in range(3):
        plane = rgb[:, :, channel]
        smooth = gaussian_filter(plane, 3.0)
        residuals.append(plane - smooth)
    # Mean squared residual across channels = per-pixel noise energy map.
    energy = np.mean(np.stack(residuals, axis=-1) ** 2, axis=-1)
    local = uniform_filter(energy, 32)
    spread = float(np.std(local))
    level = float(np.mean(local))
    u = 1.0 - min(spread / (level + 1e-9), 1.0)
    if u > 0.7:
        s, n = -0.4, f"Uniform sensor noise (uniformity={u:.3f})"
    elif u < 0.4:
        s, n = 0.5, f"Inconsistent noise ({u:.3f}) β splicing/AI"
    else:
        s, n = 0.1, f"Moderate noise uniformity ({u:.3f})"
    return {"test": "PRNU Uniformity", "uniformity": round(u, 4), "score": s, "note": n, "noise_map": energy}
|
| 23 |
+
|
| 24 |
+
def s02_prnu_correlation(img):
    """Check cross-channel correlation of the noise residual.

    Demosaicing correlates noise between colour channels on a real camera;
    independently generated per-channel noise is a synthetic tell.
    """
    rgb = _rgb(img)
    residuals = []
    for channel in range(3):
        plane = rgb[:, :, channel]
        residuals.append((plane - gaussian_filter(plane, 3.0)).ravel())
    # Subsample to at most ~100k points so corrcoef stays cheap.
    step = max(1, len(residuals[0]) // 100000)
    rg = float(np.corrcoef(residuals[0][::step], residuals[1][::step])[0, 1])
    rb = float(np.corrcoef(residuals[0][::step], residuals[2][::step])[0, 1])
    avg = (rg + rb) / 2
    if avg > 0.3:
        s, n = -0.3, f"Correlated channel noise ({avg:.3f}) β real sensor"
    elif avg < 0.1:
        s, n = 0.4, f"Uncorrelated noise ({avg:.3f}) β AI-like"
    else:
        s, n = 0.1, f"Moderate noise correlation ({avg:.3f})"
    return {"test": "PRNU Cross-Channel", "correlation": round(avg, 4), "score": s, "note": n}
|
| 35 |
+
|
| 36 |
+
def s03_noise_model(img):
    """Fit the Poisson-Gaussian sensor noise model (variance vs. intensity).

    Real sensors obey V = a + b*I across flat patches; AI images usually do
    not. Returns a finding dict with the fit quality, a score, and a note.
    Never raises: a failed fit degrades to r2 = 0.
    """
    rgb = np.array(img.convert("RGB")).astype(np.float64)
    gray = np.mean(rgb, axis=-1)
    h, w = gray.shape
    bs = 16  # block size for local mean/variance statistics
    hc, wc = (h // bs) * bs, (w // bs) * bs
    gray = gray[:hc, :wc]
    I, V = [], []
    for i in range(0, hc, bs):
        for j in range(0, wc, bs):
            b = gray[i:i + bs, j:j + bs]
            I.append(float(np.mean(b)))
            V.append(float(np.var(b)))
    I, V = np.array(I), np.array(V)
    # Drop near-black, near-saturated, and zero-variance blocks before fitting.
    v = (I > 10) & (I < 245) & (V > 0)
    if np.sum(v) < 20:
        return {"test": "Poisson-Gaussian Model", "score": 0.0, "note": "Insufficient data"}
    try:
        c = np.polyfit(I[v], V[v], 1)
        f = np.polyval(c, I[v])
        r2 = 1.0 - float(np.mean((V[v] - f) ** 2)) / (np.var(V[v]) + 1e-9)
    except Exception:  # was bare `except:` — never swallow SystemExit/KeyboardInterrupt
        r2 = 0.0
    if r2 > 0.5:
        s, n = -0.3, f"Poisson-Gaussian fit RΒ²={r2:.3f}"
    elif r2 < 0.1:
        s, n = 0.5, f"No sensor noise model RΒ²={r2:.3f}"
    else:
        s, n = 0.15, f"Weak fit RΒ²={r2:.3f}"
    return {"test": "Poisson-Gaussian Model", "r_squared": round(r2, 4), "score": s, "note": n}
|
| 52 |
+
|
| 53 |
+
def s04_bayer(img):
    """Look for the Bayer CFA noise signature (quieter green channel)."""
    rgb = _rgb(img)
    ns = {}
    for c, nm in enumerate(["red", "green", "blue"]):
        plane = rgb[:, :, c]
        ns[nm] = float(np.std(plane - gaussian_filter(plane, 1.5)))
    # Green is sampled twice per 2x2 Bayer cell, so its residual noise should
    # be lowest, while red and blue should be roughly matched.
    green_lowest = ns["green"] < min(ns["red"], ns["blue"])
    rb_matched = abs(ns["red"] - ns["blue"]) / (max(ns["red"], ns["blue"]) + 1e-9) < 0.2
    if green_lowest and rb_matched:
        s, n = -0.4, f"Bayer: ΟG({ns['green']:.3f})<ΟR({ns['red']:.3f})βΟB({ns['blue']:.3f})"
    elif green_lowest:
        s, n = -0.2, "Green quieter but R/B differ"
    else:
        s, n = 0.4, f"No Bayer pattern: ΟG={ns['green']:.3f}"
    return {"test": "Bayer CFA Pattern", "score": s, "note": n}
|
| 63 |
+
|
| 64 |
+
def s05_cfa_nyquist(img):
    """Check for CFA interpolation energy at the Nyquist frequencies."""
    rgb = _rgb(img)
    chroma = rgb[:, :, 0] - rgb[:, :, 1]  # R-G difference isolates CFA traces
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(chroma)))
    h, w = spectrum.shape
    cy, cx = h // 2, w // 2
    # Demosaicing leaves peaks at the half-sampling (Nyquist) positions.
    nyq = float(spectrum[cy, 0] + spectrum[0, cx] + spectrum[0, 0]) / 3
    cen = float(np.mean(spectrum[cy - 5:cy + 5, cx - 5:cx + 5]))
    r = nyq / (cen + 1e-9)
    if r > 1.5:
        s, n = -0.3, f"CFA traces (ratio={r:.2f})"
    elif r < 0.5:
        s, n = 0.3, f"No CFA traces ({r:.2f})"
    else:
        s, n = 0.0, f"CFA ratio={r:.2f}"
    return {"test": "CFA Nyquist", "ratio": round(r, 4), "score": s, "note": n}
|
| 73 |
+
|
| 74 |
+
def s06_hot_dead(img):
    """Count hot/dead pixel outliers relative to a 5x5 median filter."""
    gray = _g(img)
    h, w = gray.shape
    med = median_filter(gray, 5)
    diff = np.abs(gray - med)
    # NOTE(review): thresholding at the image's own 99.9th/99.5th percentile
    # means a fixed fraction of pixels exceeds it by construction — confirm
    # this relative threshold is intended rather than an absolute one.
    hot = int(np.sum(diff > np.percentile(diff, 99.9)))
    dead = int(np.sum((gray < 5) & (diff > np.percentile(diff, 99.5))))
    rate = (hot + dead) / (h * w)
    if 0.00001 < rate < 0.001:
        s, n = -0.2, f"Sensor defects ({hot+dead}px, rate={rate:.6f})"
    elif rate < 0.000001:
        s, n = 0.2, f"No defects β possible AI"
    else:
        s, n = 0.0, f"Defect rate={rate:.6f}"
    return {"test": "Hot/Dead Pixels", "count": hot + dead, "score": s, "note": n}
|
| 83 |
+
|
| 84 |
+
def s07_fixed_pattern(img):
    """Detect row/column fixed-pattern noise typical of real sensors."""
    gray = np.mean(_rgb(img), axis=-1)
    row_means = np.mean(gray, axis=1)
    col_means = np.mean(gray, axis=0)
    # High-pass the row/column profiles; residual variance is the FPN energy.
    row_var = float(np.var(row_means - gaussian_filter(row_means, 10)))
    col_var = float(np.var(col_means - gaussian_filter(col_means, 10)))
    fpn = row_var + col_var
    if fpn > 5:
        s, n = -0.2, f"Fixed pattern noise ({fpn:.2f}) β sensor"
    elif fpn < 0.5:
        s, n = 0.2, f"No fixed pattern ({fpn:.2f})"
    else:
        s, n = 0.0, f"FPN={fpn:.2f}"
    return {"test": "Fixed Pattern Noise", "fpn": round(fpn, 4), "score": s, "note": n}
|
| 94 |
+
|
| 95 |
+
def s08_dark_current(img):
    """Inspect near-black pixels for dark-current variation."""
    gray = _g(img)
    dark = gray[gray < 10]
    if len(dark) < 100:
        return {"test": "Dark Current", "score": 0.0, "note": "No dark pixels"}
    dk_std = float(np.std(dark))
    if dk_std > 1:
        s, n = -0.2, f"Dark current variation (Ο={dk_std:.2f}) β sensor"
    elif dk_std < 0.3:
        s, n = 0.1, f"Flat dark pixels (Ο={dk_std:.2f})"
    else:
        s, n = 0.0, f"Dark Ο={dk_std:.2f}"
    return {"test": "Dark Current", "dark_std": round(dk_std, 3), "score": s, "note": n}
|
| 103 |
+
|
| 104 |
+
def s09_read_noise(img):
    """Estimate the read-noise floor from mid-tone flat regions."""
    gray = np.mean(_rgb(img), axis=-1)
    flat = gray[(gray > 100) & (gray < 150)]
    if len(flat) < 1000:
        return {"test": "Read Noise Floor", "score": 0.0, "note": "No flat regions"}
    # NOTE(review): `flat` is a 1-D sequence of scattered (non-adjacent)
    # pixels, so this 1-D gaussian high-pass is not spatially meaningful —
    # confirm this estimator is intended.
    rn = float(np.std(flat - gaussian_filter(flat.reshape(-1, 1), 1).ravel()))
    if 0.5 < rn < 5:
        s, n = -0.2, f"Read noise={rn:.2f} β real sensor"
    elif rn < 0.2:
        s, n = 0.3, f"No read noise ({rn:.2f})"
    else:
        s, n = 0.0, f"Read noise={rn:.2f}"
    return {"test": "Read Noise Floor", "read_noise": round(rn, 3), "score": s, "note": n}
|
| 113 |
+
|
| 114 |
+
def s10_pixel_nonlinearity(img):
    """Check the tonal histogram for gaps / non-linear response."""
    gray = _g(img)
    bins = np.linspace(0, 255, 20)
    hist, _ = np.histogram(gray, bins=bins)
    hist = hist.astype(float)
    # Deviation of the histogram from its own smoothed version flags
    # comb-like gaps left by tone-curve manipulation.
    smooth = gaussian_filter(hist.astype(np.float64), 2)
    diff = np.abs(hist - smooth)
    nonlin = float(np.mean(diff) / (np.mean(hist) + 1e-9))
    if nonlin < 0.1:
        s, n = -0.2, f"Smooth tonal response ({nonlin:.3f})"
    elif nonlin > 0.3:
        s, n = 0.3, f"Non-linear tonality ({nonlin:.3f})"
    else:
        s, n = 0.0, f"Tonal linearity={nonlin:.3f}"
    return {"test": "Pixel Response Linearity", "nonlinearity": round(nonlin, 4), "score": s, "note": n}
|
| 124 |
+
|
| 125 |
+
def s11_color_matrix(img):
    """Verify inter-channel correlations look like a natural colour matrix."""
    rgb = _rgb(img)

    def corr(a, b):
        # Subsample every 100th pixel to keep corrcoef cheap.
        return float(np.corrcoef(rgb[:, :, a].ravel()[::100], rgb[:, :, b].ravel()[::100])[0, 1])

    avg = (corr(0, 1) + corr(0, 2) + corr(1, 2)) / 3
    if 0.5 < avg < 0.95:
        s, n = -0.2, f"Natural color matrix (avg_corr={avg:.3f})"
    elif avg > 0.98:
        s, n = 0.2, f"Identical channels ({avg:.3f})"
    else:
        s, n = 0.0, f"Color correlation={avg:.3f}"
    return {"test": "Color Matrix Verify", "avg_corr": round(avg, 4), "score": s, "note": n}
|
| 135 |
+
|
| 136 |
+
def s12_quantization(img):
    """Inspect the JPEG quantization table, when present.

    Camera firmware uses characteristic tables; uniform or missing tables
    lean toward re-encoded or generated images. Never raises: any failure
    to read the table yields a neutral finding.
    """
    try:
        qt = img.quantization  # PIL exposes this attribute on JPEG images only
        if qt:
            first = list(qt.values())[0]
            # Older PIL versions return dicts, newer return sequences — accept both.
            t = list(first.values()) if isinstance(first, dict) else list(first)
            if len(t) == 64:
                mx, mn = int(np.max(t)), int(np.min(t))
                std = float(np.std(t))
                if std < 5:
                    s, n = 0.2, f"Uniform quantization (std={std:.1f})"
                elif mx > 100:
                    s, n = -0.2, f"Camera quantization (max={mx})"
                else:
                    s, n = -0.1, f"Standard JPEG table (range=[{mn},{mx}])"
            else:
                s, n = 0.0, "Non-standard table"
        else:
            s, n = 0.1, "No JPEG tables"
    except Exception:  # was bare `except:` — don't swallow KeyboardInterrupt/SystemExit
        s, n = 0.0, "Cannot read tables"
    return {"test": "JPEG Quantization", "score": s, "note": n}
|
| 150 |
+
|
| 151 |
+
def s13_bit_depth(img):
    """Count distinct grey levels to gauge tonal-range usage."""
    gray = _g(img)
    unique = len(np.unique(gray.astype(int)))
    ratio = unique / 256
    if ratio > 0.95:
        s, n = -0.2, f"Full 8-bit usage ({unique} levels)"
    elif ratio < 0.5:
        s, n = 0.3, f"Limited tonal range ({unique} levels)"
    else:
        s, n = 0.0, f"{unique} unique levels"
    return {"test": "Bit Depth Usage", "unique_levels": unique, "score": s, "note": n}
|
| 158 |
+
|
| 159 |
+
def s14_saturation_clipping(img):
    """Measure the fraction of highlight/shadow clipped pixels."""
    gray = _g(img)
    clip_white = float(np.mean(gray > 254))
    clip_black = float(np.mean(gray < 1))
    total = clip_white + clip_black
    if 0.001 < total < 0.05:
        s, n = -0.2, f"Natural clipping ({total:.3%})"
    elif total < 0.0001:
        s, n = 0.2, f"No clipping ({total:.5%}) β unusual"
    elif total > 0.1:
        s, n = 0.1, f"Heavy clipping ({total:.1%})"
    else:
        s, n = 0.0, f"Clipping={total:.3%}"
    return {"test": "Saturation Clipping", "clip_fraction": round(total, 5), "score": s, "note": n}
|
| 167 |
+
|
| 168 |
+
def s15_noise_spatial_freq(img):
    """Compare high- vs low-frequency energy of the noise residual."""
    gray = np.mean(_rgb(img), axis=-1)
    noise = gray - gaussian_filter(gray, 2)
    spectrum = np.abs(np.fft.fftshift(np.fft.fft2(noise)))
    h, w = spectrum.shape
    cy, cx = h // 2, w // 2
    lf = float(np.mean(spectrum[cy - h // 8:cy + h // 8, cx - w // 8:cx + w // 8]))
    hf = float(np.mean(spectrum)) - lf  # can go negative when LF dominates
    ratio = hf / (lf + 1e-9)
    if ratio > 1.5:
        s, n = -0.2, f"High-freq noise dominant ({ratio:.2f}) β sensor"
    elif ratio < 0.5:
        s, n = 0.3, f"Low-freq noise ({ratio:.2f}) β unusual"
    else:
        s, n = 0.0, f"Noise freq ratio={ratio:.2f}"
    return {"test": "Noise Spatial Frequency", "ratio": round(ratio, 3), "score": s, "note": n}
|
| 179 |
+
|
| 180 |
+
def s16_green_imbalance(img):
    """Compare the two green Bayer sub-lattices for imbalance."""
    green = _rgb(img)[:, :, 1]
    g1 = green[0::2, 0::2]
    g2 = green[1::2, 1::2]
    # Sub-lattices may differ by one row/column for odd sizes — crop to match.
    mh = min(g1.shape[0], g2.shape[0])
    mw = min(g1.shape[1], g2.shape[1])
    diff = float(np.mean(np.abs(g1[:mh, :mw] - g2[:mh, :mw])))
    if diff > 0.5:
        s, n = -0.2, f"Green channel imbalance ({diff:.3f}) β Bayer"
    elif diff < 0.1:
        s, n = 0.2, f"Identical green subpixels ({diff:.3f})"
    else:
        s, n = 0.0, f"Green diff={diff:.3f}"
    return {"test": "Green Pixel Imbalance", "diff": round(diff, 4), "score": s, "note": n}
|
| 189 |
+
|
| 190 |
+
def s17_sensor_crop_factor(img):
    """Compare the aspect ratio against common sensor/video formats."""
    w, h = img.size
    ratio = max(w, h) / min(w, h)
    # 1:1, 4:3, 3:2, 16:9 plus common cinema ratios.
    common = [1.0, 4 / 3, 3 / 2, 16 / 9, 1.85, 2.35, 2.39]
    min_diff = min(abs(ratio - c) for c in common)
    if min_diff < 0.02:
        s, n = -0.1, f"Standard aspect ratio ({ratio:.3f})"
    elif min_diff > 0.1:
        s, n = 0.2, f"Unusual aspect ratio ({ratio:.3f})"
    else:
        s, n = 0.0, f"Aspect ratio={ratio:.3f}"
    return {"test": "Sensor Aspect Ratio", "ratio": round(ratio, 4), "score": s, "note": n}
|
| 199 |
+
|
| 200 |
+
def s18_demosaic_interpolation(img):
    """Compare horizontal neighbour correlation of green vs red.

    Bayer demosaicing samples green twice as densely, so interpolated green
    pixels should correlate slightly more with their neighbours than red.
    """
    rgb = _rgb(img)
    red = rgb[:, :, 0]
    green = rgb[:, :, 1]

    def shift_corr(plane):
        # Correlation between each pixel and its right-hand neighbour,
        # subsampled every 100th element for speed.
        return float(np.corrcoef(plane[:, :-1].ravel()[::100], plane[:, 1:].ravel()[::100])[0, 1])

    r_h_corr = shift_corr(red)
    g_h_corr = shift_corr(green)
    if g_h_corr > r_h_corr + 0.005:
        s, n = -0.3, f"Demosaic pattern (G_corr={g_h_corr:.4f}>R_corr={r_h_corr:.4f})"
    elif abs(g_h_corr - r_h_corr) < 0.001:
        s, n = 0.2, f"No demosaic signature"
    else:
        s, n = 0.0, f"G_corr={g_h_corr:.4f}, R_corr={r_h_corr:.4f}"
    return {"test": "Demosaic Interpolation", "g_corr": round(g_h_corr, 4), "r_corr": round(r_h_corr, 4), "score": s, "note": n}
|
| 212 |
+
|
| 213 |
+
# Registry of every sensor test, executed in order by run_sensor_agent.
ALL_TESTS = [
    s01_prnu_uniformity,
    s02_prnu_correlation,
    s03_noise_model,
    s04_bayer,
    s05_cfa_nyquist,
    s06_hot_dead,
    s07_fixed_pattern,
    s08_dark_current,
    s09_read_noise,
    s10_pixel_nonlinearity,
    s11_color_matrix,
    s12_quantization,
    s13_bit_depth,
    s14_saturation_clipping,
    s15_noise_spatial_freq,
    s16_green_imbalance,
    s17_sensor_crop_factor,
    s18_demosaic_interpolation,
]
|
| 217 |
+
|
| 218 |
+
def run_sensor_agent(img):
    """Run every sensor test on *img* and fold the results into AgentEvidence.

    Individual test failures are captured as error findings rather than
    aborting the whole agent; failed tests reduce the reported coverage via
    the failure probability.
    """
    findings = []
    scores = []
    for test_fn in ALL_TESTS:
        try:
            result = test_fn(img)
            findings.append(result)
            scores.append(result["score"])
        except Exception as exc:
            findings.append({"test": test_fn.__name__, "error": str(exc), "score": 0})
    avg = float(np.mean(scores)) if scores else 0.0
    conf = min(1.0, 0.5 + 0.5 * abs(avg))
    viol = [f["test"] for f in findings if f.get("score", 0) > 0.2]
    comp = [f["test"] for f in findings if f.get("score", 0) < -0.1]
    if viol:
        rat = f"Sensor violations: {', '.join(viol)}."
    elif comp:
        rat = f"Sensor consistent: {', '.join(comp)}."
    else:
        rat = "Sensor inconclusive."
    for f in findings:
        if f.get("note"):
            rat += f" [{f['test']}]: {f['note']}."
    return AgentEvidence(
        "Sensor Characteristics Agent",
        np.clip(avg, -1, 1),
        conf,
        max(0, 1 - len(scores) / len(ALL_TESTS)),
        rat,
        findings,
    )
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|