anky2002 committed on
Commit
0aa9fa4
·
verified ·
1 Parent(s): 036320d

Upload agents/optical_agent.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. agents/optical_agent.py +391 -560
agents/optical_agent.py CHANGED
@@ -1,583 +1,414 @@
1
  """
2
- FORENSIQ β€” Optical Physics Agent
3
- Tests violations of lens and optical physics:
4
- - Chromatic Aberration analysis (RGB channel misregistration)
5
- - Vignetting analysis (cos⁴θ intensity falloff)
6
- - Depth-of-Field consistency (expected vs measured blur)
7
- - Bokeh microstructure (aperture blade FFT signatures)
8
  """
9
 
10
  import numpy as np
11
  from PIL import Image
12
- from scipy.ndimage import sobel, gaussian_filter, uniform_filter
13
- from scipy.signal import find_peaks
14
  from dataclasses import dataclass, field
15
  from typing import List, Dict, Any, Optional
16
 
17
 
18
@dataclass
class AgentEvidence:
    """Structured evidence output from any forensic agent."""
    agent_name: str
    violation_score: float  # -1 (authentic) to +1 (fake)
    confidence: float  # 0 to 1
    failure_prob: float  # probability this agent's analysis failed
    rationale: str
    # One dict per individual test: {"test": ..., "score": ..., "note": ...}
    sub_findings: List[Dict[str, Any]] = field(default_factory=list)
    visual_evidence: Optional[Any] = None  # numpy array or PIL image
28
 
29
 
30
def _to_gray(img: Image.Image) -> np.ndarray:
    """Return *img* as a 2-D float64 grayscale (luminance) array."""
    luminance = img.convert("L")
    return np.array(luminance).astype(np.float64)
32
 
33
-
34
def _to_rgb(img: Image.Image) -> np.ndarray:
    """Return *img* as an (H, W, 3) float64 RGB array."""
    color = img.convert("RGB")
    return np.array(color).astype(np.float64)
36
-
37
-
38
- # ─── Chromatic Aberration ────────────────────────────────────────────
39
def analyze_chromatic_aberration(img: Image.Image) -> Dict[str, Any]:
    """
    Real lenses create slight spatial offsets between R/G/B edges.
    AI images have perfectly aligned or impossibly misaligned channels.
    """
    rgb = _to_rgb(img)

    # Flattened Sobel edge-magnitude map for each color channel.
    edge_maps = []
    for idx in range(3):
        channel = rgb[:, :, idx]
        edge_maps.append(np.hypot(sobel(channel, axis=0), sobel(channel, axis=1)).ravel())
    er, eg, eb = edge_maps

    # Pairwise correlation of the edge maps.
    rg_corr = float(np.corrcoef(er, eg)[0, 1])
    rb_corr = float(np.corrcoef(er, eb)[0, 1])
    gb_corr = float(np.corrcoef(eg, eb)[0, 1])
    avg_corr = (rg_corr + rb_corr + gb_corr) / 3.0

    # Natural range: moderate correlation (0.80–0.97).
    # Too perfect (>0.99) = synthetic; too low (<0.70) = heavy editing.
    if avg_corr > 0.99:
        score, note = 0.6, "Suspiciously perfect channel alignment (no natural CA)"
    elif avg_corr < 0.70:
        score, note = 0.5, "Abnormally low channel correlation (heavy manipulation)"
    elif 0.80 <= avg_corr <= 0.97:
        score, note = -0.4, "Natural chromatic aberration pattern detected"
    else:
        score, note = 0.2, "Borderline chromatic aberration"

    return {
        "test": "Chromatic Aberration",
        "rg_correlation": round(rg_corr, 4),
        "rb_correlation": round(rb_corr, 4),
        "gb_correlation": round(gb_corr, 4),
        "avg_correlation": round(avg_corr, 4),
        "score": score,
        "note": note,
    }
87
-
88
-
89
- # ─── Vignetting ─────────────────────────────────────────────────────
90
def analyze_vignetting(img: Image.Image) -> Dict[str, Any]:
    """
    Real photos show cos⁴(θ) intensity falloff from center.
    AI images often have flat or unnatural brightness profiles.
    """
    gray = _to_gray(img)
    height, width = gray.shape
    center_y, center_x = height / 2, width / 2

    # Normalized radial distance of every pixel (0 = center, 1 = corner).
    yy, xx = np.mgrid[0:height, 0:width]
    radius = np.sqrt((xx - center_x) ** 2 + (yy - center_y) ** 2)
    radius_norm = radius / np.sqrt(center_x ** 2 + center_y ** 2)

    # Mean intensity per radial bin.
    n_bins = 20
    bin_edges = np.linspace(0, 1, n_bins + 1)
    radial_means = []
    for lo, hi in zip(bin_edges[:-1], bin_edges[1:]):
        in_bin = (radius_norm >= lo) & (radius_norm < hi)
        radial_means.append(float(np.mean(gray[in_bin])) if in_bin.any() else 0)
    radial_means = np.array(radial_means)
    if radial_means[0] == 0:
        radial_means[0] = 1.0  # guard against a fully dark center bin

    # Expected cos⁴(θ) model: I ∝ cos⁴(arctan(r/f)); brightness should
    # decrease toward the frame edges.
    normalized = radial_means / (radial_means[0] + 1e-9)
    bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    cos4_model = np.cos(np.arctan(bin_centers * 1.5)) ** 4  # ~35° max field angle
    residual = float(np.mean((normalized - cos4_model) ** 2))

    # Fraction of radial steps where brightness *rises* going outward.
    diffs = np.diff(normalized)
    non_decreasing_frac = float(np.sum(diffs > 0.02) / len(diffs))

    if residual < 0.01 and non_decreasing_frac < 0.3:
        score, note = -0.3, "Natural vignetting pattern (cos⁴θ consistent)"
    elif residual > 0.05 or non_decreasing_frac > 0.5:
        score, note = 0.4, "Unnatural brightness profile (vignetting absent or inconsistent)"
    else:
        score, note = 0.1, "Mild vignetting deviation"

    return {
        "test": "Vignetting (cos⁴θ)",
        "cos4_residual": round(residual, 5),
        "non_decreasing_fraction": round(non_decreasing_frac, 3),
        "score": score,
        "note": note,
        "radial_profile": normalized.tolist(),
    }
153
-
154
-
155
- # ─── Depth of Field Consistency ─────────────────────────────────────
156
def analyze_dof_consistency(img: Image.Image) -> Dict[str, Any]:
    """
    Real depth-of-field blur varies smoothly with distance.
    AI images often have inconsistent blur regions.

    Returns a finding dict with the blur-inconsistency statistic, a score
    in [-1, 1] (negative = consistent with real optics) and a note.
    """
    # Fix: the import and the kernel construction used to live inside the
    # inner block loop, re-running once per block for no reason. Hoisted.
    from scipy.signal import convolve2d

    gray = _to_gray(img)
    h, w = gray.shape

    # Local blur estimation via Laplacian variance in blocks.
    block_size = max(h, w) // 16
    if block_size < 8:
        block_size = 8

    # Laplacian kernel; variance of its response = local sharpness.
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)

    blur_map = np.zeros((h // block_size, w // block_size))
    for bi in range(blur_map.shape[0]):
        for bj in range(blur_map.shape[1]):
            y0, y1 = bi * block_size, (bi + 1) * block_size
            x0, x1 = bj * block_size, (bj + 1) * block_size
            block = gray[y0:y1, x0:x1]
            laplacian = convolve2d(block, lap, mode="valid")
            blur_map[bi, bj] = float(np.var(laplacian))

    # Inconsistency = high-frequency variation left after smoothing the map.
    if blur_map.size > 1:
        smooth_blur = gaussian_filter(blur_map, sigma=1.0)
        blur_residual = blur_map - smooth_blur
        inconsistency = float(np.std(blur_residual) / (np.mean(blur_map) + 1e-9))
    else:
        inconsistency = 0.0

    if inconsistency < 0.3:
        score = -0.3
        note = "Smooth depth-of-field gradient (consistent with real optics)"
    elif inconsistency > 0.7:
        score = 0.5
        note = "Abrupt blur transitions (inconsistent DoF, possible manipulation)"
    else:
        score = 0.1
        note = "Moderate DoF variation"

    return {
        "test": "Depth-of-Field Consistency",
        "blur_inconsistency": round(inconsistency, 4),
        "score": score,
        "note": note,
        "blur_map": blur_map,
    }
207
-
208
-
209
- # ─── Bokeh Microstructure ───────────────────────────────────────────
210
def analyze_bokeh(img: Image.Image) -> Dict[str, Any]:
    """
    Real bokeh shows aperture blade polygon structure in Fourier domain.
    AI bokeh is typically smooth circles without blade structure.
    """
    gray = _to_gray(img)

    # Find bright point-like regions (potential bokeh): top 3% of pixels.
    threshold = np.percentile(gray, 97)
    bright_mask = gray > threshold

    # Too few bright pixels → nothing to analyze; return a neutral finding.
    if np.sum(bright_mask) < 100:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "Insufficient bright highlights for bokeh analysis",
            "bokeh_found": False,
        }

    # Extract largest bright region via connected-component labeling.
    from scipy.ndimage import label
    labeled, n_features = label(bright_mask)
    if n_features == 0:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "No bokeh regions detected",
            "bokeh_found": False,
        }

    # Get largest blob (pixel count per label; labels start at 1).
    sizes = [np.sum(labeled == i) for i in range(1, n_features + 1)]
    largest = np.argmax(sizes) + 1
    blob_mask = labeled == largest

    # Crop the grayscale patch to the blob's bounding box.
    ys, xs = np.where(blob_mask)
    y0, y1, x0, x1 = ys.min(), ys.max(), xs.min(), xs.max()
    patch = gray[y0:y1 + 1, x0:x1 + 1]

    # Patches under 8x8 carry too little spectrum to bin by angle.
    if patch.shape[0] < 8 or patch.shape[1] < 8:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "Bokeh regions too small for analysis",
            "bokeh_found": False,
        }

    # FFT of patch — look for angular structure (blade signatures).
    fft = np.fft.fft2(patch)
    fft_shift = np.fft.fftshift(fft)
    magnitude = np.log(np.abs(fft_shift) + 1)

    # Angular variance in FFT (high = blade structure, low = smooth circle).
    # Each pixel's angle is measured around the centered DC component.
    cy, cx = magnitude.shape[0] // 2, magnitude.shape[1] // 2
    angles = np.arctan2(
        np.mgrid[0:magnitude.shape[0], 0:magnitude.shape[1]][0] - cy,
        np.mgrid[0:magnitude.shape[0], 0:magnitude.shape[1]][1] - cx,
    )
    # Mean spectral magnitude in 12 angular sectors covering [-π, π).
    n_angular_bins = 12
    angular_profile = []
    for k in range(n_angular_bins):
        a0 = -np.pi + k * (2 * np.pi / n_angular_bins)
        a1 = a0 + (2 * np.pi / n_angular_bins)
        amask = (angles >= a0) & (angles < a1)
        angular_profile.append(float(np.mean(magnitude[amask])) if amask.any() else 0)

    angular_var = float(np.var(angular_profile))

    if angular_var > 0.1:
        score = -0.2
        note = "Aperture blade structure detected in bokeh (consistent with real lens)"
    else:
        score = 0.3
        note = "Smooth circular bokeh without blade structure (possible AI generation)"

    return {
        "test": "Bokeh Microstructure",
        "angular_variance": round(angular_var, 4),
        "score": score,
        "note": note,
        "bokeh_found": True,
    }
293
-
294
-
295
- # ─── Lens Distortion Analysis ────────────────────────────────────────
296
def analyze_lens_distortion(img: Image.Image) -> Dict[str, Any]:
    """
    Real lenses produce barrel/pincushion distortion following Brown-Conrady model.
    AI images often have perfectly rectilinear geometry or impossible distortion.

    Returns a finding dict with per-band strong-edge densities, a score in
    [-1, 1] (negative = consistent with a real lens) and a note.
    """
    gray = _to_gray(img)
    h, w = gray.shape

    # Sobel edge magnitude.
    ex = sobel(gray, axis=1)
    ey = sobel(gray, axis=0)
    edge_mag = np.hypot(ex, ey)

    # Keep only the strongest 10% of edges.
    threshold = np.percentile(edge_mag, 90)
    strong_edges = edge_mag > threshold

    # Normalized radial distance per pixel (0 = center, 1 = corner).
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    R_max = np.sqrt(cx ** 2 + cy ** 2)
    R_norm = R / R_max

    # Strong-edge density in three radial bands.
    inner_edges = float(np.mean(strong_edges[R_norm < 0.3]))
    mid_edges = float(np.mean(strong_edges[(R_norm >= 0.3) & (R_norm < 0.7)]))
    outer_edges = float(np.mean(strong_edges[R_norm >= 0.7]))

    # Real lenses: edges slightly softer at corners due to distortion.
    # AI: uniform edge sharpness across frame.
    edge_ratio = outer_edges / (inner_edges + 1e-9)

    if 0.5 < edge_ratio < 0.9:
        score = -0.3
        note = f"Natural edge falloff at periphery (ratio={edge_ratio:.3f}, lens distortion present)"
    elif edge_ratio > 0.95:
        score = 0.3
        note = f"Unnaturally uniform edges across frame (ratio={edge_ratio:.3f}, no lens distortion)"
    else:
        score = 0.1
        note = f"Edge distribution ratio={edge_ratio:.3f}"

    return {
        "test": "Lens Distortion",
        "edge_ratio_outer_inner": round(edge_ratio, 4),
        "inner_edge_density": round(inner_edges, 4),
        # Fix: the mid-band density was computed but silently discarded;
        # report it alongside the other bands (backward-compatible addition).
        "mid_edge_density": round(mid_edges, 4),
        "outer_edge_density": round(outer_edges, 4),
        "score": score,
        "note": note,
    }
347
-
348
-
349
- # ─── CA Radial Pattern Analysis ─────────────────────────────────────
350
def analyze_ca_radial_pattern(img: Image.Image) -> Dict[str, Any]:
    """
    Real chromatic aberration increases radially from center (more at corners).
    AI images have spatially uniform or random channel misregistration.
    """
    rgb = _to_rgb(img)
    h, w, _ = rgb.shape
    cy, cx = h / 2, w / 2
    red, green, blue = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]

    block_size = max(32, min(h, w) // 8)
    Y, X = np.mgrid[0:h, 0:w]
    dist = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    dist_max = np.sqrt(cx ** 2 + cy ** 2)

    center_diffs = []
    edge_diffs = []
    # Per-block channel-difference spread as a proxy for local CA strength,
    # bucketed by the block center's normalized radius.
    for top in range(0, h - block_size, block_size):
        for left in range(0, w - block_size, block_size):
            window = np.s_[top:top + block_size, left:left + block_size]
            rg_diff = float(np.std(red[window] - green[window]))
            rb_diff = float(np.std(red[window] - blue[window]))
            ca_magnitude = (rg_diff + rb_diff) / 2
            rel_radius = dist[top + block_size // 2, left + block_size // 2] / dist_max
            if rel_radius < 0.4:
                center_diffs.append(ca_magnitude)
            elif rel_radius > 0.6:
                edge_diffs.append(ca_magnitude)

    if center_diffs and edge_diffs:
        center_ca = float(np.mean(center_diffs))
        edge_ca = float(np.mean(edge_diffs))
        ca_increase = edge_ca / (center_ca + 1e-9)

        # Real lenses: CA increases toward edges (ratio > 1.1).
        if ca_increase > 1.15:
            score, note = -0.3, f"CA increases radially (edge/center={ca_increase:.2f}, natural lens behavior)"
        elif ca_increase < 0.9:
            score, note = 0.3, f"CA decreases toward edges (ratio={ca_increase:.2f}, unnatural)"
        else:
            score, note = 0.1, f"Flat CA distribution (ratio={ca_increase:.2f})"
    else:
        ca_increase = 1.0
        score, note = 0.0, "Insufficient data for radial CA analysis"

    return {
        "test": "CA Radial Pattern",
        "ca_edge_center_ratio": round(ca_increase, 4),
        "score": score,
        "note": note,
    }
414
-
415
-
416
- # ─── Specular Reflection Map ────────────────────────────────────────
417
def analyze_specular_reflections(img: Image.Image) -> Dict[str, Any]:
    """
    Real specular reflections follow Phong/Blinn-Phong model with
    consistent highlight shapes. AI often has inconsistent specularity.

    Returns a finding dict with highlight counts/size statistics, a score
    in [-1, 1] (negative = natural) and a note.
    """
    rgb = _to_rgb(img)
    gray = np.mean(rgb, axis=-1)

    # Detect specular highlights (very bright, near-white pixels).
    highlight_threshold = np.percentile(gray, 98)
    highlight_mask = gray > highlight_threshold

    # Compute saturation (low saturation = specular, i.e. near-white).
    max_c = np.max(rgb, axis=-1)
    min_c = np.min(rgb, axis=-1)
    saturation = (max_c - min_c) / (max_c + 1e-9)

    specular_mask = highlight_mask & (saturation < 0.2)
    n_specular = int(np.sum(specular_mask))
    specular_fraction = float(n_specular / (gray.size + 1e-9))

    if n_specular < 50:
        return {
            "test": "Specular Reflections",
            "score": 0.0,
            "note": "Insufficient specular highlights for analysis",
            "specular_count": n_specular,
            # Fix: fraction was computed but never reported.
            "specular_fraction": round(specular_fraction, 6),
        }

    # Check if specular highlights are compact (real) vs diffuse (AI).
    from scipy.ndimage import label
    labeled, n_features = label(specular_mask)
    if n_features > 0:
        # Cap at the first 99 blobs to bound cost on highlight-heavy images.
        sizes = [int(np.sum(labeled == i)) for i in range(1, min(n_features + 1, 100))]
        avg_size = float(np.mean(sizes))
        size_std = float(np.std(sizes))
        size_cv = size_std / (avg_size + 1e-9)  # coefficient of variation
    else:
        size_cv = 0.0
        avg_size = 0.0

    # Real highlights: varied sizes (large CV), AI: uniform sizes.
    if size_cv > 1.0:
        score = -0.2
        note = f"Varied specular highlight sizes (CV={size_cv:.2f}, natural)"
    elif size_cv < 0.3 and n_features > 3:
        score = 0.3
        note = f"Suspiciously uniform highlight sizes (CV={size_cv:.2f})"
    else:
        score = 0.0
        note = f"Specular analysis neutral (CV={size_cv:.2f})"

    return {
        "test": "Specular Reflections",
        "specular_count": n_specular,
        # Fix: specular_fraction was computed but never used or returned;
        # include it so downstream fusion can weigh highlight coverage.
        "specular_fraction": round(specular_fraction, 6),
        "n_highlights": n_features,
        "size_cv": round(size_cv, 4),
        "avg_size": round(avg_size, 2),
        "score": score,
        "note": note,
    }
478
-
479
-
480
- # ─── Purple Fringing Detection ──────────────────────────────────────
481
def analyze_purple_fringing(img: Image.Image) -> Dict[str, Any]:
    """
    Real cameras exhibit purple/magenta fringing at high-contrast edges
    due to chromatic aberration. AI images rarely reproduce this artifact.
    """
    rgb = _to_rgb(img)
    luminance = np.mean(rgb, axis=-1)

    # Strongest 5% of gradient magnitudes = high-contrast edges.
    gradient = np.hypot(sobel(luminance, axis=0), sobel(luminance, axis=1))
    edge_mask = gradient > np.percentile(gradient, 95)

    red, green, blue = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    # Purple = high R, low G, high B.
    purple_score_map = (red + blue - 2 * green) / (red + green + blue + 1e-9)
    edge_purple = purple_score_map[edge_mask]

    if len(edge_purple) < 100:
        return {
            "test": "Purple Fringing",
            "score": 0.0,
            "note": "Insufficient high-contrast edges for fringing analysis",
        }

    mean_purple = float(np.mean(edge_purple))
    purple_fraction = float(np.mean(edge_purple > 0.1))

    if purple_fraction > 0.05:
        score = -0.3
        note = f"Purple fringing detected at {purple_fraction:.1%} of edges (real lens artifact)"
    elif purple_fraction < 0.01 and mean_purple < 0.02:
        score = 0.2
        note = "No purple fringing (uncommon in real photography, possible AI)"
    else:
        score = 0.0
        note = f"Minimal fringing (fraction={purple_fraction:.3f})"

    return {
        "test": "Purple Fringing",
        "purple_fraction": round(purple_fraction, 4),
        "mean_purple_score": round(mean_purple, 4),
        "score": score,
        "note": note,
    }
527
-
528
-
529
- # ─── Main Agent Entry Point ─────────────────────────────────────────
530
def run_optical_agent(img: Image.Image) -> AgentEvidence:
    """Run all optical physics tests and produce structured evidence.

    Each test is isolated: a failing test contributes an error finding
    instead of aborting the agent, and ``failure_prob`` reflects how many
    tests completed successfully.
    """
    findings = []
    scores = []

    tests = [
        analyze_chromatic_aberration,
        analyze_vignetting,
        analyze_dof_consistency,
        analyze_bokeh,
        analyze_lens_distortion,
        analyze_ca_radial_pattern,
        analyze_specular_reflections,
        analyze_purple_fringing,
    ]

    for fn in tests:
        try:
            result = fn(img)
            findings.append(result)
            scores.append(result["score"])
        except Exception as e:
            # One broken test must not kill the whole agent; record it.
            findings.append({"test": fn.__name__, "error": str(e), "score": 0})

    if scores:
        avg_score = float(np.mean(scores))
        # Confidence grows with the magnitude of the consensus score.
        confidence = min(1.0, 0.5 + 0.5 * abs(avg_score))
    else:
        avg_score = 0.0
        confidence = 0.1

    # Build rationale from clearly-violating and clearly-compliant tests.
    violations = [f["test"] for f in findings if f.get("score", 0) > 0.2]
    compliant = [f["test"] for f in findings if f.get("score", 0) < -0.1]

    if violations:
        rationale = f"Optical violations detected in: {', '.join(violations)}."
    elif compliant:
        rationale = f"Optical physics consistent: {', '.join(compliant)}."
    else:
        rationale = "Optical analysis inconclusive."

    for f in findings:
        if f.get("note"):
            rationale += f" [{f['test']}]: {f['note']}."

    return AgentEvidence(
        agent_name="Optical Physics Agent",
        # Fix: np.clip returns a numpy scalar; coerce to the plain float
        # the AgentEvidence dataclass declares.
        violation_score=float(np.clip(avg_score, -1, 1)),
        confidence=confidence,
        failure_prob=max(0.0, 1.0 - len(scores) / len(tests)),
        rationale=rationale,
        sub_findings=findings,
    )
 
1
  """
2
+ FORENSIQ β€” Optical Physics Agent (20 features)
3
+ Tests violations of lens and optical physics.
 
 
 
 
4
  """
5
 
6
  import numpy as np
7
  from PIL import Image
8
+ from scipy.ndimage import sobel, gaussian_filter, uniform_filter, label, median_filter, maximum_filter
9
+ from scipy.signal import find_peaks, convolve2d
10
  from dataclasses import dataclass, field
11
  from typing import List, Dict, Any, Optional
12
 
13
 
14
@dataclass
class AgentEvidence:
    """Structured evidence output from any forensic agent."""
    agent_name: str
    violation_score: float  # -1 (authentic) to +1 (fake)
    confidence: float  # 0 to 1
    failure_prob: float  # probability this agent's analysis failed
    rationale: str
    # One dict per individual test: {"test": ..., "score": ..., "note": ...}
    sub_findings: List[Dict[str, Any]] = field(default_factory=list)
    visual_evidence: Optional[Any] = None  # numpy array or PIL image
23
 
24
 
25
+ def _g(img): return np.array(img.convert("L")).astype(np.float64)
26
+ def _rgb(img): return np.array(img.convert("RGB")).astype(np.float64)
27
 
28
+ # ── 1. Chromatic Aberration Magnitude ────────────────────────────────
29
def f01_ca_magnitude(img: Image.Image) -> Dict[str, Any]:
    """Correlate per-channel Sobel edge maps; real lenses land in 0.80–0.97."""
    rgb = _rgb(img)
    channels = {"R": rgb[:, :, 0], "G": rgb[:, :, 1], "B": rgb[:, :, 2]}
    edges = {name: np.hypot(sobel(ch, 0), sobel(ch, 1)) for name, ch in channels.items()}
    er = edges["R"].ravel()
    eg = edges["G"].ravel()
    eb = edges["B"].ravel()
    # Subsample to keep corrcoef cheap on large images (~200k samples).
    step = max(1, len(er) // 200000)
    rg = float(np.corrcoef(er[::step], eg[::step])[0, 1])
    rb = float(np.corrcoef(er[::step], eb[::step])[0, 1])
    gb = float(np.corrcoef(eg[::step], eb[::step])[0, 1])
    avg = (rg + rb + gb) / 3
    if avg > 0.99:
        score, note = 0.6, "Perfect channel alignment — no natural CA"
    elif avg < 0.70:
        score, note = 0.5, "Abnormally low channel correlation"
    elif 0.80 <= avg <= 0.97:
        score, note = -0.4, "Natural CA pattern detected"
    else:
        score, note = 0.2, "Borderline CA"
    return {"test": "CA Magnitude", "avg_corr": round(avg, 4), "score": score, "note": note}
45
+
46
+ # ── 2. CA Radial Gradient ────────────────────────────────────────────
47
def f02_ca_radial(img: Image.Image) -> Dict[str, Any]:
    """Real CA grows toward the corners; compare per-block CA center vs edge."""
    rgb = _rgb(img)
    h, w, _ = rgb.shape
    cy, cx = h / 2, w / 2
    red, grn, blu = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    bs = max(32, min(h, w) // 8)
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    Rm = np.sqrt(cx ** 2 + cy ** 2)
    center, edge = [], []
    for bi in range(0, h - bs, bs):
        for bj in range(0, w - bs, bs):
            win = np.s_[bi:bi + bs, bj:bj + bs]
            # Channel-difference spread as a local CA proxy.
            ca = (float(np.std(red[win] - grn[win])) + float(np.std(red[win] - blu[win]))) / 2
            rel = R[bi + bs // 2, bj + bs // 2] / Rm
            if rel < 0.4:
                center.append(ca)
            elif rel > 0.6:
                edge.append(ca)
    if cen := (center and edge):
        ratio = float(np.mean(edge)) / (float(np.mean(center)) + 1e-9)
        if ratio > 1.15:
            score, note = -0.3, f"CA increases radially ({ratio:.2f}) — real lens"
        elif ratio < 0.9:
            score, note = 0.3, f"CA decreases toward edges ({ratio:.2f}) — unnatural"
        else:
            score, note = 0.1, f"Flat CA ({ratio:.2f})"
    else:
        ratio = 1.0
        score, note = 0.0, "Insufficient data"
    return {"test": "CA Radial Gradient", "ratio": round(ratio, 4), "score": score, "note": note}
66
+
67
+ # ── 3. Lateral CA (Red-Blue Shift) ───────────────────────────────────
68
def f03_lateral_ca(img: Image.Image) -> Dict[str, Any]:
    """Compare R vs B edge-energy centroids; real lenses shift them slightly."""
    rgb = _rgb(img)
    h, w, _ = rgb.shape
    red_edges = np.hypot(sobel(rgb[:, :, 0], 0), sobel(rgb[:, :, 0], 1))
    blue_edges = np.hypot(sobel(rgb[:, :, 2], 0), sobel(rgb[:, :, 2], 1))
    # Vertical centroid of each channel's edge energy.
    rows = np.arange(h)
    r_centroid = float(np.average(rows, weights=np.sum(red_edges, axis=1) + 1e-9))
    b_centroid = float(np.average(rows, weights=np.sum(blue_edges, axis=1) + 1e-9))
    norm_shift = abs(r_centroid - b_centroid) / (h + 1e-9)
    if 0.001 < norm_shift < 0.02:
        score, note = -0.3, f"Natural lateral CA shift ({norm_shift:.4f})"
    elif norm_shift < 0.0005:
        score, note = 0.3, f"Zero lateral CA ({norm_shift:.4f}) — synthetic"
    elif norm_shift > 0.03:
        score, note = 0.3, f"Excessive CA shift ({norm_shift:.4f}) — unnatural"
    else:
        score, note = 0.0, f"Borderline lateral CA ({norm_shift:.4f})"
    return {"test": "Lateral CA", "shift": round(norm_shift, 6), "score": score, "note": note}
82
+
83
+ # ── 4. Vignetting cos⁴θ ─────────────────────────────────────────────
84
def f04_vignetting(img: Image.Image) -> Dict[str, Any]:
    """Fit the radial brightness profile against the ideal cos⁴θ falloff."""
    gray = _g(img)
    h, w = gray.shape
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    Rn = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    nbins = 20
    be = np.linspace(0, 1, nbins + 1)
    # Mean intensity per radial bin; 0 for empty bins.
    profile = []
    for lo, hi in zip(be[:-1], be[1:]):
        mask = (Rn >= lo) & (Rn < hi)
        profile.append(float(np.mean(gray[mask])) if mask.any() else 0)
    rm = np.array(profile)
    if rm[0] == 0:
        rm[0] = 1.0  # guard against a dark center bin
    norm = rm / (rm[0] + 1e-9)
    centers = (be[:-1] + be[1:]) / 2
    cos4 = np.cos(np.arctan(centers * 1.5)) ** 4
    res = float(np.mean((norm - cos4) ** 2))
    steps = np.diff(norm)
    ndf = float(np.sum(steps > 0.02) / len(steps))
    if res < 0.01 and ndf < 0.3:
        score, note = -0.3, f"Natural vignetting (cos⁴θ residual={res:.5f})"
    elif res > 0.05 or ndf > 0.5:
        score, note = 0.4, f"Absent/inconsistent vignetting (res={res:.5f})"
    else:
        score, note = 0.1, f"Mild vignetting deviation (res={res:.5f})"
    return {"test": "Vignetting cos⁴θ", "residual": round(res, 5), "score": score, "note": note}
98
+
99
+ # ── 5. Vignetting Symmetry ──────────────────────────────────────────
100
def f05_vignetting_symmetry(img: Image.Image) -> Dict[str, Any]:
    """Real optics darken all four corners alike; compare quadrant means."""
    gray = _g(img)
    h2, w2 = gray.shape[0] // 2, gray.shape[1] // 2
    quadrants = [
        float(np.mean(gray[:h2, :w2])),
        float(np.mean(gray[:h2, w2:])),
        float(np.mean(gray[h2:, :w2])),
        float(np.mean(gray[h2:, w2:])),
    ]
    asym = float(np.std(quadrants)) / (float(np.mean(quadrants)) + 1e-9)
    if asym < 0.03:
        score, note = -0.2, f"Symmetric brightness (asym={asym:.4f}) — real optics"
    elif asym > 0.1:
        score, note = 0.3, f"Asymmetric brightness (asym={asym:.4f}) — manipulation"
    else:
        score, note = 0.0, f"Moderate asymmetry ({asym:.4f})"
    return {"test": "Vignetting Symmetry", "asymmetry": round(asym, 4), "score": score, "note": note}
110
+
111
+ # ── 6. DoF Consistency ───────────────────────────────────────────────
112
def f06_dof(img: Image.Image) -> Dict[str, Any]:
    """Blockwise Laplacian-variance blur map; real DoF varies smoothly."""
    gray = _g(img)
    h, w = gray.shape
    bs = max(max(h, w) // 16, 8)
    kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    bm = np.zeros((h // bs, w // bs))
    for row in range(bm.shape[0]):
        for col in range(bm.shape[1]):
            tile = gray[row * bs:(row + 1) * bs, col * bs:(col + 1) * bs]
            bm[row, col] = float(np.var(convolve2d(tile, kernel, mode="valid")))
    # Inconsistency = residual after low-pass smoothing of the blur map.
    if bm.size > 1:
        smooth = gaussian_filter(bm, sigma=1.0)
        inc = float(np.std(bm - smooth) / (np.mean(bm) + 1e-9))
    else:
        inc = 0.0
    if inc < 0.3:
        score, note = -0.3, f"Smooth DoF gradient (inc={inc:.4f})"
    elif inc > 0.7:
        score, note = 0.5, f"Abrupt blur transitions (inc={inc:.4f})"
    else:
        score, note = 0.1, f"Moderate DoF variation ({inc:.4f})"
    return {"test": "DoF Consistency", "inconsistency": round(inc, 4), "score": score, "note": note, "blur_map": bm}
128
+
129
+ # ── 7. DoF Gradient Direction ────────────────────────────────────────
130
def f07_dof_gradient(img: Image.Image) -> Dict[str, Any]:
    """Check whether blockwise sharpness drifts monotonically down the frame."""
    gray = _g(img)
    h, w = gray.shape
    bs = max(32, max(h, w) // 8)
    kernel = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    grid = []
    for top in range(0, h - bs, bs):
        row = []
        for left in range(0, w - bs, bs):
            tile = gray[top:top + bs, left:left + bs]
            row.append(float(np.var(convolve2d(tile, kernel, mode="valid"))))
        grid.append(row)
    if not grid:
        return {"test": "DoF Gradient", "score": 0.0, "note": "Too small"}
    # Fraction of row-to-row steps going the dominant direction.
    row_means = np.mean(np.array(grid), axis=1)
    if len(row_means) > 2:
        steps = np.diff(row_means)
        monotonic = float(max(np.sum(steps > 0), np.sum(steps < 0)) / len(steps))
    else:
        monotonic = 0.5
    if monotonic > 0.7:
        score, note = -0.2, f"Directional DoF gradient (monotonicity={monotonic:.2f})"
    elif monotonic < 0.4:
        score, note = 0.2, f"Random sharpness variation ({monotonic:.2f})"
    else:
        score, note = 0.0, f"Weak DoF gradient ({monotonic:.2f})"
    return {"test": "DoF Gradient Direction", "monotonicity": round(monotonic, 3), "score": score, "note": note}
152
+
153
+ # ── 8. Bokeh Microstructure ──────────────────────────────────────────
154
def f08_bokeh(img: Image.Image) -> Dict[str, Any]:
    """
    Inspect the largest bright highlight for aperture-blade structure.

    Real bokeh shows polygonal blade signatures as angular variance in the
    FFT magnitude; AI bokeh tends to be a featureless smooth disc.
    """
    gray = _g(img)
    thr = np.percentile(gray, 97)
    bright = gray > thr
    if np.sum(bright) < 100:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "No bokeh regions"}
    labeled, nf = label(bright)
    if nf == 0:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "No features"}
    # Fix: the size scan was truncated to the first 50 labels (one full-image
    # pass each), so on busy images the true largest blob could be missed.
    # bincount counts every label in a single pass.
    sizes = np.bincount(labeled.ravel())[1:]
    largest = int(np.argmax(sizes)) + 1
    ys, xs = np.where(labeled == largest)
    patch = gray[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    if patch.shape[0] < 8 or patch.shape[1] < 8:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "Too small"}
    # Centered log-magnitude spectrum of the highlight patch.
    fft = np.fft.fftshift(np.fft.fft2(patch))
    mag = np.log(np.abs(fft) + 1)
    cy, cx = mag.shape[0] // 2, mag.shape[1] // 2
    grid = np.mgrid[0:mag.shape[0], 0:mag.shape[1]]
    angles = np.arctan2(grid[0] - cy, grid[1] - cx)
    # Mean spectral energy in 12 angular sectors; blades make sectors uneven.
    ap = [float(np.mean(mag[(angles >= -np.pi + k * 2 * np.pi / 12) & (angles < -np.pi + (k + 1) * 2 * np.pi / 12)])) for k in range(12)]
    av = float(np.var(ap))
    if av > 0.1:
        score, note = -0.2, f"Aperture blade structure (var={av:.4f})"
    else:
        score, note = 0.3, f"Smooth circular bokeh ({av:.4f}) — AI-like"
    return {"test": "Bokeh Shape", "angular_var": round(av, 4), "score": score, "note": note}
171
+
172
+ # ── 9. Bokeh Chromatic ───────────────────────────────────────────────
173
def f09_bokeh_chromatic(img: Image.Image) -> Dict[str, Any]:
    """Real highlights carry slight color fringing; measure RGB spread in them."""
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    bright = gray > np.percentile(gray, 97)
    if np.sum(bright) < 50:
        return {"test": "Bokeh Chromatic", "score": 0.0, "note": "No highlights"}
    # Mean of each channel over the highlight pixels.
    channel_means = [float(np.mean(rgb[:, :, ch][bright])) for ch in range(3)]
    spread = float(np.std(channel_means)) / (float(np.mean(channel_means)) + 1e-9)
    if 0.01 < spread < 0.08:
        score, note = -0.2, f"Natural bokeh color fringing ({spread:.4f})"
    elif spread < 0.005:
        score, note = 0.2, f"No chromatic bokeh ({spread:.4f})"
    else:
        score, note = 0.0, f"Bokeh chromatic spread={spread:.4f}"
    return {"test": "Bokeh Chromatic", "spread": round(spread, 4), "score": score, "note": note}
186
+
187
+ # ── 10. Lens Distortion ─────────────────────────────────────────────
188
def f10_distortion(img: Image.Image) -> Dict[str, Any]:
    """Real lenses soften strong edges toward the periphery; AI frames are uniform."""
    gray = _g(img)
    h, w = gray.shape
    edge_mag = np.hypot(sobel(gray, 1), sobel(gray, 0))
    strong = edge_mag > np.percentile(edge_mag, 90)
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    Rn = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    inner = float(np.mean(strong[Rn < 0.3]))
    outer = float(np.mean(strong[Rn >= 0.7]))
    ratio = outer / (inner + 1e-9)
    if 0.5 < ratio < 0.9:
        score, note = -0.3, f"Peripheral edge softening ({ratio:.3f}) — lens distortion"
    elif ratio > 0.95:
        score, note = 0.3, f"Uniform edges ({ratio:.3f}) — no distortion"
    else:
        score, note = 0.1, f"Edge ratio={ratio:.3f}"
    return {"test": "Lens Distortion", "ratio": round(ratio, 4), "score": score, "note": note}
198
+
199
# ── 11. Field Curvature ─────────────────────────────────────────────
def f11_field_curvature(img: Image.Image) -> Dict[str, Any]:
    """Probe for field curvature via block-wise Laplacian sharpness.

    Sharpness is averaged over three radial zones (center / mid / edge);
    a curved focal plane makes mid-field sharpness deviate from the
    linear center-to-edge interpolation, while a perfectly flat profile
    leans toward synthesis.
    """
    gray = _g(img)
    h, w = gray.shape
    bs = max(32, min(h, w) // 8)  # block size, at least 32 px
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    Rm = np.sqrt(cx ** 2 + cy ** 2)
    center_sharp, mid_sharp, edge_sharp = [], [], []
    for bi in range(0, h - bs, bs):
        for bj in range(0, w - bs, bs):
            block = gray[bi:bi + bs, bj:bj + bs]
            # Variance of the Laplacian response = local sharpness proxy.
            sharpness = float(np.var(convolve2d(block, lap, mode="valid")))
            radial = R[bi + bs // 2, bj + bs // 2] / Rm
            if radial < 0.3:
                center_sharp.append(sharpness)
            elif radial < 0.6:
                mid_sharp.append(sharpness)
            else:
                edge_sharp.append(sharpness)
    if center_sharp and edge_sharp:
        c = float(np.mean(center_sharp))
        e = float(np.mean(edge_sharp))
        m = float(np.mean(mid_sharp)) if mid_sharp else (c + e) / 2
        # Curvature = mid-field deviation from the linear falloff model.
        expected_mid = (c + e) / 2
        curvature = abs(m - expected_mid) / (c + 1e-9)
        if curvature > 0.1:
            s, n = -0.2, f"Field curvature detected ({curvature:.3f}) β€” real lens"
        elif curvature < 0.02:
            s, n = 0.2, f"No field curvature ({curvature:.3f})"
        else:
            s, n = 0.0, f"Mild curvature ({curvature:.3f})"
    else:
        curvature = 0
        s, n = 0.0, "Insufficient data"
    # NOTE(review): `convolve2d` must be imported at module level
    # (scipy.signal) β€” confirm the file's import block provides it.
    return {"test": "Field Curvature", "curvature": round(curvature, 4), "score": s, "note": n}
224
+
225
# ── 12. MTF (Modulation Transfer Function) ──────────────────────────
def f12_mtf(img: Image.Image) -> Dict[str, Any]:
    """Estimate a radial MTF proxy from the 2-D Fourier magnitude.

    The radially averaged spectrum (normalized to DC) is compared to a
    Gaussian-smoothed copy: low "roughness" with a mid-range MTF50 reads
    as a natural lens rolloff, while an irregular profile suggests AI
    artifacts.  Returns MTF50 (normalized radius where the profile first
    drops below half of DC) alongside the roughness score.
    """
    gray = _g(img)
    h, w = gray.shape
    fft = np.abs(np.fft.fftshift(np.fft.fft2(gray)))
    cy, cx = h // 2, w // 2
    # Radial average of the magnitude spectrum, normalized to DC.
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    maxr = min(cy, cx)
    rp = np.zeros(maxr)
    for r in range(maxr):
        ring = R == r
        if ring.any():
            rp[r] = float(np.mean(fft[ring]))
    rp = rp / (rp[0] + 1e-9)
    if len(rp) > 20:
        smooth = gaussian_filter(rp, sigma=3)
        roughness = float(np.mean(np.abs(rp - smooth)))
        half_idx = np.argmax(rp < 0.5) if np.any(rp < 0.5) else len(rp)
        mtf50 = float(half_idx / maxr)
        if roughness < 0.02 and 0.1 < mtf50 < 0.6:
            s, n = -0.2, f"Natural MTF rolloff (MTF50={mtf50:.3f})"
        elif roughness > 0.05:
            s, n = 0.3, f"Irregular MTF ({roughness:.4f}) β€” AI artifacts"
        else:
            s, n = 0.0, f"MTF50={mtf50:.3f}, roughness={roughness:.4f}"
    else:
        # BUGFIX: previously the insufficient-data defaults (roughness=0,
        # mtf50=0.5) fell through the scoring branches above and were
        # reported as "Natural MTF rolloff" with score -0.2 β€” an
        # authenticity vote derived from no evidence.  Score neutral.
        roughness, mtf50 = 0.0, 0.5
        s, n = 0.0, "Image too small for MTF analysis"
    return {"test": "MTF Analysis", "mtf50": round(mtf50, 4), "roughness": round(roughness, 4), "score": s, "note": n}
247
+
248
# ── 13. Specular Reflection Consistency ──────────────────────────────
def f13_specular(img: Image.Image) -> Dict[str, Any]:
    """Measure size variability of specular blobs (bright + desaturated).

    Real scenes usually show highlights of varied sizes; many highlights
    of near-identical size is a synthesis hint.
    """
    rgb = _rgb(img)
    luma = np.mean(rgb, axis=-1)
    bright = luma > np.percentile(luma, 98)
    # Saturation from the channel spread; speculars are low-saturation.
    maxc = np.max(rgb, axis=-1)
    minc = np.min(rgb, axis=-1)
    sat = (maxc - minc) / (maxc + 1e-9)
    spec = bright & (sat < 0.2)
    if int(np.sum(spec)) < 50:
        return {"test": "Specular Consistency", "score": 0.0, "note": "Few highlights"}
    labeled, nf = label(spec)
    if nf > 0:
        # Cap at 100 components to bound cost on highlight-dense frames.
        sizes = [int(np.sum(labeled == i)) for i in range(1, min(nf + 1, 100))]
        cv = float(np.std(sizes)) / (float(np.mean(sizes)) + 1e-9)
    else:
        cv = 0
    if cv > 1.0:
        s, n = -0.2, f"Varied highlight sizes (CV={cv:.2f}) β€” natural"
    elif cv < 0.3 and nf > 3:
        s, n = 0.3, f"Uniform highlights (CV={cv:.2f})"
    else:
        s, n = 0.0, f"Specular CV={cv:.2f}"
    return {"test": "Specular Consistency", "cv": round(cv, 3), "count": nf, "score": s, "note": n}
265
+
266
# ── 14. Specular Color Temperature ───────────────────────────────────
def f14_specular_color(img: Image.Image) -> Dict[str, Any]:
    """Score color variability among the very brightest pixels.

    Multiple real light sources yield highlights of differing colors;
    uniformly white highlights (low per-pixel std) lean toward AI.
    """
    rgb = _rgb(img)
    luma = np.mean(rgb, axis=-1)
    hmask = luma > np.percentile(luma, 99)
    if np.sum(hmask) < 20:
        return {"test": "Specular Color Temp", "score": 0.0, "note": "Few highlights"}
    # CLEANUP: removed dead locals (r_mean / b_mean / rb_ratio) that were
    # computed but never used by the scoring below.
    highlight_pixels = rgb[hmask]
    color_std = float(np.std(highlight_pixels))
    if color_std > 15:
        s, n = -0.2, f"Varied highlight colors (std={color_std:.1f}) β€” real"
    elif color_std < 3:
        s, n = 0.3, f"Uniform white highlights (std={color_std:.1f})"
    else:
        s, n = 0.0, f"Highlight color std={color_std:.1f}"
    return {"test": "Specular Color Temp", "color_std": round(color_std, 2), "score": s, "note": n}
281
+
282
# ── 15. Purple Fringing ──────────────────────────────────────────────
def f15_purple_fringing(img: Image.Image) -> Dict[str, Any]:
    """Detect purple fringing (axial chromatic aberration) on strong edges.

    A purple index (R+B dominating G) is sampled at the top 5% of edge
    pixels; a meaningful fraction of fringed edges points to real glass.
    """
    rgb = _rgb(img)
    luma = np.mean(rgb, axis=-1)
    edge = np.hypot(sobel(luma, 0), sobel(luma, 1))
    emask = edge > np.percentile(edge, 95)
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    # Positive where red+blue dominate green, i.e. purple-ish pixels.
    purple = (r + b - 2 * g) / (r + g + b + 1e-9)
    edge_purple = purple[emask]
    if len(edge_purple) < 100:
        return {"test": "Purple Fringing", "score": 0.0, "note": "Few edges"}
    pf = float(np.mean(edge_purple > 0.1))
    if pf > 0.05:
        s, n = -0.3, f"Purple fringing at {pf:.1%} of edges β€” real lens"
    elif pf < 0.01:
        s, n = 0.2, f"No fringing ({pf:.3f})"
    else:
        s, n = 0.0, f"Minimal fringing ({pf:.3f})"
    return {"test": "Purple Fringing", "fraction": round(pf, 4), "score": s, "note": n}
294
+
295
# ── 16. Lens Flare Physics ──────────────────────────────────────────
def f16_lens_flare(img: Image.Image) -> Dict[str, Any]:
    """Check whether saturated blobs lie on a line, as real flare ghosts do.

    Flare ghosts from internal reflections align along the lens axis;
    collinearity is measured as the ratio of the two singular values of
    the centered blob centroids.
    """
    rgb = _rgb(img)
    luma = np.mean(rgb, axis=-1)
    h, w = luma.shape
    sat_mask = luma > 250  # fully saturated pixels
    if np.sum(sat_mask) < 20:
        return {"test": "Lens Flare", "score": 0.0, "note": "No saturated regions"}
    labeled, nf = label(sat_mask)
    if nf < 2:
        return {"test": "Lens Flare", "score": 0.0, "note": "Insufficient flare candidates"}
    # Centroids of up to 19 candidate blobs.
    centroids = []
    for i in range(1, min(nf + 1, 20)):
        ys, xs = np.where(labeled == i)
        centroids.append((float(np.mean(ys)), float(np.mean(xs))))
    if len(centroids) >= 3:
        pts = np.array(centroids)
        pts_c = pts - pts.mean(axis=0)
        if pts_c.shape[0] > 1:
            # Large first/second singular-value ratio = nearly collinear.
            _, sv, _ = np.linalg.svd(pts_c)
            linearity = float(sv[0] / (sv[1] + 1e-9))
        else:
            linearity = 1
        if linearity > 5:
            sc, nt = -0.2, f"Aligned flare elements (linearity={linearity:.1f}) β€” real"
        else:
            sc, nt = 0.1, f"Scattered bright blobs ({linearity:.1f})"
    else:
        sc, nt = 0.0, f"Few candidates ({len(centroids)})"
    return {"test": "Lens Flare", "score": sc, "note": nt}
318
+
319
# ── 17. Radial Sharpness Falloff ────────────────────────────────────
def f17_sharpness_falloff(img: Image.Image) -> Dict[str, Any]:
    """Check that gradient energy decreases monotonically with radius.

    Real lenses lose sharpness toward the edges; the fraction of
    decreasing steps across 10 radial bins is the monotonicity score.
    """
    gray = _g(img)
    h, w = gray.shape
    grad_energy = np.hypot(sobel(gray, 0), sobel(gray, 1))
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    radial = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    bins = 10
    edges = np.linspace(0, 1, bins + 1)
    profile = []
    for i in range(bins):
        ring = (radial >= edges[i]) & (radial < edges[i + 1])
        profile.append(float(np.mean(grad_energy[ring])) if np.any(ring) else 0)
    profile = np.array(profile)
    profile = profile / (profile[0] + 1e-9)
    # Fraction of strictly decreasing steps in the radial profile.
    mono = float(np.sum(np.diff(profile) < 0) / (len(profile) - 1 + 1e-9))
    if mono > 0.7:
        s, n = -0.2, f"Natural sharpness falloff (monotonicity={mono:.2f})"
    elif mono < 0.4:
        s, n = 0.3, f"Random sharpness profile ({mono:.2f})"
    else:
        s, n = 0.0, f"Moderate falloff ({mono:.2f})"
    return {"test": "Sharpness Falloff", "monotonicity": round(mono, 3), "score": s, "note": n}
334
+
335
# ── 18. Diffraction Limit Check ─────────────────────────────────────
def f18_diffraction(img: Image.Image) -> Dict[str, Any]:
    """Inspect the high-frequency tail of the radial log-spectrum.

    A clearly negative slope in the top quarter of frequencies matches a
    diffraction-limited optical cutoff; a flat tail is unusual for a
    camera capture.
    """
    gray = _g(img)
    h, w = gray.shape
    fft = np.abs(np.fft.fftshift(np.fft.fft2(gray)))
    cy, cx = h // 2, w // 2
    maxr = min(cy, cx)
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    rp = np.zeros(maxr)
    for r in range(maxr):
        ring = R == r
        if ring.any():
            rp[r] = float(np.mean(fft[ring]))
    rp_log = np.log(rp + 1)
    if rp_log[0] > 0:
        rp_log = rp_log / (rp_log[0] + 1e-9)  # normalize to DC
    if maxr > 20:
        # Mean finite-difference slope over the highest quarter of radii.
        hf_tail = rp_log[maxr * 3 // 4:]
        slope = float(np.mean(np.diff(hf_tail)))
        if slope < -0.01:
            s, n = -0.2, f"Sharp HF cutoff (slope={slope:.4f}) β€” diffraction limited"
        elif abs(slope) < 0.001:
            s, n = 0.2, f"Flat HF spectrum ({slope:.4f}) β€” unusual"
        else:
            s, n = 0.0, f"HF slope={slope:.4f}"
    else:
        s, n = 0.0, "Image too small"
    return {"test": "Diffraction Limit", "score": s, "note": n}
355
+
356
# ── 19. Geometric Distortion Pattern ────────────────────────────────
def f19_geometric_distortion(img: Image.Image) -> Dict[str, Any]:
    """Examine the distribution of strong-edge orientations.

    Man-made scenes photographed by a camera show dominant horizontal /
    vertical edges; a near-isotropic (high-entropy) orientation
    histogram is flagged as unusual.
    """
    gray = _g(img)
    gx = sobel(gray, axis=1)
    gy = sobel(gray, axis=0)
    mag = np.hypot(gx, gy)
    strong = mag > np.percentile(mag, 80)
    angles = np.arctan2(gy[strong], gx[strong])
    hist, _ = np.histogram(angles, bins=36, range=(-np.pi, np.pi))
    hist = hist.astype(float)
    hist /= (hist.sum() + 1e-9)
    # Energy at the four bins nearest -Ο€, -Ο€/2, 0, +Ο€/2 (10Β° bins).
    hv_energy = float(hist[0] + hist[9] + hist[18] + hist[27]) / (hist.sum() + 1e-9)
    entropy_val = -float(np.sum(hist * np.log(hist + 1e-9)))
    if hv_energy > 0.3:
        s, n = -0.2, f"Strong H/V edge dominance ({hv_energy:.2f})"
    elif entropy_val > 3.5:
        s, n = 0.2, f"Isotropic edges (entropy={entropy_val:.2f}) β€” unusual"
    else:
        s, n = 0.0, f"Edge orientation entropy={entropy_val:.2f}"
    return {"test": "Geometric Distortion", "hv_energy": round(hv_energy, 3), "score": s, "note": n}
373
+
374
# ── 20. Optical Center Estimation ────────────────────────────────────
def f20_optical_center(img: Image.Image) -> Dict[str, Any]:
    """Locate the brightest point of the smoothed image vs the frame center.

    The vignetting-induced brightness peak of a real camera sits close
    to the geometric center; a large normalized offset is suspicious.
    """
    gray = _g(img)
    h, w = gray.shape
    # Heavy blur so only the global illumination gradient remains.
    smoothed = gaussian_filter(gray, sigma=max(h, w) // 10)
    y_max, x_max = np.unravel_index(np.argmax(smoothed), smoothed.shape)
    cy, cx = h / 2, w / 2
    dy = abs(y_max - cy) / (h + 1e-9)
    dx = abs(x_max - cx) / (w + 1e-9)
    offset = np.sqrt(dy ** 2 + dx ** 2)
    if offset < 0.1:
        s, n = -0.2, f"Optical center near image center (offset={offset:.3f})"
    elif offset < 0.25:
        s, n = 0.0, f"Slight optical center offset ({offset:.3f})"
    else:
        s, n = 0.2, f"Optical center far from center ({offset:.3f})"
    return {"test": "Optical Center", "offset": round(offset, 4), "score": s, "note": n}
388
+
389
+
390
# ═══ MAIN ENTRY ══════════════════════════════════════════════════════
# Ordered registry of all 20 optical-physics tests; run_optical_agent
# iterates this list and averages the per-test "score" fields.
ALL_TESTS = [f01_ca_magnitude,f02_ca_radial,f03_lateral_ca,f04_vignetting,
             f05_vignetting_symmetry,f06_dof,f07_dof_gradient,f08_bokeh,
             f09_bokeh_chromatic,f10_distortion,f11_field_curvature,f12_mtf,
             f13_specular,f14_specular_color,f15_purple_fringing,f16_lens_flare,
             f17_sharpness_falloff,f18_diffraction,f19_geometric_distortion,f20_optical_center]
396
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
397
def run_optical_agent(img: Image.Image) -> AgentEvidence:
    """Run every registered optical test and fuse results into AgentEvidence.

    Each test returns a dict with at least "test", "score" and usually
    "note".  The violation score is the mean of successful test scores
    (clipped to [-1, 1]); confidence grows with its magnitude, and the
    failure probability is the fraction of tests that raised.
    """
    findings: List[Dict[str, Any]] = []
    scores: List[float] = []
    for test_fn in ALL_TESTS:
        try:
            result = test_fn(img)
        except Exception as exc:
            # A crashed test contributes no score, only an error record.
            findings.append({"test": test_fn.__name__, "error": str(exc), "score": 0})
        else:
            findings.append(result)
            scores.append(result["score"])
    avg = float(np.mean(scores)) if scores else 0.0
    conf = min(1.0, 0.5 + 0.5 * abs(avg))
    violations = [item["test"] for item in findings if item.get("score", 0) > 0.2]
    consistent = [item["test"] for item in findings if item.get("score", 0) < -0.1]
    if violations:
        rationale = f"Optical violations: {', '.join(violations)}."
    elif consistent:
        rationale = f"Optical physics consistent: {', '.join(consistent)}."
    else:
        rationale = "Optical analysis inconclusive."
    # Append every per-test note so the full evidence trail is readable.
    for item in findings:
        if item.get("note"):
            rationale += f" [{item['test']}]: {item['note']}."
    failure_prob = max(0.0, 1.0 - len(scores) / len(ALL_TESTS))
    return AgentEvidence("Optical Physics Agent", np.clip(avg, -1, 1), conf,
                         failure_prob, rationale, findings)