File size: 24,348 Bytes
b500bb5
0aa9fa4
 
b500bb5
 
 
 
0aa9fa4
 
b500bb5
 
 
 
 
 
 
0aa9fa4
 
 
b500bb5
 
0aa9fa4
b500bb5
 
0aa9fa4
 
b500bb5
0aa9fa4
 
 
b500bb5
0aa9fa4
 
 
 
 
 
 
 
8cd421b
0aa9fa4
 
8cd421b
0aa9fa4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8cd421b
 
0aa9fa4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8cd421b
0aa9fa4
8cd421b
0aa9fa4
 
 
 
 
 
 
 
 
8917df2
6c17bfb
8cd421b
6c17bfb
8cd421b
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
"""
FORENSIQ β€” Optical Physics Agent  (20 features)
Tests violations of lens and optical physics.
"""

import numpy as np
from PIL import Image
from scipy.ndimage import sobel, gaussian_filter, uniform_filter, label, median_filter, maximum_filter
from scipy.signal import find_peaks, convolve2d
from dataclasses import dataclass, field
from typing import List, Dict, Any, Optional


@dataclass
class AgentEvidence:
    """Result bundle returned by a forensic agent run (see run_optical_agent)."""
    agent_name: str  # human-readable agent identifier, e.g. "Optical Physics Agent"
    violation_score: float  # aggregate score; run_optical_agent clips it to [-1, 1] (positive leans synthetic)
    confidence: float  # NOTE(review): semantics supplied by agents.utils.run_agent_tests — confirm
    failure_prob: float  # NOTE(review): semantics supplied by agents.utils.run_agent_tests — confirm
    rationale: str  # human-readable summary of the findings
    sub_findings: List[Dict[str, Any]] = field(default_factory=list)  # per-test result dicts from the feature tests
    visual_evidence: Optional[Any] = None  # optional image/map artifact (unused by this module)


def _g(img):
    """Return *img* converted to a float64 grayscale ("L") array."""
    gray = img.convert("L")
    return np.array(gray).astype(np.float64)
def _rgb(img):
    """Return *img* converted to a float64 HxWx3 RGB array."""
    color = img.convert("RGB")
    return np.array(color).astype(np.float64)

# ── 1. Chromatic Aberration Magnitude ────────────────────────────────
def f01_ca_magnitude(img: Image.Image) -> Dict[str, Any]:
    """Correlate per-channel edge maps: real chromatic aberration leaves the
    R/G/B edges slightly misaligned, while synthetic images are often too
    perfectly aligned (or implausibly decorrelated)."""
    rgb = _rgb(img)
    # Gradient-magnitude edge map for each of the three channels, flattened.
    channel_edges = [
        np.hypot(sobel(rgb[:, :, c], 0), sobel(rgb[:, :, c], 1)).ravel()
        for c in range(3)
    ]
    er, eg, eb = channel_edges
    # Subsample so corrcoef stays cheap on large images (~200k samples max).
    step = max(1, len(er) // 200000)

    def _corr(a, b):
        return float(np.corrcoef(a[::step], b[::step])[0, 1])

    avg = (_corr(er, eg) + _corr(er, eb) + _corr(eg, eb)) / 3
    if avg > 0.99:
        s, n = 0.35, "Near-perfect channel alignment β€” weak CA indicator (modern diffusion models can produce CA)"
    elif avg < 0.70:
        s, n = 0.5, "Abnormally low channel correlation"
    elif 0.80 <= avg <= 0.97:
        s, n = -0.4, "Natural CA pattern detected"
    else:
        s, n = 0.15, "Borderline CA"
    return {"test": "CA Magnitude", "avg_corr": round(avg, 4), "score": s, "note": n}

# ── 2. CA Radial Gradient ────────────────────────────────────────────
def f02_ca_radial(img: Image.Image) -> Dict[str, Any]:
    """Check that channel mismatch (a CA proxy) grows with distance from
    the image center, as it does for a real lens."""
    rgb = _rgb(img)
    h, w, _ = rgb.shape
    cy, cx = h / 2, w / 2
    red, grn, blu = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    block = max(32, min(h, w) // 8)
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    Rm = np.sqrt(cx ** 2 + cy ** 2)
    center_ca, edge_ca = [], []
    for top in range(0, h - block, block):
        for left in range(0, w - block, block):
            rr = red[top:top + block, left:left + block]
            gg = grn[top:top + block, left:left + block]
            bb = blu[top:top + block, left:left + block]
            # Per-block CA proxy: spread of the R-G and R-B difference maps.
            ca = (float(np.std(rr - gg)) + float(np.std(rr - bb))) / 2
            rd = R[top + block // 2, left + block // 2] / Rm
            if rd < 0.4:
                center_ca.append(ca)
            elif rd > 0.6:
                edge_ca.append(ca)
    if center_ca and edge_ca:
        ratio = float(np.mean(edge_ca)) / (float(np.mean(center_ca)) + 1e-9)
        if ratio > 1.15:
            s, n = -0.3, f"CA increases radially ({ratio:.2f}) β€” real lens"
        elif ratio < 0.9:
            s, n = 0.3, f"CA decreases toward edges ({ratio:.2f}) β€” unnatural"
        else:
            s, n = 0.1, f"Flat CA ({ratio:.2f})"
    else:
        ratio = 1.0
        s, n = 0.0, "Insufficient data"
    return {"test": "CA Radial Gradient", "ratio": round(ratio, 4), "score": s, "note": n}

# ── 3. Lateral CA (Red-Blue Shift) ───────────────────────────────────
def f03_lateral_ca(img: Image.Image) -> Dict[str, Any]:
    """Estimate the vertical offset between red- and blue-channel edge mass.
    Real lenses shift R and B in opposite radial directions, so their
    edge-energy centroids separate slightly."""
    rgb = _rgb(img)
    h, w, _ = rgb.shape
    red_edges = np.hypot(sobel(rgb[:, :, 0], 0), sobel(rgb[:, :, 0], 1))
    blue_edges = np.hypot(sobel(rgb[:, :, 2], 0), sobel(rgb[:, :, 2], 1))
    rows = np.arange(h)
    red_centroid = float(np.average(rows, weights=np.sum(red_edges, axis=1) + 1e-9))
    blue_centroid = float(np.average(rows, weights=np.sum(blue_edges, axis=1) + 1e-9))
    norm_shift = abs(red_centroid - blue_centroid) / (h + 1e-9)
    if 0.001 < norm_shift < 0.02:
        s, n = -0.3, f"Natural lateral CA shift ({norm_shift:.4f})"
    elif norm_shift < 0.0005:
        s, n = 0.3, f"Zero lateral CA ({norm_shift:.4f}) β€” synthetic"
    elif norm_shift > 0.03:
        s, n = 0.3, f"Excessive CA shift ({norm_shift:.4f}) β€” unnatural"
    else:
        s, n = 0.0, f"Borderline lateral CA ({norm_shift:.4f})"
    return {"test": "Lateral CA", "shift": round(norm_shift, 6), "score": s, "note": n}

# ── 4. Vignetting cos⁴θ ─────────────────────────────────────────────
def f04_vignetting(img: Image.Image) -> Dict[str, Any]:
    """Compare the radial brightness profile against the cos^4 falloff law
    of natural lens vignetting."""
    gray = _g(img)
    h, w = gray.shape
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    Rn = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    nbins = 20
    be = np.linspace(0, 1, nbins + 1)
    ring_means = []
    for i in range(nbins):
        ring = (Rn >= be[i]) & (Rn < be[i + 1])
        ring_means.append(float(np.mean(gray[ring])) if np.any(ring) else 0)
    rm = np.array(ring_means)
    if rm[0] == 0:
        rm[0] = 1.0  # avoid normalizing by an empty center bin
    norm = rm / (rm[0] + 1e-9)
    centers = (be[:-1] + be[1:]) / 2
    # assumes a moderate field of view (factor 1.5) — TODO confirm against camera data
    cos4 = np.cos(np.arctan(centers * 1.5)) ** 4
    res = float(np.mean((norm - cos4) ** 2))
    deltas = np.diff(norm)
    ndf = float(np.sum(deltas > 0.02) / len(deltas))  # fraction of brightening steps
    if res < 0.01 and ndf < 0.3:
        s, n = -0.3, f"Natural vignetting (cos⁴θ residual={res:.5f})"
    elif res > 0.05 or ndf > 0.5:
        s, n = 0.4, f"Absent/inconsistent vignetting (res={res:.5f})"
    else:
        s, n = 0.1, f"Mild vignetting deviation (res={res:.5f})"
    return {"test": "Vignetting cos⁴θ", "residual": round(res, 5), "score": s, "note": n}

# ── 5. Vignetting Symmetry ──────────────────────────────────────────
def f05_vignetting_symmetry(img: Image.Image) -> Dict[str, Any]:
    """Real vignetting darkens all four corners alike; strong quadrant
    asymmetry hints at manipulation (or merely uneven scene lighting)."""
    gray = _g(img)
    h, w = gray.shape
    mid_y, mid_x = h // 2, w // 2
    quadrants = [
        float(np.mean(gray[:mid_y, :mid_x])),
        float(np.mean(gray[:mid_y, mid_x:])),
        float(np.mean(gray[mid_y:, :mid_x])),
        float(np.mean(gray[mid_y:, mid_x:])),
    ]
    asym = float(np.std(quadrants)) / (float(np.mean(quadrants)) + 1e-9)
    if asym < 0.03:
        s, n = -0.15, f"Symmetric brightness (asym={asym:.4f}) β€” real optics"
    elif asym > 0.2:
        s, n = 0.15, f"Asymmetric brightness (asym={asym:.4f}) β€” possible manipulation (but could be scene lighting)"
    else:
        s, n = 0.0, f"Moderate asymmetry ({asym:.4f})"
    return {"test": "Vignetting Symmetry", "asymmetry": round(asym, 4), "score": s, "note": n}

# ── 6. DoF Consistency ───────────────────────────────────────────────
def f06_dof(img: Image.Image) -> Dict[str, Any]:
    """Map local sharpness (Laplacian variance) per block and score how
    smoothly it varies; real depth of field blurs gradually."""
    gray = _g(img)
    h, w = gray.shape
    bs = max(max(h, w) // 16, 8)
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    bm = np.zeros((h // bs, w // bs))
    for bi in range(bm.shape[0]):
        for bj in range(bm.shape[1]):
            tile = gray[bi * bs:(bi + 1) * bs, bj * bs:(bj + 1) * bs]
            bm[bi, bj] = float(np.var(convolve2d(tile, lap, mode="valid")))
    if bm.size > 1:
        # Deviation from a smoothed copy = abruptness of blur transitions.
        smoothed = gaussian_filter(bm, sigma=1.0)
        inc = float(np.std(bm - smoothed) / (np.mean(bm) + 1e-9))
    else:
        inc = 0.0
    if inc < 0.3:
        s, n = -0.3, f"Smooth DoF gradient (inc={inc:.4f})"
    elif inc > 0.7:
        s, n = 0.5, f"Abrupt blur transitions (inc={inc:.4f})"
    else:
        s, n = 0.1, f"Moderate DoF variation ({inc:.4f})"
    return {"test": "DoF Consistency", "inconsistency": round(inc, 4), "score": s, "note": n, "blur_map": bm}

# ── 7. DoF Gradient Direction ────────────────────────────────────────
def f07_dof_gradient(img: Image.Image) -> Dict[str, Any]:
    """Real depth of field makes sharpness drift monotonically across the
    frame; random per-row sharpness is suspicious."""
    gray = _g(img)
    h, w = gray.shape
    bs = max(32, max(h, w) // 8)
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    grid = []
    for top in range(0, h - bs, bs):
        grid.append([
            float(np.var(convolve2d(gray[top:top + bs, left:left + bs], lap, mode="valid")))
            for left in range(0, w - bs, bs)
        ])
    if not grid:
        # NOTE(review): this early return reports "DoF Gradient" while the
        # normal path reports "DoF Gradient Direction" — preserved as-is.
        return {"test": "DoF Gradient", "score": 0.0, "note": "Too small"}
    row_means = np.mean(np.array(grid), axis=1)
    if len(row_means) > 2:
        diffs = np.diff(row_means)
        monotonic = float(max(np.sum(diffs > 0), np.sum(diffs < 0)) / len(diffs))
    else:
        monotonic = 0.5
    if monotonic > 0.7:
        s, n = -0.2, f"Directional DoF gradient (monotonicity={monotonic:.2f})"
    elif monotonic < 0.4:
        s, n = 0.2, f"Random sharpness variation ({monotonic:.2f})"
    else:
        s, n = 0.0, f"Weak DoF gradient ({monotonic:.2f})"
    return {"test": "DoF Gradient Direction", "monotonicity": round(monotonic, 3), "score": s, "note": n}

# ── 8. Bokeh Microstructure ──────────────────────────────────────────
def f08_bokeh(img: Image.Image) -> Dict[str, Any]:
    """Inspect the largest bright blob's spectrum for angular structure
    (aperture-blade polygons); a perfectly smooth disc looks synthetic."""
    gray = _g(img)
    bright = gray > np.percentile(gray, 97)
    if np.sum(bright) < 100:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "No bokeh regions"}
    labeled, nf = label(bright)
    if nf == 0:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "No features"}
    # Only the first (up to) 49 components are sized, matching the original cap.
    sizes = [int(np.sum(labeled == i)) for i in range(1, min(nf + 1, 50))]
    biggest = np.argmax(sizes) + 1
    ys, xs = np.where(labeled == biggest)
    patch = gray[ys.min():ys.max() + 1, xs.min():xs.max() + 1]
    if patch.shape[0] < 8 or patch.shape[1] < 8:
        return {"test": "Bokeh Shape", "score": 0.0, "note": "Too small"}
    spectrum = np.log(np.abs(np.fft.fftshift(np.fft.fft2(patch))) + 1)
    cy, cx = spectrum.shape[0] // 2, spectrum.shape[1] // 2
    Y, X = np.mgrid[0:spectrum.shape[0], 0:spectrum.shape[1]]
    angles = np.arctan2(Y - cy, X - cx)
    sector = 2 * np.pi / 12
    # Mean spectral energy in 12 angular sectors; blades create variance here.
    ap = [
        float(np.mean(spectrum[(angles >= -np.pi + k * sector) & (angles < -np.pi + (k + 1) * sector)]))
        for k in range(12)
    ]
    av = float(np.var(ap))
    if av > 0.1:
        s, n = -0.2, f"Aperture blade structure (var={av:.4f})"
    else:
        s, n = 0.3, f"Smooth circular bokeh ({av:.4f}) β€” AI-like"
    return {"test": "Bokeh Shape", "angular_var": round(av, 4), "score": s, "note": n}

# ── 9. Bokeh Chromatic ───────────────────────────────────────────────
def f09_bokeh_chromatic(img: Image.Image) -> Dict[str, Any]:
    """Real bokeh shows slight color fringing around highlights; a zero
    spread between channel means is a synthetic tell."""
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    bright = gray > np.percentile(gray, 97)
    if np.sum(bright) < 50:
        return {"test": "Bokeh Chromatic", "score": 0.0, "note": "No highlights"}
    channel_means = [float(np.mean(rgb[:, :, c][bright])) for c in range(3)]
    color_spread = float(np.std(channel_means)) / (float(np.mean(channel_means)) + 1e-9)
    if 0.01 < color_spread < 0.08:
        s, n = -0.2, f"Natural bokeh color fringing ({color_spread:.4f})"
    elif color_spread < 0.005:
        s, n = 0.2, f"No chromatic bokeh ({color_spread:.4f})"
    else:
        s, n = 0.0, f"Bokeh chromatic spread={color_spread:.4f}"
    return {"test": "Bokeh Chromatic", "spread": round(color_spread, 4), "score": s, "note": n}

# ── 10. Lens Distortion ─────────────────────────────────────────────
def f10_distortion(img: Image.Image) -> Dict[str, Any]:
    """Real lenses soften edges toward the periphery; compare the density
    of strong edges in the outer zone versus the center."""
    gray = _g(img)
    h, w = gray.shape
    em = np.hypot(sobel(gray, 1), sobel(gray, 0))
    strong = em > np.percentile(em, 90)
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    Rn = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    inner = float(np.mean(strong[Rn < 0.3]))
    outer = float(np.mean(strong[Rn >= 0.7]))
    ratio = outer / (inner + 1e-9)
    if 0.5 < ratio < 0.9:
        s, n = -0.3, f"Peripheral edge softening ({ratio:.3f}) β€” lens distortion"
    elif ratio > 0.95:
        s, n = 0.3, f"Uniform edges ({ratio:.3f}) β€” no distortion"
    else:
        s, n = 0.1, f"Edge ratio={ratio:.3f}"
    return {"test": "Lens Distortion", "ratio": round(ratio, 4), "score": s, "note": n}

# ── 11. Field Curvature ─────────────────────────────────────────────
def f11_field_curvature(img: Image.Image) -> Dict[str, Any]:
    """Real lenses exhibit field curvature: the mid-field zone departs from
    a linear center-to-edge sharpness falloff."""
    gray = _g(img)
    h, w = gray.shape
    bs = max(32, min(h, w) // 8)
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    Rm = np.sqrt(cx ** 2 + cy ** 2)
    zones = {"center": [], "mid": [], "edge": []}
    for top in range(0, h - bs, bs):
        for left in range(0, w - bs, bs):
            sharp = float(np.var(convolve2d(gray[top:top + bs, left:left + bs], lap, mode="valid")))
            rd = R[top + bs // 2, left + bs // 2] / Rm
            if rd < 0.3:
                zones["center"].append(sharp)
            elif rd < 0.6:
                zones["mid"].append(sharp)
            else:
                zones["edge"].append(sharp)
    if zones["center"] and zones["edge"]:
        c = float(np.mean(zones["center"]))
        e = float(np.mean(zones["edge"]))
        m = float(np.mean(zones["mid"])) if zones["mid"] else (c + e) / 2
        # Curvature = mid-field deviation from the linear interpolation of c and e.
        curvature = abs(m - (c + e) / 2) / (c + 1e-9)
        if curvature > 0.1:
            s, n = -0.2, f"Field curvature detected ({curvature:.3f}) β€” real lens"
        elif curvature < 0.02:
            s, n = 0.2, f"No field curvature ({curvature:.3f})"
        else:
            s, n = 0.0, f"Mild curvature ({curvature:.3f})"
    else:
        curvature = 0
        s, n = 0.0, "Insufficient data"
    return {"test": "Field Curvature", "curvature": round(curvature, 4), "score": s, "note": n}

# ── 12. MTF (Modulation Transfer Function) ──────────────────────────
def f12_mtf(img: Image.Image) -> Dict[str, Any]:
    """Analyze the radially-averaged frequency spectrum.

    Real lenses show a smooth MTF rolloff; AI imagery often has a sharper
    cutoff or irregular bumps.

    Fixes versus the previous version:
    - guards degenerate images (min dimension < 2) that crashed with an
      IndexError on an empty radial profile (maxr == 0);
    - replaces the O(maxr*h*w) per-radius masking loop with one
      np.bincount pass (identical per-radius means, 0 for empty radii).
    """
    gray = _g(img)
    h, w = gray.shape
    cy, cx = h // 2, w // 2
    maxr = min(cy, cx)
    if maxr < 1:
        # No radial profile can be formed for a sub-2-pixel dimension.
        return {"test": "MTF Analysis", "mtf50": 0.5, "roughness": 0,
                "score": 0.0, "note": "Image too small"}
    fft = np.abs(np.fft.fftshift(np.fft.fft2(gray)))
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    # Mean spectrum magnitude per integer radius.
    counts = np.bincount(R.ravel(), minlength=maxr)[:maxr]
    sums = np.bincount(R.ravel(), weights=fft.ravel(), minlength=maxr)[:maxr]
    rp = np.where(counts > 0, sums / np.maximum(counts, 1), 0.0)
    rp = rp / (rp[0] + 1e-9)  # normalize to the DC component
    # Real lenses: smooth MTF rolloff. AI: sharper cutoff or unusual bumps.
    if len(rp) > 20:
        smooth = gaussian_filter(rp, sigma=3)
        roughness = float(np.mean(np.abs(rp - smooth)))
        # First radius where the profile drops below half of DC.
        half_idx = np.argmax(rp < 0.5) if np.any(rp < 0.5) else len(rp)
        mtf50 = float(half_idx / maxr)
    else:
        roughness = 0
        mtf50 = 0.5
    if roughness < 0.02 and 0.1 < mtf50 < 0.6:
        s, n = -0.2, f"Natural MTF rolloff (MTF50={mtf50:.3f})"
    elif roughness > 0.05:
        s, n = 0.3, f"Irregular MTF ({roughness:.4f}) β€” AI artifacts"
    else:
        s, n = 0.0, f"MTF50={mtf50:.3f}, roughness={roughness:.4f}"
    return {"test": "MTF Analysis", "mtf50": round(mtf50, 4), "roughness": round(roughness, 4), "score": s, "note": n}

# ── 13. Specular Reflection Consistency ──────────────────────────────
def f13_specular(img: Image.Image) -> Dict[str, Any]:
    """Specular highlights in real scenes vary in size; uniform highlight
    blobs suggest synthesis."""
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    hmask = gray > np.percentile(gray, 98)
    maxc = np.max(rgb, axis=-1)
    minc = np.min(rgb, axis=-1)
    sat = (maxc - minc) / (maxc + 1e-9)
    # Specular candidate = bright AND desaturated.
    spec = hmask & (sat < 0.2)
    if int(np.sum(spec)) < 50:
        return {"test": "Specular Consistency", "score": 0.0, "note": "Few highlights"}
    labeled, nf = label(spec)
    if nf > 0:
        sizes = [int(np.sum(labeled == i)) for i in range(1, min(nf + 1, 100))]
        cv = float(np.std(sizes)) / (float(np.mean(sizes)) + 1e-9)
    else:
        cv = 0
    if cv > 1.0:
        s, n = -0.2, f"Varied highlight sizes (CV={cv:.2f}) β€” natural"
    elif cv < 0.3 and nf > 3:
        s, n = 0.3, f"Uniform highlights (CV={cv:.2f})"
    else:
        s, n = 0.0, f"Specular CV={cv:.2f}"
    return {"test": "Specular Consistency", "cv": round(cv, 3), "count": nf, "score": s, "note": n}

# ── 14. Specular Color Temperature ───────────────────────────────────
def f14_specular_color(img: Image.Image) -> Dict[str, Any]:
    """Check highlight color variety.

    Real light sources tint their specular highlights: several sources give
    several highlight colors, while uniformly white highlights are typical
    of AI renders.

    Fix versus the previous version: removed a dead computation of the
    red/blue channel ratio (r_mean, b_mean, rb_ratio) whose result was
    never used in the score or output.
    """
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    hmask = gray > np.percentile(gray, 99)
    if np.sum(hmask) < 20:
        return {"test": "Specular Color Temp", "score": 0.0, "note": "Few highlights"}
    # Spread of all RGB values inside the top-1% brightness mask.
    color_std = float(np.std(rgb[hmask]))
    if color_std > 15:
        s, n = -0.2, f"Varied highlight colors (std={color_std:.1f}) β€” real"
    elif color_std < 3:
        s, n = 0.3, f"Uniform white highlights (std={color_std:.1f})"
    else:
        s, n = 0.0, f"Highlight color std={color_std:.1f}"
    return {"test": "Specular Color Temp", "color_std": round(color_std, 2), "score": s, "note": n}

# ── 15. Purple Fringing ──────────────────────────────────────────────
def f15_purple_fringing(img: Image.Image) -> Dict[str, Any]:
    """Purple fringing (R+B exceeding G) along strong edges is a classic
    real-lens artifact; its total absence leans synthetic."""
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    edge = np.hypot(sobel(gray, 0), sobel(gray, 1))
    emask = edge > np.percentile(edge, 95)
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    # Purpleness index: positive where red+blue dominate green.
    purpleness = (r + b - 2 * g) / (r + g + b + 1e-9)
    edge_purple = purpleness[emask]
    if len(edge_purple) < 100:
        return {"test": "Purple Fringing", "score": 0.0, "note": "Few edges"}
    pf = float(np.mean(edge_purple > 0.1))
    if pf > 0.05:
        s, n = -0.3, f"Purple fringing at {pf:.1%} of edges β€” real lens"
    elif pf < 0.01:
        s, n = 0.2, f"No fringing ({pf:.3f})"
    else:
        s, n = 0.0, f"Minimal fringing ({pf:.3f})"
    return {"test": "Purple Fringing", "fraction": round(pf, 4), "score": s, "note": n}

# ── 16. Lens Flare Physics ──────────────────────────────────────────
def f16_lens_flare(img: Image.Image) -> Dict[str, Any]:
    """Genuine flare ghosts line up along the lens axis; check whether the
    saturated blobs' centroid cloud is strongly elongated (collinear)."""
    rgb = _rgb(img)
    gray = np.mean(rgb, axis=-1)
    h, w = gray.shape
    # Blown-out pixels are the flare candidates.
    sat_mask = gray > 250
    if np.sum(sat_mask) < 20:
        return {"test": "Lens Flare", "score": 0.0, "note": "No saturated regions"}
    labeled, nf = label(sat_mask)
    if nf < 2:
        return {"test": "Lens Flare", "score": 0.0, "note": "Insufficient flare candidates"}
    centroids = []
    for i in range(1, min(nf + 1, 20)):
        ys, xs = np.where(labeled == i)
        centroids.append((float(np.mean(ys)), float(np.mean(xs))))
    if len(centroids) >= 3:
        # Collinearity via SVD: ratio of the two singular values of the
        # mean-centered centroid cloud.
        pts = np.array(centroids)
        centered = pts - pts.mean(axis=0)
        if centered.shape[0] > 1:
            _, sv, _ = np.linalg.svd(centered)
            linearity = float(sv[0] / (sv[1] + 1e-9))
        else:
            linearity = 1
        if linearity > 5:
            sc, nt = -0.2, f"Aligned flare elements (linearity={linearity:.1f}) β€” real"
        else:
            sc, nt = 0.1, f"Scattered bright blobs ({linearity:.1f})"
    else:
        sc, nt = 0.0, f"Few candidates ({len(centroids)})"
    return {"test": "Lens Flare", "score": sc, "note": nt}

# ── 17. Radial Sharpness Falloff ────────────────────────────────────
def f17_sharpness_falloff(img: Image.Image) -> Dict[str, Any]:
    """A real lens gets softer toward the frame edges, so the radial edge
    energy profile should be mostly decreasing."""
    gray = _g(img)
    h, w = gray.shape
    em = np.hypot(sobel(gray, 0), sobel(gray, 1))
    cy, cx = h / 2, w / 2
    Y, X = np.mgrid[0:h, 0:w]
    Rn = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2) / np.sqrt(cx ** 2 + cy ** 2)
    bins = 10
    be = np.linspace(0, 1, bins + 1)
    profile = []
    for i in range(bins):
        ring = (Rn >= be[i]) & (Rn < be[i + 1])
        profile.append(float(np.mean(em[ring])) if np.any(ring) else 0)
    rs = np.array(profile)
    rs = rs / (rs[0] + 1e-9)
    mono = float(np.sum(np.diff(rs) < 0) / (len(rs) - 1 + 1e-9))
    if mono > 0.7:
        s, n = -0.2, f"Natural sharpness falloff (monotonicity={mono:.2f})"
    elif mono < 0.4:
        s, n = 0.3, f"Random sharpness profile ({mono:.2f})"
    else:
        s, n = 0.0, f"Moderate falloff ({mono:.2f})"
    return {"test": "Sharpness Falloff", "monotonicity": round(mono, 3), "score": s, "note": n}

# ── 18. Diffraction Limit Check ─────────────────────────────────────
def f18_diffraction(img: Image.Image) -> Dict[str, Any]:
    """Look for the sharp high-frequency cutoff of a diffraction-limited lens.

    Fixes versus the previous version:
    - guards degenerate images (min dimension < 2) that crashed with an
      IndexError when indexing an empty radial profile before the maxr
      guard was reached;
    - replaces the O(maxr*h*w) per-radius masking loop with one
      np.bincount pass (identical per-radius means, 0 for empty radii).
    """
    gray = _g(img)
    h, w = gray.shape
    cy, cx = h // 2, w // 2
    maxr = min(cy, cx)
    if maxr < 1:
        return {"test": "Diffraction Limit", "score": 0.0, "note": "Image too small"}
    fft = np.abs(np.fft.fftshift(np.fft.fft2(gray)))
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    # Mean spectrum magnitude per integer radius.
    counts = np.bincount(R.ravel(), minlength=maxr)[:maxr]
    sums = np.bincount(R.ravel(), weights=fft.ravel(), minlength=maxr)[:maxr]
    rp = np.where(counts > 0, sums / np.maximum(counts, 1), 0.0)
    rp_log = np.log(rp + 1)
    rp_log = rp_log / (rp_log[0] + 1e-9) if rp_log[0] > 0 else rp_log
    if maxr > 20:
        # Slope over the top quarter of the frequency range.
        hf = rp_log[maxr * 3 // 4:]
        slope = float(np.mean(np.diff(hf)))
        if slope < -0.01:
            s, n = -0.2, f"Sharp HF cutoff (slope={slope:.4f}) β€” diffraction limited"
        elif abs(slope) < 0.001:
            s, n = 0.2, f"Flat HF spectrum ({slope:.4f}) β€” unusual"
        else:
            s, n = 0.0, f"HF slope={slope:.4f}"
    else:
        s, n = 0.0, "Image too small"
    return {"test": "Diffraction Limit", "score": s, "note": n}

# ── 19. Geometric Distortion Pattern ────────────────────────────────
def f19_geometric_distortion(img: Image.Image) -> Dict[str, Any]:
    """Man-made scenes have dominant horizontal/vertical edges; lens
    distortion bends them, and fully isotropic edges are unusual."""
    gray = _g(img)
    gx = sobel(gray, axis=1)
    gy = sobel(gray, axis=0)
    mag = np.hypot(gx, gy)
    strong = mag > np.percentile(mag, 80)
    angles = np.arctan2(gy[strong], gx[strong])
    hist, _ = np.histogram(angles, bins=36, range=(-np.pi, np.pi))
    hist = hist.astype(float)
    hist /= (hist.sum() + 1e-9)
    # Bins 0/9/18/27 sit at -pi, -pi/2, 0 and pi/2 (10-degree bins).
    hv_energy = float(hist[0] + hist[9] + hist[18] + hist[27]) / (hist.sum() + 1e-9)
    entropy_val = -float(np.sum(hist * np.log(hist + 1e-9)))
    if hv_energy > 0.3:
        s, n = -0.2, f"Strong H/V edge dominance ({hv_energy:.2f})"
    elif entropy_val > 3.5:
        s, n = 0.2, f"Isotropic edges (entropy={entropy_val:.2f}) β€” unusual"
    else:
        s, n = 0.0, f"Edge orientation entropy={entropy_val:.2f}"
    return {"test": "Geometric Distortion", "hv_energy": round(hv_energy, 3), "score": s, "note": n}

# ── 20. Optical Center Estimation ────────────────────────────────────
def f20_optical_center(img: Image.Image) -> Dict[str, Any]:
    """Estimate the optical center from the illumination gradient: heavy
    blurring leaves mostly the vignetting pattern, whose peak should sit
    near the geometric center for a real camera."""
    gray = _g(img)
    h, w = gray.shape
    smoothed = gaussian_filter(gray, sigma=max(h, w) // 10)
    peak_y, peak_x = np.unravel_index(np.argmax(smoothed), smoothed.shape)
    dy = abs(peak_y - h / 2) / (h + 1e-9)
    dx = abs(peak_x - w / 2) / (w + 1e-9)
    offset = np.sqrt(dy ** 2 + dx ** 2)
    if offset < 0.1:
        s, n = -0.1, f"Optical center near image center (offset={offset:.3f})"
    elif offset < 0.25:
        s, n = 0.0, f"Slight optical center offset ({offset:.3f})"
    else:
        s, n = 0.1, f"Optical center offset ({offset:.3f}) β€” unreliable for scenes with bright objects"
    return {"test": "Optical Center", "offset": round(offset, 4), "score": s, "note": n}


# ═══ MAIN ENTRY ══════════════════════════════════════════════════════
# Ordered registry of the 20 optical-physics feature tests; consumed by
# run_optical_agent() below.
ALL_TESTS = [f01_ca_magnitude,f02_ca_radial,f03_lateral_ca,f04_vignetting,
             f05_vignetting_symmetry,f06_dof,f07_dof_gradient,f08_bokeh,
             f09_bokeh_chromatic,f10_distortion,f11_field_curvature,f12_mtf,
             f13_specular,f14_specular_color,f15_purple_fringing,f16_lens_flare,
             f17_sharpness_falloff,f18_diffraction,f19_geometric_distortion,f20_optical_center]

def run_optical_agent(img: Image.Image, modality_adjustments=None) -> AgentEvidence:
    """Run every optical-physics feature test on *img* and bundle the
    results into an AgentEvidence (violation score clipped to [-1, 1])."""
    # Imported lazily to avoid a module-level dependency cycle.
    from agents.utils import run_agent_tests
    findings, avg_score, confidence, failure, rationale = run_agent_tests(
        ALL_TESTS, img, "Optical Physics Agent", modality_adjustments)
    return AgentEvidence("Optical Physics Agent", np.clip(avg_score, -1, 1),
                         confidence, failure, rationale, findings)