anky2002 commited on
Commit
b500bb5
Β·
verified Β·
1 Parent(s): 57dc17b

Upload agents/optical_agent.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. agents/optical_agent.py +358 -0
agents/optical_agent.py ADDED
@@ -0,0 +1,358 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ FORENSIQ β€” Optical Physics Agent
3
+ Tests violations of lens and optical physics:
4
+ - Chromatic Aberration analysis (RGB channel misregistration)
5
+ - Vignetting analysis (cos⁴θ intensity falloff)
6
+ - Depth-of-Field consistency (expected vs measured blur)
7
+ - Bokeh microstructure (aperture blade FFT signatures)
8
+ """
9
+
10
+ import numpy as np
11
+ from PIL import Image
12
+ from scipy.ndimage import sobel, gaussian_filter, uniform_filter
13
+ from scipy.signal import find_peaks
14
+ from dataclasses import dataclass, field
15
+ from typing import List, Dict, Any, Optional
16
+
17
+
18
@dataclass
class AgentEvidence:
    """Structured evidence output from any forensic agent.

    One agent's verdict plus supporting detail, in a uniform shape so a
    downstream aggregator can weigh evidence from multiple agents.
    """
    agent_name: str
    violation_score: float  # -1 (authentic) to +1 (fake)
    confidence: float  # 0 to 1
    failure_prob: float  # probability this agent's analysis failed
    rationale: str
    # Per-test result dicts (each with at least "test" and "score" keys).
    sub_findings: List[Dict[str, Any]] = field(default_factory=list)
    visual_evidence: Optional[Any] = None  # numpy array or PIL image
28
+
29
+
30
def _to_gray(img: Image.Image) -> np.ndarray:
    """Return the image as a float64 grayscale (luma) array."""
    luma = img.convert("L")
    return np.array(luma).astype(np.float64)
32
+
33
+
34
def _to_rgb(img: Image.Image) -> np.ndarray:
    """Return the image as a float64 H x W x 3 RGB array."""
    rgb = img.convert("RGB")
    return np.array(rgb).astype(np.float64)
36
+
37
+
38
+ # ─── Chromatic Aberration ────────────────────────────────────────────
39
def analyze_chromatic_aberration(img: Image.Image) -> Dict[str, Any]:
    """
    Test for natural lens chromatic aberration.

    Real lenses create slight spatial offsets between R/G/B edges, so the
    per-channel edge maps correlate strongly but not perfectly.  AI images
    tend to have perfectly aligned or impossibly misaligned channels.

    Returns a dict with the per-pair edge-map correlations, a score
    (negative = consistent with a real lens, positive = suspicious) and a
    short note.
    """
    rgb = _to_rgb(img)
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]

    # Sobel edge magnitude per channel.
    edges = {}
    for name, ch in [("R", r), ("G", g), ("B", b)]:
        ex = sobel(ch, axis=0)
        ey = sobel(ch, axis=1)
        edges[name] = np.hypot(ex, ey)

    # Flatten for correlation.
    er, eg, eb = edges["R"].ravel(), edges["G"].ravel(), edges["B"].ravel()

    def _edge_corr(a: np.ndarray, b_: np.ndarray) -> float:
        # np.corrcoef yields NaN when an input has zero variance (a flat
        # channel produces an all-zero edge map).  A flat channel has no
        # edges to misalign, so treat that case as perfect alignment
        # instead of letting NaN leak into the threshold comparisons
        # below (every NaN comparison is False, which silently forced
        # the "borderline" branch) and into the reported metrics.
        c = np.corrcoef(a, b_)[0, 1]
        return 1.0 if np.isnan(c) else float(c)

    rg_corr = _edge_corr(er, eg)
    rb_corr = _edge_corr(er, eb)
    gb_corr = _edge_corr(eg, eb)

    avg_corr = (rg_corr + rb_corr + gb_corr) / 3.0

    # Natural range: moderate correlation (0.80–0.97)
    # Too perfect (>0.99) = synthetic; too low (<0.70) = heavy editing
    if avg_corr > 0.99:
        score = 0.6
        note = "Suspiciously perfect channel alignment (no natural CA)"
    elif avg_corr < 0.70:
        score = 0.5
        note = "Abnormally low channel correlation (heavy manipulation)"
    elif 0.80 <= avg_corr <= 0.97:
        score = -0.4
        note = "Natural chromatic aberration pattern detected"
    else:
        score = 0.2
        note = "Borderline chromatic aberration"

    return {
        "test": "Chromatic Aberration",
        "rg_correlation": round(rg_corr, 4),
        "rb_correlation": round(rb_corr, 4),
        "gb_correlation": round(gb_corr, 4),
        "avg_correlation": round(avg_corr, 4),
        "score": score,
        "note": note,
    }
87
+
88
+
89
+ # ─── Vignetting ─────────────────────────────────────────────────────
90
def analyze_vignetting(img: Image.Image) -> Dict[str, Any]:
    """
    Check radial brightness falloff against the cos⁴(θ) lens model.

    Real photos darken toward the corners following cos⁴(θ) falloff;
    AI images often show a flat or erratic radial brightness profile.
    """
    gray = _to_gray(img)
    h, w = gray.shape
    cy, cx = h / 2, w / 2

    # Normalized radial distance: 0 at the image center, 1 at a corner.
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    R_max = np.sqrt(cx ** 2 + cy ** 2)
    R_norm = R / R_max

    # Mean intensity per radial ring; empty rings are recorded as 0.
    n_bins = 20
    bin_edges = np.linspace(0, 1, n_bins + 1)
    radial_means = np.array([
        float(np.mean(gray[ring])) if ring.any() else 0
        for ring in (
            (R_norm >= lo) & (R_norm < hi)
            for lo, hi in zip(bin_edges[:-1], bin_edges[1:])
        )
    ])
    if radial_means[0] == 0:
        radial_means[0] = 1.0

    # Profile relative to the center intensity.
    normalized = radial_means / (radial_means[0] + 1e-9)

    # Expected falloff: I ∝ cos⁴(arctan(r * 1.5)), i.e. ~35° max field angle.
    r_centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    cos4_model = np.cos(np.arctan(r_centers * 1.5)) ** 4

    # Mean squared deviation from the model.
    residual = float(np.mean((normalized - cos4_model) ** 2))

    # Fraction of rings that get brighter outward (vignetting should not).
    diffs = np.diff(normalized)
    non_decreasing_frac = float(np.sum(diffs > 0.02) / len(diffs))

    if residual < 0.01 and non_decreasing_frac < 0.3:
        score, note = -0.3, "Natural vignetting pattern (cos⁴θ consistent)"
    elif residual > 0.05 or non_decreasing_frac > 0.5:
        score, note = 0.4, "Unnatural brightness profile (vignetting absent or inconsistent)"
    else:
        score, note = 0.1, "Mild vignetting deviation"

    return {
        "test": "Vignetting (cos⁴θ)",
        "cos4_residual": round(residual, 5),
        "non_decreasing_fraction": round(non_decreasing_frac, 3),
        "score": score,
        "note": note,
        "radial_profile": normalized.tolist(),
    }
153
+
154
+
155
+ # ─── Depth of Field Consistency ─────────────────────────────────────
156
def analyze_dof_consistency(img: Image.Image) -> Dict[str, Any]:
    """
    Measure spatial consistency of local blur (depth of field).

    Real depth-of-field blur varies smoothly with scene distance; AI or
    composited images often show abrupt, patchy blur transitions.

    Returns a dict with a normalized blur-inconsistency metric, a score
    (negative = consistent with real optics), a note, and the raw
    per-block blur map.
    """
    # Hoisted out of the per-block loop below: the import and the
    # Laplacian kernel are loop-invariant and were being re-executed for
    # every block in the original.
    from scipy.signal import convolve2d
    lap = np.array([[0, 1, 0], [1, -4, 1], [0, 1, 0]], dtype=np.float64)

    gray = _to_gray(img)
    h, w = gray.shape

    # Tile into ~16 blocks along the longer side, at least 8 px each.
    block_size = max(h, w) // 16
    if block_size < 8:
        block_size = 8

    # Laplacian variance per block = local sharpness estimate.
    blur_map = np.zeros((h // block_size, w // block_size))
    for bi in range(blur_map.shape[0]):
        for bj in range(blur_map.shape[1]):
            y0, y1 = bi * block_size, (bi + 1) * block_size
            x0, x1 = bj * block_size, (bj + 1) * block_size
            block = gray[y0:y1, x0:x1]
            laplacian = convolve2d(block, lap, mode="valid")
            blur_map[bi, bj] = float(np.var(laplacian))

    # Inconsistency = high-frequency structure left after smoothing the
    # blur map, normalized by the mean sharpness.
    if blur_map.size > 1:
        smooth_blur = gaussian_filter(blur_map, sigma=1.0)
        blur_residual = blur_map - smooth_blur
        inconsistency = float(np.std(blur_residual) / (np.mean(blur_map) + 1e-9))
    else:
        inconsistency = 0.0

    if inconsistency < 0.3:
        score = -0.3
        note = "Smooth depth-of-field gradient (consistent with real optics)"
    elif inconsistency > 0.7:
        score = 0.5
        note = "Abrupt blur transitions (inconsistent DoF, possible manipulation)"
    else:
        score = 0.1
        note = "Moderate DoF variation"

    return {
        "test": "Depth-of-Field Consistency",
        "blur_inconsistency": round(inconsistency, 4),
        "score": score,
        "note": note,
        "blur_map": blur_map,
    }
207
+
208
+
209
+ # ─── Bokeh Microstructure ───────────────────────────────────────────
210
def analyze_bokeh(img: Image.Image) -> Dict[str, Any]:
    """
    Look for aperture-blade structure in the brightest highlight region.

    Real bokeh discs carry the lens aperture's polygonal blade signature,
    visible as angular variation in the Fourier spectrum of a highlight
    patch.  AI-generated bokeh is typically a smooth disc without such
    structure.

    Returns a dict with the angular spectral variance (when a usable
    highlight exists), a score, a note, and a ``bokeh_found`` flag.
    """
    from scipy.ndimage import label

    gray = _to_gray(img)

    # Candidate bokeh pixels: brightest ~3% of the image.
    threshold = np.percentile(gray, 97)
    bright_mask = gray > threshold

    if np.sum(bright_mask) < 100:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "Insufficient bright highlights for bokeh analysis",
            "bokeh_found": False,
        }

    labeled, n_features = label(bright_mask)
    if n_features == 0:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "No bokeh regions detected",
            "bokeh_found": False,
        }

    # Largest connected bright blob.  np.bincount counts all labels in a
    # single pass (the original scanned the full label image once per
    # label); ties resolve to the lowest label either way via argmax.
    sizes = np.bincount(labeled.ravel())[1:]
    largest = int(np.argmax(sizes)) + 1
    blob_mask = labeled == largest

    # Crop the bounding box around the blob.
    ys, xs = np.where(blob_mask)
    y0, y1, x0, x1 = ys.min(), ys.max(), xs.min(), xs.max()
    patch = gray[y0:y1 + 1, x0:x1 + 1]

    if patch.shape[0] < 8 or patch.shape[1] < 8:
        return {
            "test": "Bokeh Microstructure",
            "score": 0.0,
            "note": "Bokeh regions too small for analysis",
            "bokeh_found": False,
        }

    # Log-magnitude FFT of the patch — blade signatures appear as angular
    # structure in the spectrum.
    fft = np.fft.fft2(patch)
    fft_shift = np.fft.fftshift(fft)
    magnitude = np.log(np.abs(fft_shift) + 1)

    # Angular variance in FFT (high = blade structure, low = smooth circle).
    # Build the coordinate grid once; the original evaluated np.mgrid over
    # the whole patch twice just to take one axis from each.
    cy, cx = magnitude.shape[0] // 2, magnitude.shape[1] // 2
    Y, X = np.mgrid[0:magnitude.shape[0], 0:magnitude.shape[1]]
    angles = np.arctan2(Y - cy, X - cx)

    n_angular_bins = 12
    angular_profile = []
    for k in range(n_angular_bins):
        a0 = -np.pi + k * (2 * np.pi / n_angular_bins)
        a1 = a0 + (2 * np.pi / n_angular_bins)
        amask = (angles >= a0) & (angles < a1)
        angular_profile.append(float(np.mean(magnitude[amask])) if amask.any() else 0)

    angular_var = float(np.var(angular_profile))

    if angular_var > 0.1:
        score = -0.2
        note = "Aperture blade structure detected in bokeh (consistent with real lens)"
    else:
        score = 0.3
        note = "Smooth circular bokeh without blade structure (possible AI generation)"

    return {
        "test": "Bokeh Microstructure",
        "angular_variance": round(angular_var, 4),
        "score": score,
        "note": note,
        "bokeh_found": True,
    }
293
+
294
+
295
+ # ─── Main Agent Entry Point ─────────────────────────────────────────
296
def run_optical_agent(img: Image.Image) -> AgentEvidence:
    """Run all optical physics tests and produce structured evidence.

    Each sub-test is isolated: an exception in one test is recorded as an
    error finding (score 0, excluded from the average) without aborting
    the remaining tests.
    """
    findings = []
    scores = []

    # (analysis function, label recorded if that function raises).
    # A single loop replaces four copy-pasted try/except stanzas.
    tests = [
        (analyze_chromatic_aberration, "Chromatic Aberration"),
        (analyze_vignetting, "Vignetting"),
        (analyze_dof_consistency, "DoF Consistency"),
        (analyze_bokeh, "Bokeh Microstructure"),
    ]
    for analyze, test_name in tests:
        try:
            result = analyze(img)
            findings.append(result)
            scores.append(result["score"])
        except Exception as e:
            findings.append({"test": test_name, "error": str(e), "score": 0})

    if scores:
        avg_score = float(np.mean(scores))
        # Confidence grows with the magnitude of the consensus score.
        confidence = min(1.0, 0.5 + 0.5 * abs(avg_score))
    else:
        avg_score = 0.0
        confidence = 0.1

    # Build rationale from clear violations / clear passes.
    violations = [f["test"] for f in findings if f.get("score", 0) > 0.2]
    compliant = [f["test"] for f in findings if f.get("score", 0) < -0.1]

    if violations:
        rationale = f"Optical violations detected in: {', '.join(violations)}."
    elif compliant:
        rationale = f"Optical physics consistent: {', '.join(compliant)}."
    else:
        rationale = "Optical analysis inconclusive."

    for f in findings:
        if f.get("note"):
            rationale += f" [{f['test']}]: {f['note']}."

    return AgentEvidence(
        agent_name="Optical Physics Agent",
        # Cast: np.clip returns np.float64, but the field is declared float.
        violation_score=float(np.clip(avg_score, -1, 1)),
        confidence=confidence,
        failure_prob=max(0.0, 1.0 - len(scores) / 4),
        rationale=rationale,
        sub_findings=findings,
    )