# FORENSIQ / agents / model_agent.py
# (hosting-page scrape residue removed; last commit d348aa2:
#  "fix: replace Laplacian bokeh check with local-variance method in
#   autocorrelation override")
"""FORENSIQ β€” Generative Model Agent (15 features)"""
import numpy as np
from PIL import Image
from scipy.signal import find_peaks
from scipy.ndimage import gaussian_filter, label
from typing import Dict, Any
from agents.optical_agent import AgentEvidence
def _g(img):
    """Return the image as a float64 grayscale (luminance) ndarray."""
    return np.asarray(img.convert("L"), dtype=np.float64)
def m01_fft_grid_8x8(img):
    """Score 8px/16px periodic peak spacing along the central axes of the
    centered FFT magnitude spectrum (block-grid artifacts typical of
    compression or patch-based decoders).

    Returns a findings dict; "magnitude_spectrum" carries the full log-magnitude
    array for downstream visualization.
    """
    gray = _g(img)
    h, w = gray.shape
    mag = np.log(np.abs(np.fft.fftshift(np.fft.fft2(gray))) + 1)
    cy, cx = h // 2, w // 2
    row_peaks, _ = find_peaks(mag[cy, :], distance=5, prominence=0.3)
    col_peaks, _ = find_peaks(mag[:, cx], distance=5, prominence=0.3)

    def periodicity(peaks, size):
        # Fraction of inter-peak gaps matching a size/8 or size/16 spacing
        # (15% tolerance); needs at least 3 peaks to form meaningful gaps.
        if len(peaks) < 3:
            return 0.0
        gaps = np.diff(sorted(peaks))
        p8, p16 = size / 8, size / 16
        hits8 = np.sum(np.abs(gaps - p8) < p8 * 0.15)
        hits16 = np.sum(np.abs(gaps - p16) < p16 * 0.15)
        return float(max(hits8, hits16) / max(len(gaps), 1))

    gs = (periodicity(row_peaks, w) + periodicity(col_peaks, h)) / 2
    if gs > 0.4:
        s, n = 0.7, f"8Γ—8 grid artifacts (periodicity={gs:.2f})"
    elif gs > 0.2:
        s, n = 0.3, f"Weak grid patterns ({gs:.2f})"
    else:
        s, n = -0.2, "No grid artifacts"
    return {"test": "FFT Grid 8Γ—8", "periodicity": round(gs, 4), "score": s,
            "note": n, "magnitude_spectrum": mag}
def m02_fft_grid_16x16(img):
    """Probe the centered FFT spectrum at multiples of the 16px-block frequency.

    Samples the log-magnitude at offsets k*(w//16) / k*(h//16) from the DC bin
    along the central row/column and compares their mean against the spectrum's
    median background.

    Fix: for images smaller than 16 px per axis, h//16 (or w//16) truncates to
    0, so every "peak" sample read the DC bin itself and the ratio became
    spuriously huge — a guaranteed false positive. Such images now return a
    neutral result.
    """
    gray = np.array(img.convert("L")).astype(np.float64)
    h, w = gray.shape
    e16_h, e16_w = h // 16, w // 16
    if e16_h < 1 or e16_w < 1:
        # Degenerate sampling step; the test is meaningless at this size.
        return {"test": "FFT Grid 16Γ—16", "peak_ratio": 0.0, "score": 0.0,
                "note": "Image too small for 16Γ—16 analysis"}
    fft = np.fft.fftshift(np.fft.fft2(gray))
    mag = np.log(np.abs(fft) + 1)
    cy, cx = h // 2, w // 2
    # Sample expected harmonic positions; out-of-bounds harmonics count as 0.
    peaks_h = [mag[cy, cx + k * e16_w] if cx + k * e16_w < w else 0 for k in range(1, 8)]
    peaks_v = [mag[cy + k * e16_h, cx] if cy + k * e16_h < h else 0 for k in range(1, 8)]
    avg_peak = (float(np.mean(peaks_h)) + float(np.mean(peaks_v))) / 2
    bg = float(np.median(mag))
    ratio = avg_peak / (bg + 1e-9)
    if ratio > 1.5:
        s, n = 0.5, f"16Γ—16 spectral peaks (ratio={ratio:.2f})"
    elif ratio > 1.2:
        s, n = 0.2, f"Mild 16Γ—16 peaks ({ratio:.2f})"
    else:
        s, n = -0.1, f"No 16Γ—16 artifacts ({ratio:.2f})"
    return {"test": "FFT Grid 16Γ—16", "peak_ratio": round(ratio, 3), "score": s, "note": n}
def m03_spectral_slope(img):
    """Fit the slope of the radially averaged log power spectrum vs log frequency.

    Natural images follow an approximately 1/fΒ² power law (slope near -2);
    large deviation is treated as suspicious.

    Perf fix: the radial profile is accumulated with np.bincount in a single
    O(h*w) pass instead of one boolean-mask reduction per radius
    (O(maxr*h*w)), which was accidentally quadratic in image size.
    """
    gray = _g(img)
    h, w = gray.shape
    fft = np.fft.fftshift(np.fft.fft2(gray))
    power = np.abs(fft) ** 2
    cy, cx = h // 2, w // 2
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    maxr = min(cy, cx)
    # Radial mean power: per-radius sums / counts via bincount.
    sums = np.bincount(R.ravel(), weights=power.ravel())
    counts = np.bincount(R.ravel())
    rp = np.zeros(maxr)
    valid = counts[:maxr] > 0
    rp[valid] = sums[:maxr][valid] / counts[:maxr][valid]
    rp = np.log(rp + 1)
    freqs = np.arange(1, maxr)
    lf = np.log(freqs + 1)
    lp = rp[1:maxr]
    if len(lf) > 10:
        c = np.polyfit(lf, lp, 1)
        slope = float(c[0])
        dev = abs(slope - (-2.0))
    else:
        # Too few radii to fit; falls through to the "unnatural" branch below.
        slope = 0
        dev = 2
    if dev < 0.5:
        s, n = -0.3, f"Natural 1/fΒ² slope ({slope:.2f})"
    elif dev > 1.5:
        s, n = 0.3, f"Unnatural spectral slope ({slope:.2f})"
    else:
        s, n = 0.1, f"Slope deviation={dev:.2f}"
    return {"test": "Spectral Slope 1/fΒ²", "slope": round(slope, 3), "score": s, "note": n}
def m04_diffusion_notches(img):
    """Count dips ("notches") in the radial power spectrum below a power-law fit.

    Fits a line to the radial log-power profile in log-frequency, then counts
    prominent negative residual peaks; multiple notches are a reported
    diffusion-model signature.

    Perf fix (consistent with m03): radial profile via np.bincount in one
    O(h*w) pass instead of a per-radius boolean-mask loop; the duplicated
    log-frequency array is also computed once.
    """
    gray = _g(img)
    fft = np.fft.fftshift(np.fft.fft2(gray))
    power = np.abs(fft) ** 2
    h, w = power.shape
    cy, cx = h // 2, w // 2
    Y, X = np.mgrid[0:h, 0:w]
    R = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2).astype(int)
    maxr = min(cy, cx)
    sums = np.bincount(R.ravel(), weights=power.ravel())
    counts = np.bincount(R.ravel())
    rp = np.zeros(maxr)
    valid = counts[:maxr] > 0
    rp[valid] = sums[:maxr][valid] / counts[:maxr][valid]
    rp = np.log(rp + 1)
    if len(rp) > 20:
        lx = np.log(np.arange(1, maxr) + 1)  # hoisted: used by both fit and eval
        c = np.polyfit(lx, rp[1:maxr], 1)
        fitted = np.polyval(c, lx)
        res = rp[1:maxr] - fitted
        # Notches = prominent minima of the residual (peaks of its negation).
        notches, _ = find_peaks(-res, prominence=0.5)
        nn = len(notches)
    else:
        nn = 0
    if nn > 3:
        s, n = 0.5, f"{nn} spectral notches β€” diffusion signature"
    elif nn > 1:
        s, n = 0.2, f"{nn} notches"
    else:
        s, n = -0.1, "No diffusion notches"
    return {"test": "Diffusion Notches", "count": nn, "score": s, "note": n}
def m05_autocorrelation(img):
    """Detect abnormal secondary peaks in the image autocorrelation.

    The autocorrelation is computed via the Wiener-Khinchin theorem (inverse
    FFT of the power spectrum), centered, and normalized to its maximum. The
    trivial zero-lag peak is masked out and the strongest remaining secondary
    peak `ms` is scored:

      * ms > 0.95 -> hard "AI generated" override (sets
        "override_suppression": True), UNLESS the peak is explained by a
        large defocused (bokeh) background, in which case only a mild
        score is emitted with "bokeh_explained": True;
      * ms > 0.3 / ms > 0.15 -> graduated GAN-checkerboard suspicion;
      * otherwise -> scored as natural.
    """
    gray=_g(img); fft=np.fft.fft2(gray); power=np.abs(fft)**2
    # Wiener-Khinchin: autocorrelation = IFFT(power spectrum); shift the
    # zero-lag peak to the center, then normalize into [0, 1].
    ac=np.real(np.fft.ifft2(power)); ac=np.fft.fftshift(ac); ac=ac/(ac.max()+1e-9)
    h,w=ac.shape; cy,cx=h//2,w//2; acm=ac.copy()
    # Zero a disc (radius max(h,w)//20) around the zero-lag peak so `ms`
    # measures only genuine secondary structure.
    re=max(h,w)//20; Y,X=np.mgrid[0:h,0:w]; cm=((X-cx)**2+(Y-cy)**2)<re**2; acm[cm]=0
    ms=float(np.max(acm))
    # Before firing hard override, check if high autocorrelation is explained by bokeh.
    # Macro/portrait/shallow-DoF photos have large uniform bokeh regions that create
    # high autocorrelation from optical physics, not AI generation.
    #
    # Method: Local variance via uniform_filter. Bokeh regions have very low local
    # variance (uniform pixel intensity). This is more robust than the Laplacian
    # approach because it directly measures what bokeh is -- spatial uniformity --
    # rather than inferring it from edge magnitude.
    bokeh_explained = False
    bokeh_fraction = 0.0
    if ms > 0.95:
        from scipy.ndimage import uniform_filter as uf
        # Compute local variance: E[X^2] - E[X]^2 over 32x32 windows.
        local_mean = uf(gray, size=32)
        local_sq_mean = uf(gray**2, size=32)
        # Clip: floating-point cancellation can make the variance slightly negative.
        local_var = np.clip(local_sq_mean - local_mean**2, 0, None)
        # Bimodal variance test: in a real bokeh/macro/shallow-DoF image, the
        # variance distribution is strongly bimodal -- sharp foreground has high
        # local variance while the defocused background has near-zero variance.
        #
        # Strategy: Use the P95 of local variance (captures the sharp subject's
        # typical variance) and threshold at 5% of that value. Pixels below this
        # threshold are effectively uniform/bokeh. This works because:
        #  - Real bokeh: P95 is high (sharp subject), most pixels are far below it
        #  - AI smooth: P95 is low, so threshold is tiny, nothing qualifies as
        #    "bokeh relative to sharp detail" because there IS no sharp detail
        #  - Normal photo: variance is spread out, few pixels are <5% of P95
        p95_var = float(np.percentile(local_var, 95))
        if p95_var > 50.0:  # Guard: sharp region must have real detail
            var_thresh = p95_var * 0.05  # 5% of peak variance
            low_var_mask = local_var < var_thresh
            bokeh_fraction = float(np.mean(low_var_mask))
            has_genuine_detail = True  # Already guaranteed by p95 > 50 check
        else:
            # No region with genuine high-variance detail -- not a bokeh image
            bokeh_fraction = 0.0
            low_var_mask = np.zeros_like(local_var, dtype=bool)
            has_genuine_detail = False
        # Bokeh explains autocorrelation if:
        # 1. Large uniform region (>35% of image has variance < 5% of P95)
        # 2. AND genuine sharp detail exists (P95 variance > 50)
        if bokeh_fraction > 0.35 and has_genuine_detail:
            bokeh_explained = True
    if ms > 0.95 and not bokeh_explained:
        s, n = 0.8, f"CRITICAL: Autocorrelation {ms:.3f} exceeds physical camera limit β€” AI generated"
        result = {"test":"Autocorrelation Peak","max_secondary":round(ms,4),"score":s,"note":n}
        result["override_suppression"] = True
        return result
    elif ms > 0.95 and bokeh_explained:
        s, n = 0.15, f"High autocorrelation ({ms:.3f}) but large bokeh region ({bokeh_fraction:.0%} uniform background) β€” likely optical, not AI"
        return {"test":"Autocorrelation Peak","max_secondary":round(ms,4),"score":s,"note":n,"bokeh_explained":True}
    elif ms>0.3: s,n=0.6,f"Strong secondary peak ({ms:.3f}) β€” GAN checkerboard"
    elif ms>0.15: s,n=0.3,f"Moderate peak ({ms:.3f})"
    else: s,n=-0.2,f"Natural autocorrelation ({ms:.3f})"
    return {"test":"Autocorrelation Peak","max_secondary":round(ms,4),"score":s,"note":n}
def m06_checkerboard(img):
    """Detect checkerboard (alternating-pixel) upsampling artifacts.

    Compares pixel correlation at lag 2 vs lag 1 along both axes (subsampled
    every 100th element for speed); a positive delta means same-parity pixels
    correlate better than neighbors — the classic deconvolution checkerboard.

    Fix: np.corrcoef returns NaN for zero-variance input (flat images, or
    tiny subsamples that happen to be constant); the NaN then propagated into
    `delta` and the reported note. Zero-variance pairs now yield 0.0.
    """
    gray = np.array(img.convert("L")).astype(np.float64)
    h, w = gray.shape
    if h < 10 or w < 10:
        return {"test": "Checkerboard Pattern", "score": 0.0, "note": "Too small"}

    def _corr(a, b):
        # Treat degenerate (constant) samples as "no correlation evidence".
        if np.std(a) < 1e-12 or np.std(b) < 1e-12:
            return 0.0
        return float(np.corrcoef(a, b)[0, 1])

    ha = _corr(gray[:, :-2].ravel()[::100], gray[:, 2:].ravel()[::100])
    va = _corr(gray[:-2, :].ravel()[::100], gray[2:, :].ravel()[::100])
    h1 = _corr(gray[:, :-1].ravel()[::100], gray[:, 1:].ravel()[::100])
    v1 = _corr(gray[:-1, :].ravel()[::100], gray[1:, :].ravel()[::100])
    delta = ((ha - h1) + (va - v1)) / 2
    if delta > 0.1:
        s, n = 0.5, f"Strong checkerboard (Ξ”={delta:.4f})"
    elif delta > 0.05:
        s, n = 0.25, f"Mild checkerboard (Ξ”={delta:.4f})"
    else:
        s, n = -0.1, f"No checkerboard ({delta:.4f})"
    return {"test": "Checkerboard Pattern", "delta": round(delta, 6), "score": s, "note": n}
def m07_vae_boundaries(img):
    """Check whether row-to-row differences spike at 32/64/128-px patch
    boundaries, as left behind by patch-based (VAE-style) decoders.

    Reports the best boundary/interior difference ratio found across the
    candidate patch sizes.
    """
    gray = _g(img)
    h, w = gray.shape
    best_ratio, best_size = 1.0, 0
    for patch in (32, 64, 128):
        if h < patch * 2 or w < patch * 2:
            continue
        rows, cols = (h // patch) * patch, (w // patch) * patch
        crop = gray[:rows, :cols]
        boundary_diffs = []
        interior_diffs = []
        for row in range(1, rows):
            step = float(np.mean(np.abs(crop[row, :] - crop[row - 1, :])))
            phase = row % patch
            if phase == 0:
                boundary_diffs.append(step)
            elif phase != 1:
                # The row just after a boundary is skipped so boundary ringing
                # does not inflate the interior baseline.
                interior_diffs.append(step)
        if boundary_diffs and interior_diffs:
            ratio = float(np.mean(boundary_diffs)) / (float(np.mean(interior_diffs)) + 1e-9)
            if ratio > best_ratio:
                best_ratio, best_size = ratio, patch
    if best_ratio > 1.3:
        s, n = 0.4, f"VAE boundaries at {best_size}px ({best_ratio:.3f})"
    elif best_ratio > 1.1:
        s, n = 0.2, f"Weak boundaries ({best_ratio:.3f})"
    else:
        s, n = -0.1, f"No VAE boundaries ({best_ratio:.3f})"
    return {"test": "VAE Patch Boundaries", "ratio": round(best_ratio, 4), "score": s, "note": n}
def m08_spectral_symmetry(img):
    """Score mirror asymmetry of the log-magnitude FFT spectrum.

    Compares the top half against the vertically flipped bottom half, and the
    left half against the horizontally flipped right half, averaging the two
    mean absolute differences.

    NOTE(review): for a real-valued input, the exact FFT magnitude symmetry is
    point symmetry mag(u,v) = mag(-u,-v) (a 180-degree rotation), not per-axis
    mirror symmetry, and the half-spectrum alignment here is offset by one
    row/column relative to that pairing. As written, this measures spectral
    anisotropy rather than true conjugate symmetry; the thresholds below
    appear tuned to this measure -- confirm intent before changing it.
    """
    gray=_g(img); fft=np.fft.fftshift(np.fft.fft2(gray)); mag=np.log(np.abs(fft)+1)
    h,w=mag.shape; cy,cx=h//2,w//2
    top=mag[:cy,:]; bot=np.flipud(mag[cy+1:,:])
    left=mag[:,:cx]; right=np.fliplr(mag[:,cx+1:])
    # Crop each pair to its common shape before differencing (odd dimensions
    # leave the two halves unequal in size).
    mh,mw=min(top.shape[0],bot.shape[0]),min(top.shape[1],bot.shape[1])
    asym_tb=float(np.mean(np.abs(top[:mh,:mw]-bot[:mh,:mw])))
    mh2,mw2=min(left.shape[0],right.shape[0]),min(left.shape[1],right.shape[1])
    asym_lr=float(np.mean(np.abs(left[:mh2,:mw2]-right[:mh2,:mw2])))
    asym=(asym_tb+asym_lr)/2
    if asym<0.1: s,n=-0.1,f"Symmetric spectrum ({asym:.3f})"
    elif asym>0.5: s,n=0.3,f"Asymmetric spectrum ({asym:.3f})"
    else: s,n=0.0,f"Spectral asymmetry={asym:.3f}"
    return {"test":"Spectral Symmetry","asymmetry":round(asym,4),"score":s,"note":n}
def m09_upsampling_stride(img):
    """Compare the two diagonal stride-2 subsampling phases of the image.

    Learned stride-2 upsamplers can leave the (even,even) and (odd,odd) pixel
    lattices nearly identical, while sensor images show more phase-to-phase
    variation. The mean absolute phase difference is normalized by mean
    brightness before thresholding.

    Cleanup: removed the unused locals `h, w`.
    """
    gray = _g(img)
    even = gray[::2, ::2]
    odd = gray[1::2, 1::2]
    mh, mw = min(even.shape[0], odd.shape[0]), min(even.shape[1], odd.shape[1])
    diff = float(np.mean(np.abs(even[:mh, :mw] - odd[:mh, :mw])))
    mean_val = float(np.mean(gray))
    norm_diff = diff / (mean_val + 1e-9)  # brightness-normalized difference
    if norm_diff > 0.1:
        s, n = -0.1, f"Natural stride variation ({norm_diff:.4f})"
    elif norm_diff < 0.01:
        s, n = 0.3, f"Stride-2 artifacts ({norm_diff:.4f})"
    else:
        s, n = 0.0, f"Stride diff={norm_diff:.4f}"
    return {"test": "Upsampling Stride-2", "norm_diff": round(norm_diff, 4), "score": s, "note": n}
def m10_patch_diversity(img):
    """Measure how much per-patch contrast varies across 32Γ—32 tiles.

    Computes the standard deviation of each tile, then the coefficient of
    variation of those stds. Unusually uniform patch statistics (low
    diversity) are scored as GAN-mode-collapse-like; wide variation as
    natural.

    Cleanup: removed the per-patch `means` array, which was computed but
    never used.
    """
    gray = _g(img)
    h, w = gray.shape
    ps = 32
    hc, wc = (h // ps) * ps, (w // ps) * ps
    gray = gray[:hc, :wc]
    patches = []
    for i in range(0, hc, ps):
        for j in range(0, wc, ps):
            patches.append(gray[i:i + ps, j:j + ps].ravel())
    if len(patches) < 4:
        return {"test": "Patch Diversity", "score": 0.0, "note": "Too few patches"}
    patches = np.array(patches)
    stds = np.std(patches, axis=1)
    diversity = float(np.std(stds) / (np.mean(stds) + 1e-9))
    if diversity > 0.5:
        s, n = -0.2, f"High patch diversity ({diversity:.3f}) β€” natural"
    elif diversity < 0.15:
        s, n = 0.3, f"Low patch diversity ({diversity:.3f}) β€” GAN mode collapse"
    else:
        s, n = 0.0, f"Patch diversity={diversity:.3f}"
    return {"test": "Patch Diversity", "diversity": round(diversity, 4), "score": s, "note": n}
def m11_color_consistency(img):
    """Check variation of the R/G channel ratio across 64Γ—64 tiles.

    Suspiciously uniform channel balance across the whole frame is scored as
    AI-like; normal photos vary tile to tile.
    """
    rgb = np.array(img.convert("RGB")).astype(np.float64)
    h, w, _ = rgb.shape
    ps = 64
    hc, wc = (h // ps) * ps, (w // ps) * ps
    rgb = rgb[:hc, :wc]
    ratios = []
    for top in range(0, hc, ps):
        for left in range(0, wc, ps):
            tile_mean = np.mean(rgb[top:top + ps, left:left + ps], axis=(0, 1))
            # Skip near-black tiles (mean green <= 30) where the ratio is unstable.
            if tile_mean[1] > 30:
                ratios.append(tile_mean[0] / (tile_mean[1] + 1e-9))
    if len(ratios) < 4:
        return {"test": "Color Ratio Consistency", "score": 0.0, "note": "Few patches"}
    cv = float(np.std(ratios)) / (float(np.mean(ratios)) + 1e-9)
    if cv > 0.1:
        s, n = -0.2, f"Varied color ratios (CV={cv:.3f})"
    elif cv < 0.02:
        s, n = 0.2, f"Suspiciously uniform color ({cv:.3f})"
    else:
        s, n = 0.0, f"Color CV={cv:.3f}"
    return {"test": "Color Ratio Consistency", "cv": round(cv, 4), "score": s, "note": n}
def m12_spectral_rolloff_shape(img):
    """Compare log-magnitude falloff along the two spectral diagonals.

    A perfectly isotropic rolloff (both diagonals nearly identical) is scored
    as AI-like; real optics and scene structure usually leave some anisotropy.
    """
    spec = np.abs(np.fft.fftshift(np.fft.fft2(_g(img))))
    h, w = spec.shape
    cy, cx = h // 2, w // 2
    half = min(cy, cx) // 2
    diag_main = np.array([spec[cy + i, cx + i] for i in range(half)])
    diag_anti = np.array([spec[cy + i, cx - i] for i in range(half)])
    if len(diag_main) > 5:
        log_main = np.log(diag_main + 1)
        log_anti = np.log(diag_anti + 1)
        aniso = float(np.mean(np.abs(log_main - log_anti))) / (float(np.mean(log_main)) + 1e-9)
    else:
        aniso = 0
    if aniso > 0.1:
        s, n = -0.1, f"Anisotropic rolloff ({aniso:.3f})"
    elif aniso < 0.02:
        s, n = 0.2, f"Isotropic rolloff ({aniso:.3f}) β€” AI-like"
    else:
        s, n = 0.0, f"Rolloff anisotropy={aniso:.3f}"
    return {"test": "Spectral Rolloff Shape", "anisotropy": round(aniso, 4), "score": s, "note": n}
def m13_texture_repetition(img):
    """Search for near-duplicate 64Γ—64 texture tiles (GAN texture copying).

    Tiles are contrast-normalized, then the maximum correlation across
    non-adjacent tile pairs (capped at the first 20 tiles for speed) is
    thresholded.
    """
    gray = _g(img)
    h, w = gray.shape
    ps = 64
    if h < ps * 3 or w < ps * 3:
        return {"test": "Texture Repetition", "score": 0.0, "note": "Too small"}
    hc, wc = (h // ps) * ps, (w // ps) * ps
    gray = gray[:hc, :wc]
    patches = []
    for i in range(0, hc, ps):
        for j in range(0, wc, ps):
            tile = gray[i:i + ps, j:j + ps]
            tile = (tile - np.mean(tile)) / (np.std(tile) + 1e-9)  # contrast-normalize
            patches.append(tile.ravel())
    if len(patches) < 4:
        return {"test": "Texture Repetition", "score": 0.0, "note": "Few patches"}
    patches = np.array(patches)
    limit = min(len(patches), 20)
    max_corr = 0
    for a in range(limit):
        for b in range(a + 2, limit):  # skip adjacent tiles (b >= a + 2)
            corr = float(np.corrcoef(patches[a], patches[b])[0, 1])
            if corr > max_corr:
                max_corr = corr
    if max_corr > 0.8:
        s, n = 0.4, f"Repeated textures ({max_corr:.3f}) β€” GAN copy"
    elif max_corr > 0.5:
        s, n = 0.2, f"Similar textures ({max_corr:.3f})"
    else:
        s, n = -0.1, f"Varied textures ({max_corr:.3f})"
    return {"test": "Texture Repetition", "max_corr": round(max_corr, 4), "score": s, "note": n}
def m14_highfreq_noise_structure(img):
    """Split the noise residual's spectrum into high/low frequency bands.

    The residual (image minus a sigma=1 Gaussian blur) is transformed and the
    mean magnitude beyond 0.5*rmax is compared with the mean inside 0.3*rmax:
    sensor noise is HF-heavy, AI smoothing leaves LF-heavy residuals.
    """
    gray = _g(img)
    residual = gray - gaussian_filter(gray, 1.0)
    spec = np.abs(np.fft.fftshift(np.fft.fft2(residual)))
    h, w = spec.shape
    cy, cx = h // 2, w // 2
    Y, X = np.mgrid[0:h, 0:w]
    radius = np.sqrt((X - cx) ** 2 + (Y - cy) ** 2)
    rmax = min(cy, cx)
    hf_band = spec[radius > rmax * 0.5]
    lf_band = spec[radius < rmax * 0.3]
    ratio = float(np.mean(hf_band)) / (float(np.mean(lf_band)) + 1e-9)
    if ratio > 2:
        s, n = -0.2, f"HF-dominant noise ({ratio:.2f}) β€” sensor"
    elif ratio < 0.5:
        s, n = 0.3, f"LF-dominant noise ({ratio:.2f}) β€” AI smoothing"
    else:
        s, n = 0.0, f"Noise HF/LF={ratio:.2f}"
    return {"test": "HF Noise Structure", "ratio": round(ratio, 3), "score": s, "note": n}
def m15_phase_coherence(img):
    """Mean wrapped phase gradient of the FFT.

    Sums the mean absolute phase step along both spectral axes (wrapped into
    [0, pi]); lower values (smoother phase) are scored as more natural.
    """
    phase = np.angle(np.fft.fft2(_g(img)))
    dx = np.abs(np.diff(phase, axis=1))
    dy = np.abs(np.diff(phase, axis=0))
    # Wrap-around correction: a step of |d| > pi is really 2*pi - |d|.
    dx = np.where(dx > np.pi, 2 * np.pi - dx, dx)
    dy = np.where(dy > np.pi, 2 * np.pi - dy, dy)
    smoothness = float(np.mean(dx) + np.mean(dy))
    if smoothness < 2:
        s, n = -0.2, f"Coherent phase ({smoothness:.3f})"
    elif smoothness > 2.5:
        s, n = 0.2, f"Incoherent phase ({smoothness:.3f})"
    else:
        s, n = 0.0, f"Phase coherence={smoothness:.3f}"
    return {"test": "Phase Coherence", "smoothness": round(smoothness, 4), "score": s, "note": n}
# Ordered registry of the 15 generative-model fingerprint tests; consumed by
# run_model_agent via agents.utils.run_agent_tests.
ALL_TESTS=[m01_fft_grid_8x8,m02_fft_grid_16x16,m03_spectral_slope,m04_diffusion_notches,
           m05_autocorrelation,m06_checkerboard,m07_vae_boundaries,m08_spectral_symmetry,
           m09_upsampling_stride,m10_patch_diversity,m11_color_consistency,m12_spectral_rolloff_shape,
           m13_texture_repetition,m14_highfreq_noise_structure,m15_phase_coherence]
def run_model_agent(img, modality_adjustments=None):
    """Run all 15 generative-model tests on `img` and aggregate the results.

    Parameters
    ----------
    img : PIL.Image.Image
        Image under analysis; each test converts it to the mode it needs.
    modality_adjustments : optional
        Forwarded to run_agent_tests (schema defined there — TODO confirm).

    Returns
    -------
    AgentEvidence
        Carries the aggregate score clipped to [-1, 1], confidence, failure
        count, rationale, and the per-test findings.

    Cleanup: the local re-import of AgentEvidence was removed — it is already
    imported at module top. run_agent_tests stays a function-scope import;
    presumably this avoids an import cycle with agents.utils (verify).
    """
    from agents.utils import run_agent_tests
    findings, avg, conf, fail, rat = run_agent_tests(ALL_TESTS, img, "Generative Model Agent", modality_adjustments)
    return AgentEvidence("Generative Model Agent", np.clip(avg, -1, 1), conf, fail, rat, findings)