mohammad2012191 committed on
Commit
2d1cb2c
·
verified ·
1 Parent(s): 24396c2

Upload 3 files

Browse files
Files changed (4) hide show
  1. .gitattributes +1 -0
  2. predict.py +525 -0
  3. requirements.txt +262 -0
  4. technical_report_EzFake.pdf +3 -0
.gitattributes CHANGED
@@ -57,3 +57,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
57
  # Video files - compressed
58
  *.mp4 filter=lfs diff=lfs merge=lfs -text
59
  *.webm filter=lfs diff=lfs merge=lfs -text
60
+ technical_report_EzFake.pdf filter=lfs diff=lfs merge=lfs -text
predict.py ADDED
@@ -0,0 +1,525 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import argparse
import json
import math
import re
from pathlib import Path
from typing import List, Dict, Tuple, Optional, Any

import matplotlib
import matplotlib.cm as cm
import numpy as np
import torch
import torch.nn as nn
import torchvision.transforms as T
from PIL import Image, ImageOps
from torchvision.transforms.functional import InterpolationMode
from tqdm import tqdm

# Module 1 Imports
from transformers import AutoImageProcessor, AutoModelForImageClassification

# Module 2 Imports (InternVL)
from transformers import AutoModel, AutoTokenizer
22
+
23
# -----------------------------------------------------------------------------
# Configuration & Constants
# -----------------------------------------------------------------------------
# Lower-case file extensions accepted when scanning the input folder.
IMG_EXTS = {".jpg", ".jpeg", ".png", ".webp", ".bmp", ".tif", ".tiff"}

# -----------------------------------------------------------------------------
# DYNAMIC PROMPT TEMPLATE
# -----------------------------------------------------------------------------
# System prompt handed to the VLM. The placeholders ({status_msg},
# {mission_msg}, {allowed_options}, {reasoning_instruction}, {constraint_msg},
# {authenticity_score}) are filled per-image by run_vlm_audit() depending on
# the pixel-detector score.
VLM_SYSTEM_PROMPT_TEMPLATE = """
Role: You are a Digital Forensics Expert.

Input Context:
Image-1: The suspect image.
Image-2: A Grad-CAM heatmap (Red = Pixel Artifacts detected).
Forensic Score: {authenticity_score:.2f} (0.0=Clear, 1.0=Flagged).

Technical Status: {status_msg}

Your Mission: {mission_msg}

Step-by-Step Analysis:
1. Physics Check: Do shadows, reflections, and lighting match the environment?
2. Biological Integrity: Check for wax-like skin, asymmetrical eyes, or blending lines on the neck.
3. Logic Check: Are there impossible geometries or structural errors?

Output Requirements:
Output ONLY a JSON object.
"manipulation_type": Select the best fit from: {allowed_options}
"vlm_reasoning": {reasoning_instruction}

Constraint: {constraint_msg}
"""
55
+
56
+ # -----------------------------------------------------------------------------
57
+ # Utils
58
+ # -----------------------------------------------------------------------------
59
def list_images(folder: Path) -> List[Path]:
    """Recursively collect every image file under *folder*, sorted by path."""
    found = []
    for candidate in folder.rglob("*"):
        if candidate.is_file() and candidate.suffix.lower() in IMG_EXTS:
            found.append(candidate)
    found.sort()
    return found
61
+
62
def load_rgb(path: Path) -> Image.Image:
    """Open *path*, apply any EXIF orientation, and return an RGB image."""
    image = ImageOps.exif_transpose(Image.open(path))
    if image.mode == "RGB":
        return image
    return image.convert("RGB")
68
+
69
def resize_pad_square(img: Image.Image, size: int) -> Image.Image:
    """Fit *img* into a size x size square, keeping aspect ratio.

    The image is scaled so its longer side equals *size*, then centered on a
    black background to fill the square.
    """
    w, h = img.size
    if w <= 0 or h <= 0:
        # Degenerate image: just force it to the target square.
        return img.resize((size, size), resample=Image.BICUBIC)
    scale = size / float(max(w, h))
    new_w = max(1, int(round(w * scale)))
    new_h = max(1, int(round(h * scale)))
    scaled = img.resize((new_w, new_h), resample=Image.BICUBIC)
    left = (size - new_w) // 2
    top = (size - new_h) // 2
    padding = (left, top, size - new_w - left, size - new_h - top)
    return ImageOps.expand(scaled, border=padding, fill=0)
83
+
84
+ # -----------------------------------------------------------------------------
85
+ # Module 1: Forensic Detector Helpers
86
+ # -----------------------------------------------------------------------------
87
def get_norm_from_processor(processor) -> Tuple[List[float], List[float], float]:
    """Extract (mean, std, rescale_factor) from a HF image processor.

    Falls back to the ImageNet statistics and a 1/255 rescale when the
    processor does not expose the corresponding attribute.
    """
    imagenet_mean = [0.485, 0.456, 0.406]
    imagenet_std = [0.229, 0.224, 0.225]
    mean = getattr(processor, "image_mean", imagenet_mean)
    std = getattr(processor, "image_std", imagenet_std)
    rescale = getattr(processor, "rescale_factor", 1.0 / 255.0)
    return list(mean), list(std), float(rescale)
92
+
93
def preprocess_one(img: Image.Image, size: int, mean: List[float], std: List[float], rescale_factor: float) -> Tuple[torch.Tensor, Image.Image]:
    """Square-pad, rescale, and normalize one image.

    Returns the normalized CHW float tensor together with the padded PIL
    image (the latter is reused for the Grad-CAM overlay).
    """
    img_sq = resize_pad_square(img, size)
    arr = np.asarray(img_sq).astype(np.float32) * rescale_factor
    chw = np.ascontiguousarray(arr.transpose(2, 0, 1))
    tensor = torch.from_numpy(chw)
    mean_t = torch.as_tensor(mean, dtype=torch.float32).view(-1, 1, 1)
    std_t = torch.as_tensor(std, dtype=torch.float32).view(-1, 1, 1)
    return (tensor - mean_t) / std_t, img_sq
103
+
104
def preprocess_batch(imgs: List[Image.Image], size: int, mean: List[float], std: List[float], rescale_factor: float) -> torch.Tensor:
    """Preprocess each image and stack the results into one NCHW batch."""
    tensors = [preprocess_one(im, size, mean, std, rescale_factor)[0] for im in imgs]
    return torch.stack(tensors, dim=0)
110
+
111
@torch.inference_mode()
def forward_fake_prob(model, pixel_values: torch.Tensor, fake_idx: int) -> torch.Tensor:
    """Run the classifier and return the per-sample probability of 'fake'.

    A single-logit head is read as a sigmoid score; a multi-class head is
    softmaxed and the *fake_idx* column is selected.
    """
    logits = model(pixel_values=pixel_values).logits
    if logits.shape[-1] == 1:
        return torch.sigmoid(logits[:, 0])
    return torch.softmax(logits, dim=-1)[:, fake_idx]
120
+
121
@torch.inference_mode()
def predict_probs_batch(model, paths: List[Path], device: torch.device, size: int, mean: List[float], std: List[float], rescale_factor: float, fake_idx: int, use_tta: bool) -> List[float]:
    """Score a batch of image files; optionally average a 5-view TTA.

    The TTA averages the full frame with its four quadrant crops, so small
    local artifacts are not washed out by downscaling the whole image.
    """
    images = [load_rgb(p) for p in paths]

    def _score(batch: List[Image.Image]) -> torch.Tensor:
        pv = preprocess_batch(batch, size, mean, std, rescale_factor).to(device)
        return forward_fake_prob(model, pv, fake_idx)

    if not use_tta:
        return _score(images).detach().cpu().tolist()

    # Full-frame view first, then the four quadrants.
    total = _score(images)
    quadrants = [[], [], [], []]
    for img in images:
        w, h = img.size
        mw, mh = w // 2, h // 2
        quadrants[0].append(img.crop((0, 0, mw, mh)))    # top-left
        quadrants[1].append(img.crop((mw, 0, w, mh)))    # top-right
        quadrants[2].append(img.crop((0, mh, mw, h)))    # bottom-left
        quadrants[3].append(img.crop((mw, mh, w, h)))    # bottom-right

    for quad in quadrants:
        total = total + _score(quad)

    return (total / 5.0).detach().cpu().tolist()
149
+
150
+ # -----------------------------------------------------------------------------
151
+ # Grad-CAM
152
+ # -----------------------------------------------------------------------------
153
class GradCAM:
    """Minimal Grad-CAM over one conv layer of an image classifier.

    A forward hook captures the layer's activations and a full backward hook
    captures the gradients flowing into it; __call__ combines the two into a
    normalized class-activation map.
    """

    def __init__(self, model: nn.Module, target_layer: nn.Module):
        self.model = model
        self.target_layer = target_layer
        self.activations = None  # filled by the forward hook on each call
        self.gradients = None    # filled by the backward hook on each call
        self._fwd = target_layer.register_forward_hook(self._forward_hook)
        self._bwd = target_layer.register_full_backward_hook(self._backward_hook)

    def close(self):
        """Remove both hooks; call once no more CAMs are needed."""
        self._fwd.remove()
        self._bwd.remove()

    def _forward_hook(self, module, inp, out):
        self.activations = out

    def _backward_hook(self, module, grad_input, grad_output):
        self.gradients = grad_output[0]

    def __call__(self, pixel_values: torch.Tensor, class_index: int) -> torch.Tensor:
        """Return an (H, W) CAM in [0, 1] for the first sample of the batch.

        *class_index* selects the logit to back-propagate; a single-logit
        head always uses its only column.
        """
        self.model.zero_grad(set_to_none=True)
        out = self.model(pixel_values=pixel_values)
        logits = out.logits
        if logits.shape[-1] == 1:
            score = logits[:, 0]
        else:
            score = logits[:, class_index]
        score.sum().backward(retain_graph=False)

        acts = self.activations
        grads = self.gradients
        # Classic Grad-CAM: channel weights = gradients pooled over H and W.
        weights = grads.mean(dim=(2, 3), keepdim=True)
        cam = (weights * acts).sum(dim=1)
        cam = torch.relu(cam)
        # Per-map min/max normalization; epsilon guards an all-zero CAM.
        cam_min = cam.amin(dim=(1, 2), keepdim=True)
        cam_max = cam.amax(dim=(1, 2), keepdim=True)
        cam = (cam - cam_min) / (cam_max - cam_min + 1e-6)
        return cam[0].detach()
191
+
192
def make_overlay(pil_img: Image.Image, cam_01: np.ndarray, alpha: float = 0.45) -> Image.Image:
    """Blend a jet-colormapped CAM on top of *pil_img*.

    Args:
        pil_img: Base RGB image.
        cam_01: CAM values, expected in [0, 1] (clipped defensively).
        alpha: Opacity of the heatmap layer.

    Returns:
        The blended RGB PIL image.
    """
    cam_01 = np.clip(cam_01, 0.0, 1.0)
    # BUG FIX: matplotlib.cm.get_cmap() was removed in matplotlib 3.9 (this
    # project pins 3.10.x), so use the colormaps registry when present and
    # keep the legacy call as a fallback for old installs.
    if hasattr(matplotlib, "colormaps"):
        cmap = matplotlib.colormaps["jet"]
    else:
        cmap = cm.get_cmap("jet")
    heat = cmap(cam_01)[:, :, :3]
    heat_u8 = (heat * 255.0).astype(np.uint8)
    base = np.array(pil_img).astype(np.uint8)
    if heat_u8.shape[:2] != base.shape[:2]:
        # CAM resolution differs from the image: resample the heatmap.
        heat_pil = Image.fromarray(heat_u8).resize((base.shape[1], base.shape[0]), Image.BILINEAR)
        heat_u8 = np.array(heat_pil)

    overlay = (base * (1.0 - alpha) + heat_u8 * alpha).astype(np.uint8)
    return Image.fromarray(overlay)
203
+
204
+ # -----------------------------------------------------------------------------
205
+ # Module 2: InternVL Preprocessing Utilities
206
+ # -----------------------------------------------------------------------------
207
def build_transform(input_size):
    """Build the InternVL tile transform: RGB, resize, ToTensor, ImageNet norm."""
    imagenet_mean = (0.485, 0.456, 0.406)
    imagenet_std = (0.229, 0.224, 0.225)
    steps = [
        T.Lambda(lambda img: img if img.mode == 'RGB' else img.convert('RGB')),
        T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC),
        T.ToTensor(),
        T.Normalize(mean=imagenet_mean, std=imagenet_std),
    ]
    return T.Compose(steps)
216
+
217
def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size):
    """Pick the (cols, rows) tiling whose aspect ratio best matches the image.

    Ties are broken in favor of the later candidate only when the source
    image has enough pixels to fill at least half of that grid's tiles.
    """
    best = (1, 1)
    best_diff = float('inf')
    area = width * height
    for ratio in target_ratios:
        diff = abs(aspect_ratio - ratio[0] / ratio[1])
        if diff < best_diff:
            best_diff = diff
            best = ratio
        elif diff == best_diff and area > 0.5 * image_size * image_size * ratio[0] * ratio[1]:
            best = ratio
    return best
231
+
232
def dynamic_preprocess(image, min_num=1, max_num=6, image_size=448, use_thumbnail=True):
    """Split *image* into a grid of image_size tiles (InternVL-style).

    Chooses the (cols, rows) grid with min_num <= cols*rows <= max_num whose
    aspect ratio best matches the image, resizes the image to exactly fill
    that grid, crops the tiles left-to-right / top-to-bottom, and appends a
    square thumbnail of the whole image when more than one tile was made.
    """
    orig_w, orig_h = image.size
    aspect_ratio = orig_w / orig_h

    # All distinct grids with an allowed tile count, smallest grids first.
    candidates = {
        (i, j)
        for n in range(min_num, max_num + 1)
        for i in range(1, n + 1)
        for j in range(1, n + 1)
        if min_num <= i * j <= max_num
    }
    candidates = sorted(candidates, key=lambda r: r[0] * r[1])

    grid = find_closest_aspect_ratio(aspect_ratio, candidates, orig_w, orig_h, image_size)
    cols, rows = grid[0], grid[1]
    target_w = image_size * cols
    target_h = image_size * rows

    resized = image.resize((target_w, target_h))
    tiles = []
    for idx in range(cols * rows):
        col = idx % cols
        row = idx // cols
        box = (col * image_size, row * image_size,
               (col + 1) * image_size, (row + 1) * image_size)
        tiles.append(resized.crop(box))

    if use_thumbnail and len(tiles) > 1:
        tiles.append(image.resize((image_size, image_size)))
    return tiles
259
+
260
+ # -----------------------------------------------------------------------------
261
+ # Module 2: VLM Logic (InternVL)
262
+ # -----------------------------------------------------------------------------
263
def load_internvl(model_name: str, cache_dir: str):
    """Load the InternVL tokenizer and model.

    Args:
        model_name: HF hub id of the InternVL checkpoint.
        cache_dir: Directory for the HF download cache.

    Returns:
        (tokenizer, model) with the model in eval mode.
    """
    print(f"Loading VLM: {model_name}...")
    tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True, cache_dir=cache_dir)
    # Using float16 or bfloat16 for efficiency; bf16 preferred when supported.
    dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16

    # === MULTI-GPU CHANGE ===
    # Using device_map="auto" lets Hugging Face Accelerate split layers across GPUs 0,1,2,3
    print("Dispatching model across available GPUs (device_map='auto')...")
    model = AutoModel.from_pretrained(
        model_name,
        trust_remote_code=True,
        torch_dtype=dtype,
        low_cpu_mem_usage=True,
        cache_dir=cache_dir,
        use_flash_attn=False,
        device_map="auto"  # This enables Multi-GPU usage
    ).eval()

    return tokenizer, model
283
+
284
def run_vlm_audit(tokenizer, model, orig_path: str, cam_path: str, score: float) -> Dict[str, Any]:
    """
    Constructs the prompt and runs inference on InternVL.

    Args:
        tokenizer, model: The loaded InternVL pair (see load_internvl).
        orig_path: Path to the suspect image (Image-1).
        cam_path: Path to its Grad-CAM overlay (Image-2).
        score: Pixel-detector fake probability in [0, 1].

    Returns:
        Dict with "manipulation_type" and "vlm_reasoning"; inference or
        JSON-parsing failures are reported inside the dict, never raised.
    """
    # === COMPLEMENTARY AUDIT STRATEGY ===
    # The prompt depends on whether the pixel detector already flagged the image.
    if score >= 0.5:
        # High Likelihood of Fake (Pixel Detector found artifacts)
        status_msg = "Pixel-level artifacts DETECTED. The image is likely manipulated."
        mission_msg = "Confirm the specific type of manipulation. Does the red heatmap align with semantic errors?"
        allowed_options = "['In-painting', 'Full Synthesis', 'Face Swap', 'Filter']"
        reasoning_instruction = "Explain which specific feature (eyes, neck, shadow) aligns with the heatmap to prove the manipulation."
        constraint_msg = "You MUST classify the type of manipulation. Do not choose 'None' unless the pixel detector is clearly hallucinating (extremely rare)."
    else:
        # Low Likelihood (Pixel Detector is happy)
        status_msg = "Pixel-level artifacts NOT detected. The image passed the noise/frequency check."
        mission_msg = "Hunt for 'Semantic Impossibilities' that the pixel detector missed (e.g., bad physics, lighting errors). If the physics and logic are perfect, mark as None."
        allowed_options = "['None', 'In-painting', 'Full Synthesis', 'Face Swap', 'Filter']"
        reasoning_instruction = "If authentic, state 'No semantic anomalies found'. If fake, explain the physical impossibility (e.g. 'shadows go wrong direction') that proves it despite clean pixels."
        constraint_msg = "Prefer 'None' if the image looks natural. Only flag if you find a logical or physical contradiction."

    # Fill template
    prompt_text = VLM_SYSTEM_PROMPT_TEMPLATE.format(
        authenticity_score=score,
        status_msg=status_msg,
        mission_msg=mission_msg,
        allowed_options=allowed_options,
        reasoning_instruction=reasoning_instruction,
        constraint_msg=constraint_msg
    )

    # Load both images and split each into InternVL 448px tiles (+ thumbnail).
    img1 = load_rgb(Path(orig_path))
    img2 = load_rgb(Path(cam_path))
    transform = build_transform(input_size=448)
    tiles1 = dynamic_preprocess(img1, image_size=448, use_thumbnail=True, max_num=6)
    tiles2 = dynamic_preprocess(img2, image_size=448, use_thumbnail=True, max_num=6)

    # Stack all tiles in one pass, match the dtype chosen at load time, and
    # move to model.device (the device of the model's first layer shard).
    dtype = torch.bfloat16 if torch.cuda.is_bf16_supported() else torch.float16
    pixel_values = torch.stack([transform(t) for t in tiles1 + tiles2]).to(dtype).to(model.device)

    # Two <image> placeholders: suspect image first, heatmap second.
    question = f"Image-1: <image>\nImage-2: <image>\n{prompt_text}"
    generation_config = dict(max_new_tokens=512, do_sample=False)

    try:
        response = model.chat(tokenizer, pixel_values, question, generation_config)
    except Exception as e:
        return {"manipulation_type": "Error", "vlm_reasoning": f"VLM Inference Error: {e}"}

    # Extract the first JSON object from the (possibly chatty) response.
    try:
        json_match = re.search(r"\{.*\}", response, re.DOTALL)
        if json_match:
            return json.loads(json_match.group(0))
        return {"manipulation_type": "Unknown", "vlm_reasoning": response}
    except Exception:
        return {"manipulation_type": "Error", "vlm_reasoning": f"Failed to parse JSON: {response}"}
356
+
357
+ # -----------------------------------------------------------------------------
358
+ # Main Pipeline
359
+ # -----------------------------------------------------------------------------
360
def _find_gradcam_layer(vit_model: nn.Module) -> Optional[nn.Module]:
    """Locate a conv layer to hook for Grad-CAM.

    Prefers the ViT patch-embedding projection; otherwise falls back to the
    last Conv2d found anywhere in the model. Returns None if no Conv2d exists.
    """
    for name, module in vit_model.named_modules():
        if "patch_embeddings.projection" in name and isinstance(module, nn.Conv2d):
            return module
    target = None
    for module in vit_model.modules():
        if isinstance(module, nn.Conv2d):
            target = module
    return target


def main():
    """End-to-end pipeline: ViT forensic scan -> Grad-CAM -> VLM audit -> JSON."""
    ap = argparse.ArgumentParser()
    ap.add_argument("--input_dir", type=str, required=True)
    ap.add_argument("--output_file", type=str, default="predictions.json")
    ap.add_argument("--model_id", type=str, default="buildborderless/CommunityForensics-DeepfakeDet-ViT")
    ap.add_argument("--vlm_id", type=str, default="OpenGVLab/InternVL3_5-30B-A3B-MPO")
    ap.add_argument("--cache_dir", type=str, default="./")
    ap.add_argument("--device", type=str, default="auto")
    ap.add_argument("--batch_size", type=int, default=8)
    ap.add_argument("--tta", action="store_true", help="Enable TTA for ViT")
    args = ap.parse_args()

    # Device setup (Module 1 only): InternVL handles its own device_map,
    # but the ViT needs an explicit device.
    if args.device == "auto":
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(args.device)
    print(f"Using device for Module 1 (ViT): {device}")

    input_dir = Path(args.input_dir)
    out_file = Path(args.output_file)
    cam_dir = out_file.parent / "gradcam"
    cam_dir.mkdir(parents=True, exist_ok=True)

    # ---------------------------
    # 1. Load Module 1 (ViT)
    # ---------------------------
    print(f"--- Loading Module 1: {args.model_id} ---")
    processor = AutoImageProcessor.from_pretrained(args.model_id, cache_dir=args.cache_dir)
    vit_model = AutoModelForImageClassification.from_pretrained(args.model_id, cache_dir=args.cache_dir).to(device).eval()

    mean, std, rescale_factor = get_norm_from_processor(processor)
    size = 384
    try:
        size = vit_model.config.image_size
        if isinstance(size, (tuple, list)):
            size = size[0]
    except Exception:
        # FIX: was a bare `except:` (also swallowed KeyboardInterrupt/SystemExit).
        # Config without image_size: keep the 384 default.
        pass

    # Index of the "fake" class in the classifier head (default 1).
    fake_idx = 1
    if hasattr(vit_model.config, "label2id"):
        for label, idx in vit_model.config.label2id.items():
            if "fake" in label.lower():
                fake_idx = idx
                break

    # Setup GradCAM (None when no conv layer could be found).
    target_layer = _find_gradcam_layer(vit_model)
    gradcam = GradCAM(vit_model, target_layer) if target_layer else None
    print(f"GradCAM Layer: {target_layer}")

    # ---------------------------
    # 2. Run Module 1 Inference
    # ---------------------------
    paths = list_images(input_dir)
    print(f"Found {len(paths)} images. Running Forensic Scan...")

    results_map = {}
    for start in tqdm(range(0, len(paths), args.batch_size), desc="ViT Scanning"):
        batch_paths = paths[start:start + args.batch_size]
        scores = predict_probs_batch(
            model=vit_model,
            paths=batch_paths,
            device=device,
            size=size,
            mean=mean,
            std=std,
            rescale_factor=rescale_factor,
            fake_idx=fake_idx,
            use_tta=args.tta,
        )
        for p, s in zip(batch_paths, scores):
            results_map[p] = {"score": s, "cam_path": None}

    print("Generating Heatmaps for ALL images...")
    for p, data in tqdm(results_map.items(), desc="Grad-CAM Gen"):
        if not gradcam:
            continue
        img = load_rgb(p)
        x, img_sq = preprocess_one(img, size, mean, std, rescale_factor)
        pv = x.unsqueeze(0).to(device)
        pv.requires_grad_(True)

        try:
            cam = gradcam(pv, class_index=fake_idx)
            cam_np = cam.cpu().numpy()
            W, H = img_sq.size
            cam_pil = Image.fromarray((cam_np * 255).astype(np.uint8)).resize((W, H), Image.BILINEAR)
            cam_norm = np.array(cam_pil) / 255.0

            overlay = make_overlay(img_sq, cam_norm)

            # Flatten the relative path into one file name inside cam_dir.
            rel_name = p.relative_to(input_dir)
            save_path = cam_dir / (str(rel_name).replace("/", "_") + ".png")
            save_path.parent.mkdir(parents=True, exist_ok=True)
            overlay.save(save_path)

            data["cam_path"] = str(save_path.absolute())
        except Exception as e:
            # Best-effort: a failed CAM only disables the VLM stage for this image.
            print(f"CAM Error on {p}: {e}")

    if gradcam:
        gradcam.close()

    # === CRITICAL MEMORY CLEANUP ===
    # Free the ViT before loading the (much larger) VLM.
    del vit_model, gradcam, processor
    torch.cuda.empty_cache()
    # ===============================

    # ---------------------------
    # 3. Load Module 2 (InternVL)
    # ---------------------------
    print(f"--- Loading Module 2: {args.vlm_id} ---")
    # Pass only the cache_dir, device is handled auto
    tokenizer, vlm_model = load_internvl(args.vlm_id, args.cache_dir)

    # ---------------------------
    # 4. Fusion & Audit
    # ---------------------------
    final_json = []

    print("Running VLM Semantic Audit on ALL images...")
    for p, data in tqdm(results_map.items(), desc="VLM Reasoning"):
        score = data["score"]
        cam_path = data["cam_path"]
        rel_name = str(p.relative_to(input_dir))

        # Default Fallbacks
        m_type = "None"
        reasoning = "Forensic score is low and no anomalies detected."

        if cam_path:
            vlm_out = run_vlm_audit(
                tokenizer,
                vlm_model,
                orig_path=str(p.absolute()),
                cam_path=cam_path,
                score=score,
            )
            m_type = vlm_out.get("manipulation_type", "Unknown")
            reasoning = vlm_out.get("vlm_reasoning", "VLM failed to reason.")
        else:
            reasoning = "VLM Skipped (Missing Heatmap)"

        final_json.append({
            "image_name": rel_name,
            "authenticity_score": float(score),
            "manipulation_type": m_type,
            "vlm_reasoning": reasoning,
        })

    with open(out_file, "w") as f:
        json.dump(final_json, f, indent=2)

    print(f"Done! Predictions saved to {out_file}")

if __name__ == "__main__":
    main()
requirements.txt ADDED
@@ -0,0 +1,262 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ absl-py==2.3.1
2
+ accelerate==1.10.1
3
+ aiohappyeyeballs==2.6.1
4
+ aiohttp==3.13.3
5
+ aiosignal==1.4.0
6
+ albucore==0.0.24
7
+ albumentations==2.0.8
8
+ annotated-types==0.7.0
9
+ anyio==4.11.0
10
+ argon2-cffi==25.1.0
11
+ argon2-cffi-bindings==25.1.0
12
+ arrow==1.3.0
13
+ asttokens==3.0.0
14
+ async-lru==2.0.5
15
+ async-timeout==5.0.1
16
+ attrs==25.4.0
17
+ audioread==3.0.1
18
+ av==16.0.1
19
+ babel==2.17.0
20
+ beautifulsoup4==4.14.2
21
+ bitsandbytes==0.48.1
22
+ bleach==6.2.0
23
+ boto3==1.40.50
24
+ botocore==1.40.50
25
+ braceexpand==0.1.7
26
+ brotlicffi==1.0.9.2
27
+ cachetools==6.2.1
28
+ catboost==1.2.8
29
+ certifi==2025.10.5
30
+ cffi==2.0.0
31
+ charset-normalizer==3.3.2
32
+ click==8.3.0
33
+ colorama==0.4.6
34
+ coloredlogs==15.0.1
35
+ comm==0.2.3
36
+ contourpy==1.3.2
37
+ cycler==0.12.1
38
+ datasets==4.5.0
39
+ debugpy==1.8.17
40
+ decorator==5.2.1
41
+ decord==0.6.0
42
+ defusedxml==0.7.1
43
+ diffusers==0.35.2
44
+ dill==0.4.0
45
+ easydict==1.13
46
+ efficientnet_pytorch==0.7.1
47
+ einops==0.8.1
48
+ exceptiongroup==1.3.0
49
+ executing==2.2.1
50
+ fastjsonschema==2.21.2
51
+ filelock==3.17.0
52
+ flash-attn==2.6.3
53
+ flatbuffers==25.9.23
54
+ fonttools==4.60.1
55
+ fqdn==1.5.1
56
+ frozenlist==1.8.0
57
+ fsspec==2025.9.0
58
+ ftfy==6.3.1
59
+ gdown==5.2.1
60
+ gitdb==4.0.12
61
+ GitPython==3.1.45
62
+ gmpy2==2.2.1
63
+ google-ai-generativelanguage==0.6.15
64
+ google-api-core==2.28.1
65
+ google-api-python-client==2.185.0
66
+ google-auth==2.42.0
67
+ google-auth-httplib2==0.2.0
68
+ google-generativeai==0.8.5
69
+ googleapis-common-protos==1.71.0
70
+ graphviz==0.21
71
+ grpcio==1.76.0
72
+ grpcio-status==1.71.2
73
+ h11==0.16.0
74
+ h5py==3.15.1
75
+ hf-xet==1.2.0
76
+ hickle==5.0.3
77
+ httpcore==1.0.9
78
+ httplib2==0.31.0
79
+ httpx==0.28.1
80
+ huggingface-hub==0.36.0
81
+ humanfriendly==10.0
82
+ idna==3.7
83
+ imageio==2.37.0
84
+ importlib_metadata==8.7.0
85
+ ipdb==0.13.13
86
+ ipykernel==6.31.0
87
+ ipython==8.37.0
88
+ ipywidgets==8.1.8
89
+ isoduration==20.11.0
90
+ jedi==0.19.2
91
+ Jinja2==3.1.6
92
+ jmespath==1.0.1
93
+ joblib==1.5.2
94
+ json5==0.12.1
95
+ jsonpointer==3.0.0
96
+ jsonschema==4.25.1
97
+ jsonschema-specifications==2025.9.1
98
+ jupyter_client==8.6.3
99
+ jupyter_core==5.8.1
100
+ jupyter-events==0.12.0
101
+ jupyter-lsp==2.3.0
102
+ jupyter_server==2.17.0
103
+ jupyter_server_terminals==0.5.3
104
+ jupyterlab==4.4.9
105
+ jupyterlab_pygments==0.3.0
106
+ jupyterlab_server==2.27.3
107
+ jupyterlab_widgets==3.0.16
108
+ kaggle==1.7.4.5
109
+ kagglehub==0.3.13
110
+ kiwisolver==1.4.9
111
+ lark==1.3.0
112
+ lazy_loader==0.4
113
+ librosa==0.11.0
114
+ llvmlite==0.45.1
115
+ lxml==6.0.2
116
+ MarkupSafe==3.0.2
117
+ matplotlib==3.10.7
118
+ matplotlib-inline==0.1.7
119
+ mistune==3.1.4
120
+ mkl_fft==1.3.11
121
+ mkl_random==1.2.8
122
+ mkl-service==2.4.0
123
+ mne==1.10.2
124
+ mpmath==1.3.0
125
+ msgpack==1.1.2
126
+ multidict==6.7.0
127
+ multiprocess==0.70.18
128
+ narwhals==2.14.0
129
+ nbclient==0.10.2
130
+ nbconvert==7.16.6
131
+ nbformat==5.10.4
132
+ nest-asyncio==1.6.0
133
+ networkx==3.4.2
134
+ ninja==1.13.0
135
+ nltk==3.9.2
136
+ notebook==7.4.7
137
+ notebook_shim==0.2.4
138
+ numba==0.62.1
139
+ numpy==2.2.6
140
+ onnxruntime==1.23.2
141
+ open_clip_torch==3.2.0
142
+ opencv-python==4.12.0.88
143
+ opencv-python-headless==4.12.0.88
144
+ overrides==7.7.0
145
+ packaging==25.0
146
+ pandas==2.3.3
147
+ pandocfilters==1.5.1
148
+ parso==0.8.5
149
+ patsy==1.0.2
150
+ peft==0.17.0
151
+ pexpect==4.9.0
152
+ pillow==11.3.0
153
+ pip==25.3
154
+ platformdirs==4.5.0
155
+ plotly==6.5.0
156
+ pooch==1.8.2
157
+ portalocker==3.2.0
158
+ prometheus_client==0.23.1
159
+ prompt_toolkit==3.0.52
160
+ propcache==0.4.1
161
+ proto-plus==1.26.1
162
+ protobuf==5.29.5
163
+ psutil==7.1.0
164
+ ptyprocess==0.7.0
165
+ pure_eval==0.2.3
166
+ pyarrow==23.0.0
167
+ pyasn1==0.6.1
168
+ pyasn1_modules==0.4.2
169
+ pycocoevalcap==1.2
170
+ pycocotools==2.0.10
171
+ pycparser==2.23
172
+ pydantic==2.12.0
173
+ pydantic_core==2.41.1
174
+ Pygments==2.19.2
175
+ PyMatting==1.1.14
176
+ pyparsing==3.2.5
177
+ PySocks==1.7.1
178
+ python-dateutil==2.9.0.post0
179
+ python-docx==1.2.0
180
+ python-json-logger==4.0.0
181
+ python-slugify==8.0.4
182
+ pytorch-gradcam==0.2.1
183
+ pytz==2025.2
184
+ PyYAML==6.0.2
185
+ pyzmq==27.1.0
186
+ qwen-vl-utils==0.0.14
187
+ rarfile==4.2
188
+ referencing==0.36.2
189
+ regex==2025.9.18
190
+ rembg==2.0.69
191
+ requests==2.32.5
192
+ rfc3339-validator==0.1.4
193
+ rfc3986-validator==0.1.1
194
+ rfc3987-syntax==1.1.0
195
+ rouge_score==0.1.2
196
+ rpds-py==0.27.1
197
+ rsa==4.9.1
198
+ s3transfer==0.14.0
199
+ sacrebleu==2.5.1
200
+ safetensors==0.6.2
201
+ scikit-image==0.25.2
202
+ scikit-learn==1.7.2
203
+ scipy==1.15.3
204
+ seaborn==0.13.2
205
+ segmentation_models_pytorch==0.5.0
206
+ Send2Trash==1.8.3
207
+ sentence-transformers==5.2.0
208
+ sentencepiece==0.2.1
209
+ sentry-sdk==2.41.0
210
+ setuptools==80.9.0
211
+ shellingham==1.5.4
212
+ simsimd==6.5.3
213
+ six==1.17.0
214
+ smmap==5.0.2
215
+ sniffio==1.3.1
216
+ soundfile==0.13.1
217
+ soupsieve==2.8
218
+ soxr==1.0.0
219
+ stack-data==0.6.3
220
+ statsmodels==0.14.6
221
+ stringzilla==4.3.0
222
+ sympy==1.13.1
223
+ tabulate==0.9.0
224
+ termcolor==3.1.0
225
+ terminado==0.18.1
226
+ text-unidecode==1.3
227
+ threadpoolctl==3.6.0
228
+ tifffile==2025.5.10
229
+ timm==1.0.20
230
+ tinycss2==1.4.0
231
+ tokenizers==0.22.2
232
+ tomli==2.3.0
233
+ toolz==1.0.0
234
+ torch==2.5.1
235
+ torchaudio==2.5.1
236
+ torchvision==0.20.1
237
+ tornado==6.5.2
238
+ tqdm==4.67.1
239
+ traitlets==5.14.3
240
+ transformers==4.57.0
241
+ triton==3.1.0
242
+ typer-slim==0.21.1
243
+ types-python-dateutil==2.9.0.20251008
244
+ typing_extensions==4.15.0
245
+ typing-inspection==0.4.2
246
+ tzdata==2025.2
247
+ uri-template==1.3.0
248
+ uritemplate==4.2.0
249
+ urllib3==2.5.0
250
+ wandb==0.22.2
251
+ wcwidth==0.2.14
252
+ webcolors==24.11.1
253
+ webdataset==1.0.2
254
+ webencodings==0.5.1
255
+ websocket-client==1.9.0
256
+ wheel==0.45.1
257
+ widgetsnbextension==4.0.15
258
+ xformers==0.0.29
259
+ xlstm==2.0.0
260
+ xxhash==3.6.0
261
+ yarl==1.22.0
262
+ zipp==3.23.0
technical_report_EzFake.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:69a4e810eec97edfcbd296ca6d5ccf3b0e94ed3df74c723840c16d0127631c0b
3
+ size 250850