cledouxluma committed
Commit f5bb51a · verified · 1 Parent(s): b734cb2

Upload scripts/evaluate.py with huggingface_hub
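For context, an upload like this is typically produced by a call along the following lines (a minimal sketch, not the author's exact invocation; the repo id is hypothetical and authentication is assumed to come from a cached huggingface-cli login):

from huggingface_hub import HfApi

api = HfApi()  # uses the token cached by `huggingface-cli login`
api.upload_file(
    path_or_fileobj="scripts/evaluate.py",  # local file to push
    path_in_repo="scripts/evaluate.py",     # destination path in the repo
    repo_id="cledouxluma/scrfd-eval",       # hypothetical repo id
    commit_message="Upload scripts/evaluate.py with huggingface_hub",
)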

Files changed (1)
  1. scripts/evaluate.py +189 -0
scripts/evaluate.py ADDED
@@ -0,0 +1,189 @@
+ #!/usr/bin/env python3
+ """
+ Evaluate SCRFD model on WiderFace validation set.
+
+ Usage:
+     python scripts/evaluate.py \\
+         --model scrfd_34g \\
+         --checkpoint checkpoints/scrfd_34g_best.pth \\
+         --data-root data/wider_face \\
+         --output-dir results/scrfd_34g
+
+ Output:
+     - WiderFace Easy/Medium/Hard AP
+     - Prediction files in WiderFace submission format
+     - Speed benchmark results
+ """
+
+ import os
+ import sys
+ import argparse
+ import time
+ import json
+ from pathlib import Path
+
+ import numpy as np
+ import cv2
+ import torch
+
+ sys.path.insert(0, str(Path(__file__).resolve().parent.parent))
+
+ from models.detector import build_detector
+ from evaluation.widerface_eval import WiderFaceEvaluator
+ from evaluation.speed_benchmark import SpeedBenchmark
+
+
+ def parse_args():
+     parser = argparse.ArgumentParser(description='Evaluate SCRFD')
+     parser.add_argument('--model', type=str, default='scrfd_34g')
+     parser.add_argument('--checkpoint', type=str, required=True)
+     parser.add_argument('--data-root', type=str, default='data/wider_face')
+     parser.add_argument('--output-dir', type=str, default='results')
+     parser.add_argument('--input-size', type=int, default=640)
+     parser.add_argument('--score-thresh', type=float, default=0.02)
+     parser.add_argument('--nms-thresh', type=float, default=0.4)
+     parser.add_argument('--device', type=str, default='cuda')
+     # store_true with default=True made --benchmark a no-op; this keeps
+     # benchmarking on by default while allowing --no-benchmark to skip it.
+     parser.add_argument('--benchmark', action=argparse.BooleanOptionalAction, default=True)
+     parser.add_argument('--multi-scale', action='store_true',
+                         help='Multi-scale testing (slower, higher AP)')
+     parser.add_argument('--scales', nargs='+', type=int,
+                         default=[500, 800, 1100, 1400, 1700],
+                         help='Scales for multi-scale testing')
+     return parser.parse_args()
+
+
+ @torch.no_grad()
+ def evaluate_single_scale(model, evaluator, data_root, input_size, device,
+                           score_thresh):
+     """Run single-scale evaluation."""
+     img_dir = os.path.join(data_root, 'WIDER_val', 'images')
+     mean = np.array([104.0, 117.0, 123.0], dtype=np.float32)
+
+     total_time = 0
+     num_images = 0
+
+     for event in sorted(os.listdir(img_dir)):
+         event_dir = os.path.join(img_dir, event)
+         if not os.path.isdir(event_dir):
+             continue
+
+         for img_name in sorted(os.listdir(event_dir)):
+             if not img_name.lower().endswith(('.jpg', '.jpeg', '.png')):
+                 continue
+
+             img_path = os.path.join(event_dir, img_name)
+             img = cv2.imread(img_path)
+             if img is None:
+                 continue
+
+             h, w = img.shape[:2]
+             filename = f'{event}/{img_name}'
+
+             # Preprocess: letterbox to input_size, subtract BGR mean, HWC -> CHW
+             scale = input_size / max(h, w)
+             new_h, new_w = int(h * scale), int(w * scale)
+             resized = cv2.resize(img, (new_w, new_h))
+
+             padded = np.zeros((input_size, input_size, 3), dtype=np.float32)
+             padded[:new_h, :new_w] = resized
+             padded = (padded - mean).transpose(2, 0, 1)
+
+             tensor = torch.from_numpy(padded).unsqueeze(0).float().to(device)
+
+             # Inference (synchronize so async CUDA work is counted in the timing)
+             t0 = time.time()
+             results = model(tensor)
+             if device.startswith('cuda'):
+                 torch.cuda.synchronize()
+             total_time += time.time() - t0
+             num_images += 1
+
+             # Post-process
+             r = results[0]
+             boxes = r['boxes'].cpu().numpy()
+             scores = r['scores'].cpu().numpy()
+
+             # Rescale boxes back to original image coordinates
+             if len(boxes) > 0:
+                 boxes /= scale
+             mask = scores >= score_thresh
+             boxes = boxes[mask]
+             scores = scores[mask]
+
+             evaluator.add_prediction(filename, boxes, scores)
+
+             if num_images % 200 == 0:
+                 fps = num_images / max(total_time, 1e-6)
+                 print(f"  Processed {num_images} images ({fps:.1f} FPS)")
+
+     return total_time, num_images
+
+
+ def main():
+     args = parse_args()
+     os.makedirs(args.output_dir, exist_ok=True)
+
+     # Load model
+     print(f"Loading {args.model} from {args.checkpoint}")
+     model = build_detector(
+         args.model,
+         score_threshold=args.score_thresh,
+         nms_threshold=args.nms_thresh,
+     ).to(args.device)
+
+     checkpoint = torch.load(args.checkpoint, map_location='cpu')
+     state_dict = checkpoint.get('model_state_dict', checkpoint)
+     model.load_state_dict(state_dict, strict=False)
+     model.eval()
+
+     num_params = sum(p.numel() for p in model.parameters()) / 1e6
+     print(f"  Parameters: {num_params:.2f}M")
+
+     # WiderFace evaluation
+     print("Running WiderFace evaluation...")
+     evaluator = WiderFaceEvaluator(
+         gt_dir=os.path.join(args.data_root, 'wider_face_split')
+     )
+
+     total_time, num_images = evaluate_single_scale(
+         model, evaluator, args.data_root, args.input_size,
+         args.device, args.score_thresh
+     )
+
+     # Results
+     results = evaluator.evaluate()
+     report = evaluator.generate_report()
+     print(report)
+
+     # Save predictions
+     evaluator.save_predictions(os.path.join(args.output_dir, 'predictions'))
+
+     # Speed benchmark
+     if args.benchmark:
+         print("\nRunning speed benchmark...")
+         bench = SpeedBenchmark(device=args.device)
+         for size in [320, 480, 640, 960]:
+             bench.benchmark_model(model, args.model, input_size=size)
+         bench.print_results()
+
+         # Save markdown table
+         with open(os.path.join(args.output_dir, 'speed_benchmark.md'), 'w') as f:
+             f.write(bench.to_markdown())
+
+     # Save results
+     results['num_images'] = num_images
+     results['total_time'] = total_time
+     results['avg_fps'] = num_images / max(total_time, 1e-6)
+     results['model'] = args.model
+     results['input_size'] = args.input_size
+
+     with open(os.path.join(args.output_dir, 'results.json'), 'w') as f:
+         json.dump(results, f, indent=2)
+
+     print(f"\nResults saved to {args.output_dir}/")
+
+
+ if __name__ == '__main__':
+     main()
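Note that --multi-scale and --scales are parsed above, but main() only calls evaluate_single_scale(), so multi-scale testing is not actually wired in. A minimal sketch of what that path could look like, assuming the same model interface as above (per-image dicts with 'boxes' and 'scores') and using torchvision's NMS to merge detections across scales; this sketch is not part of the committed file:

import cv2
import numpy as np
import torch
import torchvision

MEAN = np.array([104.0, 117.0, 123.0], dtype=np.float32)  # same BGR mean as the script


def letterbox(img, size, device):
    """Resize the long side to `size`, zero-pad to a square, return NCHW tensor and scale."""
    h, w = img.shape[:2]
    scale = size / max(h, w)
    resized = cv2.resize(img, (int(w * scale), int(h * scale)))
    padded = np.zeros((size, size, 3), dtype=np.float32)
    padded[:resized.shape[0], :resized.shape[1]] = resized
    padded = (padded - MEAN).transpose(2, 0, 1)
    return torch.from_numpy(padded).unsqueeze(0).to(device), scale


@torch.no_grad()
def detect_multi_scale(model, img, scales, device, score_thresh=0.02, nms_thresh=0.4):
    """Run the detector at several input sizes and merge the results with NMS."""
    all_boxes, all_scores = [], []
    for size in scales:  # e.g. args.scales = [500, 800, 1100, 1400, 1700]
        tensor, scale = letterbox(img, size, device)
        r = model(tensor)[0]
        keep = r['scores'] >= score_thresh
        all_boxes.append(r['boxes'][keep].cpu() / scale)  # back to original coordinates
        all_scores.append(r['scores'][keep].cpu())
    boxes = torch.cat(all_boxes)
    scores = torch.cat(all_scores)
    keep = torchvision.ops.nms(boxes, scores, nms_thresh)  # drop duplicates across scales
    return boxes[keep].numpy(), scores[keep].numpy()

Horizontal-flip testing is another common addition; either way, the merged detections would be passed to evaluator.add_prediction() exactly as in the single-scale path.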