Kexin-251202 committed on
Commit
056b87c
·
verified ·
1 Parent(s): 019a8c1

Centre menu

Browse files
Files changed (2) hide show
  1. src/App.css +1 -1
  2. src/components/FocusPageLocal.jsx +1040 -949
src/App.css CHANGED
@@ -39,7 +39,7 @@ body {
39
  background-color: white;
40
  display: flex;
41
  align-items: center;
42
- justify-content: flex-start;
43
  gap: 0;
44
  padding: 0 16px 0 20px;
45
  box-sizing: border-box;
 
39
  background-color: white;
40
  display: flex;
41
  align-items: center;
42
+ justify-content: center;
43
  gap: 0;
44
  padding: 0 16px 0 20px;
45
  box-sizing: border-box;
src/components/FocusPageLocal.jsx CHANGED
@@ -1,949 +1,1040 @@
1
- import React, { useState, useEffect, useRef } from 'react';
2
- import CalibrationOverlay from './CalibrationOverlay';
3
-
4
- const FLOW_STEPS = {
5
- intro: 'intro',
6
- permission: 'permission',
7
- ready: 'ready'
8
- };
9
-
10
- const FOCUS_STATES = {
11
- pending: 'pending',
12
- focused: 'focused',
13
- notFocused: 'not-focused'
14
- };
15
-
16
- function HelloIcon() {
17
- return (
18
- <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
19
- <circle cx="48" cy="48" r="40" fill="#007BFF" />
20
- <path d="M30 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
21
- <path d="M54 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
22
- <path d="M30 52c3 11 10 17 18 17s15-6 18-17" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
23
- </svg>
24
- );
25
- }
26
-
27
- function CameraIcon() {
28
- return (
29
- <svg width="110" height="110" viewBox="0 0 110 110" aria-hidden="true">
30
- <rect x="30" y="36" width="50" height="34" rx="5" fill="none" stroke="#007BFF" strokeWidth="6" />
31
- <path d="M24 72h62c0 9-7 16-16 16H40c-9 0-16-7-16-16Z" fill="none" stroke="#007BFF" strokeWidth="6" />
32
- <path d="M55 28v8" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
33
- <circle cx="55" cy="36" r="14" fill="none" stroke="#007BFF" strokeWidth="6" />
34
- <circle cx="55" cy="36" r="4" fill="#007BFF" />
35
- <path d="M46 83h18" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
36
- </svg>
37
- );
38
- }
39
-
40
- const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric'];
41
-
42
- const MODEL_INFO = {
43
- hybrid: {
44
- label: 'Hybrid',
45
- tagline: 'Best overall — combines ML with geometric scoring',
46
- how: 'Fuses MLP predictions (30%) with geometric face/eye scores (70%). Uses a weighted blend tuned with LOPO evaluation.',
47
- accuracy: 'N/A',
48
- f1: '0.8409',
49
- auc: 'N/A',
50
- threshold: '0.35',
51
- evaluation: 'LOPO tuning (9 participants, 144K frames)',
52
- features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
53
- strengths: 'Most robust across different people. LOPO mean F1 is 0.8409 at w_mlp=0.3, w_geo=0.7.',
54
- badge: 'Recommended',
55
- },
56
- xgboost: {
57
- label: 'XGBoost',
58
- tagline: 'Highest raw accuracy — gradient-boosted decision trees',
59
- how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.',
60
- accuracy: '95.87%',
61
- f1: '0.9585',
62
- auc: '0.9908',
63
- threshold: '0.38',
64
- evaluation: 'Random split test (15%) + LOPO thresholds',
65
- features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
66
- strengths: 'Strong pattern recognition and fast inference. LOPO: AUC 0.8695, optimal threshold 0.280, F1 0.8549.',
67
- badge: null,
68
- },
69
- mlp: {
70
- label: 'MLP',
71
- tagline: 'Lightweight neural network — fast and efficient',
72
- how: 'Two-layer neural network (64→32 neurons). Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.',
73
- accuracy: '92.92%',
74
- f1: '0.9287',
75
- auc: '0.9714',
76
- threshold: '0.23',
77
- evaluation: 'Random split test (15%) + LOPO thresholds',
78
- features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
79
- strengths: 'Fastest inference and smallest model size. LOPO: AUC 0.8624, optimal threshold 0.228, F1 0.8578.',
80
- badge: null,
81
- },
82
- geometric: {
83
- label: 'Geometric',
84
- tagline: 'Baseline only — hardcoded thresholds, no learning',
85
- how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.',
86
- accuracy: 'N/A',
87
- f1: '0.8195',
88
- auc: 'N/A',
89
- threshold: '0.55',
90
- evaluation: 'LOPO geometric sweep',
91
- features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)',
92
- strengths: 'No model files needed. Useful fallback when model checkpoints are unavailable.',
93
- badge: 'Baseline',
94
- },
95
- };
96
-
97
- function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive }) {
98
- const [currentFrame, setCurrentFrame] = useState(15);
99
- const [timelineEvents, setTimelineEvents] = useState([]);
100
- const [stats, setStats] = useState(null);
101
- const [systemStats, setSystemStats] = useState(null);
102
- const [availableModels, setAvailableModels] = useState([]);
103
- const [currentModel, setCurrentModel] = useState('mlp');
104
- const [flowStep, setFlowStep] = useState(FLOW_STEPS.intro);
105
- const [cameraReady, setCameraReady] = useState(false);
106
- const [isStarting, setIsStarting] = useState(false);
107
- const [focusState, setFocusState] = useState(FOCUS_STATES.pending);
108
- const [cameraError, setCameraError] = useState('');
109
- const [calibration, setCalibration] = useState(null);
110
- const [l2csBoost, setL2csBoost] = useState(false);
111
- const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
112
-
113
- const localVideoRef = useRef(null);
114
- const displayCanvasRef = useRef(null);
115
- const pipVideoRef = useRef(null);
116
- const pipStreamRef = useRef(null);
117
- const previewFrameRef = useRef(null);
118
-
119
- const formatDuration = (seconds) => {
120
- if (seconds === 0) return '0s';
121
- const mins = Math.floor(seconds / 60);
122
- const secs = Math.floor(seconds % 60);
123
- return `${mins}m ${secs}s`;
124
- };
125
-
126
- const stopPreviewLoop = () => {
127
- if (previewFrameRef.current) {
128
- cancelAnimationFrame(previewFrameRef.current);
129
- previewFrameRef.current = null;
130
- }
131
- };
132
-
133
- const startPreviewLoop = () => {
134
- stopPreviewLoop();
135
- const renderPreview = () => {
136
- const canvas = displayCanvasRef.current;
137
- const video = localVideoRef.current;
138
-
139
- if (!canvas || !video || !cameraReady || videoManager?.isStreaming) {
140
- previewFrameRef.current = null;
141
- return;
142
- }
143
-
144
- if (video.readyState >= 2) {
145
- const ctx = canvas.getContext('2d');
146
- ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
147
- }
148
-
149
- previewFrameRef.current = requestAnimationFrame(renderPreview);
150
- };
151
-
152
- previewFrameRef.current = requestAnimationFrame(renderPreview);
153
- };
154
-
155
- const getErrorMessage = (err) => {
156
- if (err?.name === 'NotAllowedError') {
157
- return 'Camera permission denied. Please allow camera access.';
158
- }
159
- if (err?.name === 'NotFoundError') {
160
- return 'No camera found. Please connect a camera.';
161
- }
162
- if (err?.name === 'NotReadableError') {
163
- return 'Camera is already in use by another application.';
164
- }
165
- if (err?.target?.url) {
166
- return `WebSocket connection failed: ${err.target.url}. Check that the backend server is running.`;
167
- }
168
- return err?.message || 'Failed to start focus session.';
169
- };
170
-
171
- useEffect(() => {
172
- if (!videoManager) return;
173
-
174
- const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate;
175
- const originalOnSessionEnd = videoManager.callbacks.onSessionEnd;
176
-
177
- videoManager.callbacks.onStatusUpdate = (isFocused) => {
178
- setTimelineEvents((prev) => {
179
- const newEvents = [...prev, { isFocused, timestamp: Date.now() }];
180
- if (newEvents.length > 60) newEvents.shift();
181
- return newEvents;
182
- });
183
- setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused);
184
- if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused);
185
- };
186
-
187
- videoManager.callbacks.onSessionEnd = (summary) => {
188
- setFocusState(FOCUS_STATES.pending);
189
- setCameraReady(false);
190
- if (originalOnSessionEnd) originalOnSessionEnd(summary);
191
- };
192
-
193
- videoManager.callbacks.onCalibrationUpdate = (cal) => {
194
- setCalibration(cal && cal.active ? { ...cal } : null);
195
- };
196
-
197
- const statsInterval = setInterval(() => {
198
- if (videoManager && videoManager.getStats) {
199
- setStats(videoManager.getStats());
200
- }
201
- }, 1000);
202
-
203
- return () => {
204
- if (videoManager) {
205
- videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
206
- videoManager.callbacks.onSessionEnd = originalOnSessionEnd;
207
- videoManager.callbacks.onCalibrationUpdate = null;
208
- }
209
- clearInterval(statsInterval);
210
- };
211
- }, [videoManager]);
212
-
213
- // Fetch available models on mount
214
- useEffect(() => {
215
- fetch('/api/models')
216
- .then((res) => res.json())
217
- .then((data) => {
218
- if (data.available) setAvailableModels(data.available);
219
- if (data.current) {
220
- // If L2CS was the active model, switch to a base model + enable boost
221
- if (data.current === 'l2cs') {
222
- const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp';
223
- setCurrentModel(fallback);
224
- handleModelChange(fallback);
225
- } else {
226
- setCurrentModel(data.current);
227
- }
228
- }
229
- if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost);
230
- if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available);
231
- })
232
- .catch((err) => console.error('Failed to fetch models:', err));
233
- }, []);
234
-
235
- useEffect(() => {
236
- if (flowStep === FLOW_STEPS.ready && cameraReady && !videoManager?.isStreaming) {
237
- startPreviewLoop();
238
- return;
239
- }
240
- stopPreviewLoop();
241
- }, [cameraReady, flowStep, videoManager?.isStreaming]);
242
-
243
- useEffect(() => {
244
- if (!isActive) {
245
- stopPreviewLoop();
246
- }
247
- }, [isActive]);
248
-
249
- useEffect(() => {
250
- return () => {
251
- stopPreviewLoop();
252
- if (pipVideoRef.current) {
253
- pipVideoRef.current.pause();
254
- pipVideoRef.current.srcObject = null;
255
- }
256
- if (pipStreamRef.current) {
257
- pipStreamRef.current.getTracks().forEach((t) => t.stop());
258
- pipStreamRef.current = null;
259
- }
260
- };
261
- }, []);
262
-
263
- // Poll server CPU/memory for UI
264
- useEffect(() => {
265
- const fetchSystem = () => {
266
- fetch('/api/stats/system')
267
- .then(res => res.json())
268
- .then(data => setSystemStats(data))
269
- .catch(() => setSystemStats(null));
270
- };
271
- fetchSystem();
272
- const interval = setInterval(fetchSystem, 3000);
273
- return () => clearInterval(interval);
274
- }, []);
275
-
276
- const handleModelChange = async (modelName) => {
277
- try {
278
- const res = await fetch('/api/settings', {
279
- method: 'PUT',
280
- headers: { 'Content-Type': 'application/json' },
281
- body: JSON.stringify({ model_name: modelName })
282
- });
283
- const result = await res.json();
284
- if (result.updated) {
285
- setCurrentModel(modelName);
286
- }
287
- } catch (err) {
288
- console.error('Failed to switch model:', err);
289
- }
290
- };
291
-
292
- const handleEnableCamera = async () => {
293
- if (!videoManager) return;
294
-
295
- try {
296
- setCameraError('');
297
- await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
298
- setCameraReady(true);
299
- setFlowStep(FLOW_STEPS.ready);
300
- setFocusState(FOCUS_STATES.pending);
301
- } catch (err) {
302
- const errorMessage = getErrorMessage(err);
303
- setCameraError(errorMessage);
304
- console.error('Camera init error:', err);
305
- }
306
- };
307
-
308
- const applyEyeGazeChange = async (enable, withCalibration = true) => {
309
- try {
310
- const res = await fetch('/api/settings', {
311
- method: 'PUT',
312
- headers: { 'Content-Type': 'application/json' },
313
- body: JSON.stringify({ l2cs_boost: enable })
314
- });
315
- if (!res.ok) return;
316
- setL2csBoost(enable);
317
-
318
- if (enable && withCalibration && videoManager && videoManager.isStreaming) {
319
- videoManager.startCalibration();
320
- } else if (!enable && videoManager) {
321
- videoManager.cancelCalibration();
322
- }
323
- } catch (err) {
324
- console.error('Failed to toggle eye gaze:', err);
325
- }
326
- };
327
-
328
- const handleEyeGazeToggle = async () => {
329
- const next = !l2csBoost;
330
- await applyEyeGazeChange(next, false);
331
- };
332
-
333
- const handleStart = async () => {
334
- try {
335
- setIsStarting(true);
336
- setSessionResult(null);
337
- setTimelineEvents([]);
338
- setFocusState(FOCUS_STATES.pending);
339
- setCameraError('');
340
-
341
- if (!cameraReady) {
342
- await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
343
- setCameraReady(true);
344
- setFlowStep(FLOW_STEPS.ready);
345
- }
346
-
347
- await videoManager.startStreaming();
348
- } catch (err) {
349
- const errorMessage = getErrorMessage(err);
350
- setCameraError(errorMessage);
351
- setFocusState(FOCUS_STATES.pending);
352
- console.error('Start error:', err);
353
- alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`);
354
- } finally {
355
- setIsStarting(false);
356
- }
357
- };
358
-
359
- const handleStop = async () => {
360
- if (videoManager) {
361
- await videoManager.stopStreaming();
362
- }
363
- try {
364
- if (document.pictureInPictureElement === pipVideoRef.current) {
365
- await document.exitPictureInPicture();
366
- }
367
- } catch (_) {}
368
- if (pipVideoRef.current) {
369
- pipVideoRef.current.pause();
370
- pipVideoRef.current.srcObject = null;
371
- }
372
- if (pipStreamRef.current) {
373
- pipStreamRef.current.getTracks().forEach((t) => t.stop());
374
- pipStreamRef.current = null;
375
- }
376
- stopPreviewLoop();
377
- setFocusState(FOCUS_STATES.pending);
378
- setCameraReady(false);
379
- };
380
-
381
- const handlePiP = async () => {
382
- try {
383
- //
384
- if (!videoManager || !videoManager.isStreaming) {
385
- alert('Please start the video first.');
386
- return;
387
- }
388
-
389
- if (!displayCanvasRef.current) {
390
- alert('Video not ready.');
391
- return;
392
- }
393
-
394
- //
395
- if (document.pictureInPictureElement === pipVideoRef.current) {
396
- await document.exitPictureInPicture();
397
- console.log('PiP exited');
398
- return;
399
- }
400
-
401
- //
402
- if (!document.pictureInPictureEnabled) {
403
- alert('Picture-in-Picture is not supported in this browser.');
404
- return;
405
- }
406
-
407
- //
408
- const pipVideo = pipVideoRef.current;
409
- if (!pipVideo) {
410
- alert('PiP video element not ready.');
411
- return;
412
- }
413
-
414
- const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function';
415
-
416
- //
417
- let stream = pipStreamRef.current;
418
- if (!stream) {
419
- const capture = displayCanvasRef.current.captureStream;
420
- if (typeof capture === 'function') {
421
- stream = capture.call(displayCanvasRef.current, 30);
422
- }
423
- if (!stream || stream.getTracks().length === 0) {
424
- const cameraStream = localVideoRef.current?.srcObject;
425
- if (!cameraStream) {
426
- alert('Camera stream not ready.');
427
- return;
428
- }
429
- stream = cameraStream;
430
- }
431
- pipStreamRef.current = stream;
432
- }
433
-
434
- //
435
- if (!stream || stream.getTracks().length === 0) {
436
- alert('Failed to capture video stream from canvas.');
437
- return;
438
- }
439
-
440
- pipVideo.srcObject = stream;
441
-
442
- //
443
- if (pipVideo.readyState < 2) {
444
- await new Promise((resolve) => {
445
- const onReady = () => {
446
- pipVideo.removeEventListener('loadeddata', onReady);
447
- pipVideo.removeEventListener('canplay', onReady);
448
- resolve();
449
- };
450
- pipVideo.addEventListener('loadeddata', onReady);
451
- pipVideo.addEventListener('canplay', onReady);
452
- //
453
- setTimeout(resolve, 600);
454
- });
455
- }
456
-
457
- try {
458
- await pipVideo.play();
459
- } catch (_) {}
460
-
461
- //
462
- if (isSafariPiP) {
463
- try {
464
- pipVideo.webkitSetPresentationMode('picture-in-picture');
465
- console.log('PiP activated (Safari)');
466
- return;
467
- } catch (e) {
468
- //
469
- const cameraStream = localVideoRef.current?.srcObject;
470
- if (cameraStream && cameraStream !== pipVideo.srcObject) {
471
- pipVideo.srcObject = cameraStream;
472
- try {
473
- await pipVideo.play();
474
- } catch (_) {}
475
- pipVideo.webkitSetPresentationMode('picture-in-picture');
476
- console.log('PiP activated (Safari fallback)');
477
- return;
478
- }
479
- throw e;
480
- }
481
- }
482
-
483
- //
484
- if (typeof pipVideo.requestPictureInPicture === 'function') {
485
- await pipVideo.requestPictureInPicture();
486
- console.log('PiP activated');
487
- } else {
488
- alert('Picture-in-Picture is not supported in this browser.');
489
- }
490
-
491
- } catch (err) {
492
- console.error('PiP error:', err);
493
- alert(`Failed to enter Picture-in-Picture: ${err.message}`);
494
- }
495
- };
496
-
497
- const handleFloatingWindow = () => {
498
- handlePiP();
499
- };
500
-
501
- const handleFrameChange = (val) => {
502
- const rate = parseInt(val, 10);
503
- setCurrentFrame(rate);
504
- if (videoManager) {
505
- videoManager.setFrameRate(rate);
506
- }
507
- };
508
-
509
- const handlePreview = () => {
510
- if (!videoManager || !videoManager.isStreaming) {
511
- alert('Please start a session first.');
512
- return;
513
- }
514
-
515
- //
516
- const currentStats = videoManager.getStats();
517
-
518
- if (!currentStats.sessionId) {
519
- alert('No active session.');
520
- return;
521
- }
522
-
523
- const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000);
524
- const totalFrames = currentStats.framesProcessed || 0;
525
- const focusedFrames = currentStats.focusedFrames ?? 0;
526
- const focusScore = totalFrames > 0 ? focusedFrames / totalFrames : 0;
527
-
528
- setSessionResult({
529
- duration_seconds: sessionDuration,
530
- focus_score: focusScore,
531
- total_frames: totalFrames,
532
- focused_frames: focusedFrames
533
- });
534
- };
535
-
536
- const handleCloseOverlay = () => {
537
- setSessionResult(null);
538
- };
539
-
540
- const pageStyle = isActive
541
- ? undefined
542
- : {
543
- position: 'absolute',
544
- width: '1px',
545
- height: '1px',
546
- overflow: 'hidden',
547
- opacity: 0,
548
- pointerEvents: 'none'
549
- };
550
-
551
- const focusStateLabel = {
552
- [FOCUS_STATES.pending]: 'Pending',
553
- [FOCUS_STATES.focused]: 'Focused',
554
- [FOCUS_STATES.notFocused]: 'Not Focused'
555
- }[focusState];
556
-
557
- const introHighlights = [
558
- {
559
- title: 'Live focus tracking',
560
- text: 'Head pose, gaze, and eye openness are read continuously during the session.'
561
- },
562
- {
563
- title: 'Quick setup',
564
- text: 'Front-facing light and a stable camera angle give the cleanest preview.'
565
- },
566
- {
567
- title: 'Private by default',
568
- text: 'Only session metadata is stored, not the raw camera footage.'
569
- }
570
- ];
571
-
572
- const permissionSteps = [
573
- {
574
- title: 'Allow browser access',
575
- text: 'Approve the camera prompt so the preview can appear immediately.'
576
- },
577
- {
578
- title: 'Check your framing',
579
- text: 'Keep your face visible and centered for more stable landmark detection.'
580
- },
581
- {
582
- title: 'Start when ready',
583
- text: 'After the preview appears, use the page controls to begin or stop.'
584
- }
585
- ];
586
-
587
- const renderIntroCard = () => {
588
- if (flowStep === FLOW_STEPS.intro) {
589
- return (
590
- <div className="focus-flow-overlay">
591
- <div className="focus-flow-card">
592
- <div className="focus-flow-header">
593
- <div>
594
- <div className="focus-flow-eyebrow">Focus Session</div>
595
- <h2>Before you begin</h2>
596
- </div>
597
- <div className="focus-flow-icon">
598
- <HelloIcon />
599
- </div>
600
- </div>
601
-
602
- <p className="focus-flow-lead">
603
- The focus page uses your live camera preview to estimate attention in real time.
604
- Review the setup notes below, then continue to camera access.
605
- </p>
606
-
607
- <div className="focus-flow-grid">
608
- {introHighlights.map((item) => (
609
- <article key={item.title} className="focus-flow-panel">
610
- <h3>{item.title}</h3>
611
- <p>{item.text}</p>
612
- </article>
613
- ))}
614
- </div>
615
-
616
- <div className="focus-flow-glasses-note">
617
- <strong>Wearing glasses?</strong> Glasses may reduce detection accuracy on some models. If results seem inaccurate, try switching to a different model (e.g. Geometric or MLP).
618
- </div>
619
-
620
- <div className="focus-flow-footer">
621
- <div className="focus-flow-note">
622
- You can still change frame rate and available model options after the preview loads.
623
- </div>
624
- <button className="focus-flow-button" onClick={() => setFlowStep(FLOW_STEPS.permission)}>
625
- Continue
626
- </button>
627
- </div>
628
- </div>
629
- </div>
630
- );
631
- }
632
-
633
- if (flowStep === FLOW_STEPS.permission && !cameraReady) {
634
- return (
635
- <div className="focus-flow-overlay">
636
- <div className="focus-flow-card">
637
- <div className="focus-flow-header">
638
- <div>
639
- <div className="focus-flow-eyebrow">Camera Setup</div>
640
- <h2>Enable camera access</h2>
641
- </div>
642
- <div className="focus-flow-icon">
643
- <CameraIcon />
644
- </div>
645
- </div>
646
-
647
- <p className="focus-flow-lead">
648
- Once access is granted, your preview appears here and the rest of the Focus page
649
- behaves like the other dashboard screens.
650
- </p>
651
-
652
- <div className="focus-flow-steps">
653
- {permissionSteps.map((item, index) => (
654
- <div key={item.title} className="focus-flow-step">
655
- <div className="focus-flow-step-number">{index + 1}</div>
656
- <div className="focus-flow-step-copy">
657
- <h3>{item.title}</h3>
658
- <p>{item.text}</p>
659
- </div>
660
- </div>
661
- ))}
662
- </div>
663
-
664
- {cameraError ? <div className="focus-inline-error">{cameraError}</div> : null}
665
-
666
- <div className="focus-flow-footer">
667
- <button
668
- type="button"
669
- className="focus-flow-secondary"
670
- onClick={() => setFlowStep(FLOW_STEPS.intro)}
671
- >
672
- Back
673
- </button>
674
- <button className="focus-flow-button" onClick={handleEnableCamera}>
675
- Enable Camera
676
- </button>
677
- </div>
678
- </div>
679
- </div>
680
- );
681
- }
682
-
683
- return null;
684
- };
685
-
686
- return (
687
- <main id="page-b" className="page" style={pageStyle}>
688
- {renderIntroCard()}
689
-
690
- <section id="display-area" className="focus-display-shell">
691
- <video
692
- ref={pipVideoRef}
693
- muted
694
- playsInline
695
- autoPlay
696
- style={{
697
- position: 'absolute',
698
- width: '1px',
699
- height: '1px',
700
- opacity: 0,
701
- pointerEvents: 'none'
702
- }}
703
- />
704
- {/* local video (hidden, for capture) */}
705
- <video
706
- ref={localVideoRef}
707
- muted
708
- playsInline
709
- autoPlay
710
- style={{ display: 'none' }}
711
- />
712
-
713
- {/* processed video (canvas) */}
714
- <canvas
715
- ref={displayCanvasRef}
716
- width={640}
717
- height={480}
718
- style={{
719
- width: '100%',
720
- height: '100%',
721
- objectFit: 'contain',
722
- backgroundColor: '#101010'
723
- }}
724
- />
725
-
726
- {flowStep === FLOW_STEPS.ready ? (
727
- <>
728
- <div className={`focus-state-pill ${focusState}`}>
729
- <span className="focus-state-dot" />
730
- {focusStateLabel}
731
- </div>
732
- {!cameraReady && !videoManager?.isStreaming ? (
733
- <div className="focus-idle-overlay">
734
- <p>Camera is paused.</p>
735
- <span>Use Start to enable the camera and begin detection.</span>
736
- </div>
737
- ) : null}
738
- </>
739
- ) : null}
740
-
741
- {sessionResult && (
742
- <div className="session-result-overlay">
743
- <h3>Session Complete!</h3>
744
- <div className="result-item">
745
- <span className="label">Duration:</span>
746
- <span className="value">{formatDuration(sessionResult.duration_seconds)}</span>
747
- </div>
748
- <div className="result-item">
749
- <span className="label">Focus Score:</span>
750
- <span className="value">{(sessionResult.focus_score * 100).toFixed(1)}%</span>
751
- </div>
752
-
753
- <button
754
- onClick={handleCloseOverlay}
755
- style={{
756
- marginTop: '20px',
757
- padding: '8px 20px',
758
- background: 'transparent',
759
- border: '1px solid white',
760
- color: 'white',
761
- borderRadius: '20px',
762
- cursor: 'pointer'
763
- }}
764
- >
765
- Close
766
- </button>
767
- </div>
768
- )}
769
-
770
- </section>
771
-
772
- {flowStep === FLOW_STEPS.ready ? (
773
- <>
774
- {/* Model selector */}
775
- {availableModels.length > 0 ? (
776
- <section className="focus-model-strip">
777
- <span className="focus-model-label">Model:</span>
778
- {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => (
779
- <button
780
- key={name}
781
- onClick={() => handleModelChange(name)}
782
- className={`focus-model-button ${currentModel === name ? 'active' : ''}`}
783
- >
784
- {MODEL_INFO[name]?.label || name}
785
- </button>
786
- ))}
787
-
788
- {l2csBoostAvailable && (
789
- <>
790
- <span className="focus-model-sep" />
791
- <button
792
- onClick={handleEyeGazeToggle}
793
- className={`eye-gaze-toggle ${l2csBoost ? 'on' : 'off'}`}
794
- title={l2csBoost ? 'Eye gaze tracking active — click to disable' : 'Enable eye gaze tracking (requires calibration)'}
795
- >
796
- <svg width="16" height="16" viewBox="0 0 16 16" className="eye-gaze-icon" aria-hidden="true">
797
- <ellipse cx="8" cy="8" rx="7" ry="4.5" fill="none" stroke="currentColor" strokeWidth="1.4" />
798
- <circle cx="8" cy="8" r="2.2" fill="currentColor" />
799
- </svg>
800
- {l2csBoost ? 'Eye Gaze On' : 'Eye Gaze'}
801
- </button>
802
- {l2csBoost && stats && stats.isStreaming && (
803
- <button
804
- onClick={() => videoManager && videoManager.startCalibration()}
805
- className="focus-model-button recalibrate"
806
- title="Re-run gaze calibration"
807
- >
808
- Recalibrate
809
- </button>
810
- )}
811
- </>
812
- )}
813
- </section>
814
- ) : null}
815
-
816
- {/* Server stats */}
817
- {systemStats && systemStats.cpu_percent != null && (
818
- <section className="focus-system-stats">
819
- <span>CPU: <strong>{systemStats.cpu_percent}%</strong></span>
820
- <span className="focus-system-stats-sep" />
821
- <span>RAM: <strong>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
822
- </section>
823
- )}
824
-
825
- <section id="timeline-area">
826
- <div className="timeline-label">Timeline</div>
827
- <div id="timeline-visuals">
828
- {timelineEvents.map((event, index) => (
829
- <div
830
- key={index}
831
- className="timeline-block"
832
- style={{
833
- backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
834
- width: '10px',
835
- height: '20px',
836
- borderRadius: '2px',
837
- flexShrink: 0
838
- }}
839
- title={event.isFocused ? 'Focused' : 'Distracted'}
840
- />
841
- ))}
842
- </div>
843
- <div id="timeline-line" />
844
- </section>
845
-
846
- <section id="control-panel">
847
- <button id="btn-cam-start" className="action-btn green" onClick={handleStart} disabled={isStarting}>
848
- {isStarting ? 'Starting...' : 'Start'}
849
- </button>
850
-
851
- <button id="btn-floating" className="action-btn blue" onClick={handlePiP}>
852
- Floating Window
853
- </button>
854
-
855
- <button id="btn-preview" className="action-btn orange" onClick={handlePreview}>
856
- Preview Result
857
- </button>
858
-
859
- <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
860
- Stop
861
- </button>
862
- </section>
863
-
864
- {cameraError ? (
865
- <div className="focus-inline-error focus-inline-error-standalone">{cameraError}</div>
866
- ) : null}
867
-
868
- {/* Model info card — below action buttons */}
869
- {MODEL_INFO[currentModel] && (
870
- <section className="model-card">
871
- <div className="model-card-header">
872
- <h3 className="model-card-title">{MODEL_INFO[currentModel].label}</h3>
873
- {MODEL_INFO[currentModel].badge && (
874
- <span className={MODEL_INFO[currentModel].badge === 'Baseline' ? 'model-card-badge-baseline' : 'model-card-badge'}>
875
- {MODEL_INFO[currentModel].badge}
876
- </span>
877
- )}
878
- </div>
879
- <p className="model-card-tagline">{MODEL_INFO[currentModel].tagline}</p>
880
-
881
- <div className="model-card-metrics">
882
- <div className="model-card-metric">
883
- <span className="model-card-metric-value">{MODEL_INFO[currentModel].accuracy}</span>
884
- <span className="model-card-metric-label">Accuracy</span>
885
- </div>
886
- <div className="model-card-metric">
887
- <span className="model-card-metric-value">{MODEL_INFO[currentModel].f1}</span>
888
- <span className="model-card-metric-label">F1 Score</span>
889
- </div>
890
- <div className="model-card-metric">
891
- <span className="model-card-metric-value">{MODEL_INFO[currentModel].auc}</span>
892
- <span className="model-card-metric-label">ROC-AUC</span>
893
- </div>
894
- <div className="model-card-metric">
895
- <span className="model-card-metric-value">{MODEL_INFO[currentModel].threshold}</span>
896
- <span className="model-card-metric-label">Threshold</span>
897
- </div>
898
- </div>
899
-
900
- <div className="model-card-details">
901
- <div className="model-card-section">
902
- <h4>How it works</h4>
903
- <p>{MODEL_INFO[currentModel].how}</p>
904
- </div>
905
- <div className="model-card-section">
906
- <h4>Features used</h4>
907
- <p>{MODEL_INFO[currentModel].features}</p>
908
- </div>
909
- <div className="model-card-section">
910
- <h4>Strengths</h4>
911
- <p>{MODEL_INFO[currentModel].strengths}</p>
912
- </div>
913
- </div>
914
-
915
- <div className="model-card-eval">
916
- Evaluated with {MODEL_INFO[currentModel].evaluation}
917
- </div>
918
- </section>
919
- )}
920
-
921
- <section id="frame-control">
922
- <label htmlFor="frame-slider">Frame Rate (FPS)</label>
923
- <input
924
- type="range"
925
- id="frame-slider"
926
- min="10"
927
- max="30"
928
- value={currentFrame}
929
- onChange={(e) => handleFrameChange(e.target.value)}
930
- />
931
- <input
932
- type="number"
933
- id="frame-input"
934
- min="10"
935
- max="30"
936
- value={currentFrame}
937
- onChange={(e) => handleFrameChange(e.target.value)}
938
- />
939
- </section>
940
- </>
941
- ) : null}
942
-
943
- {/* Calibration overlay (fixed fullscreen, must be outside overflow:hidden containers) */}
944
- <CalibrationOverlay calibration={calibration} videoManager={videoManager} />
945
- </main>
946
- );
947
- }
948
-
949
- export default FocusPageLocal;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import React, { useState, useEffect, useRef } from 'react';
2
+ import CalibrationOverlay from './CalibrationOverlay';
3
+
4
+ const FLOW_STEPS = {
5
+ intro: 'intro',
6
+ permission: 'permission',
7
+ ready: 'ready'
8
+ };
9
+
10
+ const FOCUS_STATES = {
11
+ pending: 'pending',
12
+ focused: 'focused',
13
+ notFocused: 'not-focused'
14
+ };
15
+
16
+ function HelloIcon() {
17
+ return (
18
+ <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
19
+ <circle cx="48" cy="48" r="40" fill="#007BFF" />
20
+ <path d="M30 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
21
+ <path d="M54 38c0-4 2.7-7 6-7s6 3 6 7" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
22
+ <path d="M30 52c3 11 10 17 18 17s15-6 18-17" fill="none" stroke="#fff" strokeWidth="6" strokeLinecap="round" />
23
+ </svg>
24
+ );
25
+ }
26
+
27
+ function CameraIcon() {
28
+ return (
29
+ <svg width="110" height="110" viewBox="0 0 110 110" aria-hidden="true">
30
+ <rect x="30" y="36" width="50" height="34" rx="5" fill="none" stroke="#007BFF" strokeWidth="6" />
31
+ <path d="M24 72h62c0 9-7 16-16 16H40c-9 0-16-7-16-16Z" fill="none" stroke="#007BFF" strokeWidth="6" />
32
+ <path d="M55 28v8" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
33
+ <circle cx="55" cy="36" r="14" fill="none" stroke="#007BFF" strokeWidth="6" />
34
+ <circle cx="55" cy="36" r="4" fill="#007BFF" />
35
+ <path d="M46 83h18" stroke="#007BFF" strokeWidth="6" strokeLinecap="round" />
36
+ </svg>
37
+ );
38
+ }
39
+
40
+ const MODEL_ORDER = ['hybrid', 'xgboost', 'mlp', 'geometric'];
41
+
42
+ const MODEL_INFO = {
43
+ hybrid: {
44
+ label: 'Hybrid',
45
+ tagline: 'Best overall — combines ML with geometric scoring',
46
+ how: 'Fuses XGBoost predictions (30%) with geometric face/eye scores (70%). Uses a weighted blend tuned with LOPO evaluation.',
47
+ accuracy: 'N/A',
48
+ f1: '0.8409',
49
+ auc: 'N/A',
50
+ threshold: '0.46',
51
+ evaluation: 'LOPO tuning (9 participants, 144K frames)',
52
+ features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
53
+ strengths: 'Most robust across different people. Latest LOPO mean F1 is 0.8409 at w_mlp=0.3.',
54
+ badge: 'Recommended',
55
+ },
56
+ xgboost: {
57
+ label: 'XGBoost',
58
+ tagline: 'Highest raw accuracy — gradient-boosted decision trees',
59
+ how: 'Ensemble of 600 decision trees (max depth 8). Each tree learns to correct errors from previous trees. Outputs probability of focused state.',
60
+ accuracy: '95.87%',
61
+ f1: '0.9585',
62
+ auc: '0.9908',
63
+ threshold: '0.38',
64
+ evaluation: 'Random split test (15%) + LOPO thresholds',
65
+ features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
66
+ strengths: 'Strong pattern recognition and fast inference. LOPO: AUC 0.8695, optimal threshold 0.280, F1 0.8549.',
67
+ badge: null,
68
+ },
69
+ mlp: {
70
+ label: 'MLP',
71
+ tagline: 'Lightweight neural network — fast and efficient',
72
+ how: 'Two-layer neural network (64→32 neurons). Takes 10 face features, applies learned weights, outputs focused/unfocused probability via softmax.',
73
+ accuracy: '92.92%',
74
+ f1: '0.9287',
75
+ auc: '0.9714',
76
+ threshold: '0.23',
77
+ evaluation: 'Random split test (15%) + LOPO thresholds',
78
+ features: '10 features: head deviation, face score, eye scores (EAR), gaze offset, pitch, horizontal gaze, PERCLOS',
79
+ strengths: 'Fastest inference and smallest model size. LOPO: AUC 0.8624, optimal threshold 0.228, F1 0.8578.',
80
+ badge: null,
81
+ },
82
+ geometric: {
83
+ label: 'Geometric',
84
+ tagline: 'Baseline only — hardcoded thresholds, no learning',
85
+ how: 'Uses fixed thresholds on head orientation (70%) and eye openness (30%). No training — just hand-tuned rules on 478 face landmarks. Cannot adapt to new faces or environments.',
86
+ accuracy: 'N/A',
87
+ f1: '0.8195',
88
+ auc: 'N/A',
89
+ threshold: '0.55',
90
+ evaluation: 'LOPO geometric sweep',
91
+ features: 'Head yaw/pitch/roll angles, eye aspect ratio (EAR), iris gaze offset, mouth aspect ratio (MAR)',
92
+ strengths: 'No model files needed. Useful fallback when model checkpoints are unavailable.',
93
+ badge: 'Baseline',
94
+ },
95
+ };
96
+
97
+ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActive, isTutorialActive, setIsTutorialActive, setHasSeenTutorial }) {
98
+ const [currentFrame, setCurrentFrame] = useState(15);
99
+ const [timelineEvents, setTimelineEvents] = useState([]);
100
+ const [stats, setStats] = useState(null);
101
+ const [systemStats, setSystemStats] = useState(null);
102
+ const [availableModels, setAvailableModels] = useState([]);
103
+ const [currentModel, setCurrentModel] = useState('mlp');
104
+ const [flowStep, setFlowStep] = useState(FLOW_STEPS.ready);
105
+ const [cameraReady, setCameraReady] = useState(false);
106
+ const [isStarting, setIsStarting] = useState(false);
107
+ const [focusState, setFocusState] = useState(FOCUS_STATES.pending);
108
+ const [cameraError, setCameraError] = useState('');
109
+ const [calibration, setCalibration] = useState(null);
110
+ const [l2csBoost, setL2csBoost] = useState(false);
111
+ const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
112
+ const [showEyeGazeModal, setShowEyeGazeModal] = useState(false);
113
+ const [eyeGazeDontShow, setEyeGazeDontShow] = useState(
114
+ () => localStorage.getItem('focusguard_eyegaze_noshowalert') === 'true'
115
+ );
116
+
117
+ const localVideoRef = useRef(null);
118
+ const displayCanvasRef = useRef(null);
119
+ const pipVideoRef = useRef(null);
120
+ const pipStreamRef = useRef(null);
121
+ const previewFrameRef = useRef(null);
122
+
123
+ useEffect(() => {
124
+ if (isTutorialActive) {
125
+ setFlowStep(FLOW_STEPS.intro);
126
+ } else {
127
+ setFlowStep(FLOW_STEPS.ready);
128
+ }
129
+ }, [isTutorialActive]);
130
+
131
+ const formatDuration = (seconds) => {
132
+ if (seconds === 0) return '0s';
133
+ const mins = Math.floor(seconds / 60);
134
+ const secs = Math.floor(seconds % 60);
135
+ return `${mins}m ${secs}s`;
136
+ };
137
+
138
+ const stopPreviewLoop = () => {
139
+ if (previewFrameRef.current) {
140
+ cancelAnimationFrame(previewFrameRef.current);
141
+ previewFrameRef.current = null;
142
+ }
143
+ };
144
+
145
+ const startPreviewLoop = () => {
146
+ stopPreviewLoop();
147
+ const renderPreview = () => {
148
+ const canvas = displayCanvasRef.current;
149
+ const video = localVideoRef.current;
150
+
151
+ if (!canvas || !video || !cameraReady || videoManager?.isStreaming) {
152
+ previewFrameRef.current = null;
153
+ return;
154
+ }
155
+
156
+ if (video.readyState >= 2) {
157
+ const ctx = canvas.getContext('2d');
158
+ ctx.drawImage(video, 0, 0, canvas.width, canvas.height);
159
+ }
160
+
161
+ previewFrameRef.current = requestAnimationFrame(renderPreview);
162
+ };
163
+
164
+ previewFrameRef.current = requestAnimationFrame(renderPreview);
165
+ };
166
+
167
+ const getErrorMessage = (err) => {
168
+ if (err?.name === 'NotAllowedError') {
169
+ return 'Camera permission denied. Please allow camera access.';
170
+ }
171
+ if (err?.name === 'NotFoundError') {
172
+ return 'No camera found. Please connect a camera.';
173
+ }
174
+ if (err?.name === 'NotReadableError') {
175
+ return 'Camera is already in use by another application.';
176
+ }
177
+ if (err?.target?.url) {
178
+ return `WebSocket connection failed: ${err.target.url}. Check that the backend server is running.`;
179
+ }
180
+ return err?.message || 'Failed to start focus session.';
181
+ };
182
+
183
+ useEffect(() => {
184
+ if (!videoManager) return;
185
+
186
+ const originalOnStatusUpdate = videoManager.callbacks.onStatusUpdate;
187
+ const originalOnSessionEnd = videoManager.callbacks.onSessionEnd;
188
+
189
+ videoManager.callbacks.onStatusUpdate = (isFocused) => {
190
+ setTimelineEvents((prev) => {
191
+ const newEvents = [...prev, { isFocused, timestamp: Date.now() }];
192
+ if (newEvents.length > 60) newEvents.shift();
193
+ return newEvents;
194
+ });
195
+ setFocusState(isFocused ? FOCUS_STATES.focused : FOCUS_STATES.notFocused);
196
+ if (originalOnStatusUpdate) originalOnStatusUpdate(isFocused);
197
+ };
198
+
199
+ videoManager.callbacks.onSessionEnd = (summary) => {
200
+ setFocusState(FOCUS_STATES.pending);
201
+ setCameraReady(false);
202
+ if (originalOnSessionEnd) originalOnSessionEnd(summary);
203
+ };
204
+
205
+ videoManager.callbacks.onCalibrationUpdate = (cal) => {
206
+ setCalibration(cal && cal.active ? { ...cal } : null);
207
+ };
208
+
209
+ const statsInterval = setInterval(() => {
210
+ if (videoManager && videoManager.getStats) {
211
+ setStats(videoManager.getStats());
212
+ }
213
+ }, 1000);
214
+
215
+ return () => {
216
+ if (videoManager) {
217
+ videoManager.callbacks.onStatusUpdate = originalOnStatusUpdate;
218
+ videoManager.callbacks.onSessionEnd = originalOnSessionEnd;
219
+ videoManager.callbacks.onCalibrationUpdate = null;
220
+ }
221
+ clearInterval(statsInterval);
222
+ };
223
+ }, [videoManager]);
224
+
225
+ useEffect(() => {
226
+ fetch('/api/models')
227
+ .then((res) => res.json())
228
+ .then((data) => {
229
+ if (data.available) setAvailableModels(data.available);
230
+ if (data.current) {
231
+ if (data.current === 'l2cs') {
232
+ const fallback = data.available.find((m) => m !== 'l2cs') || 'mlp';
233
+ setCurrentModel(fallback);
234
+ handleModelChange(fallback);
235
+ } else {
236
+ setCurrentModel(data.current);
237
+ }
238
+ }
239
+ if (data.l2cs_boost !== undefined) setL2csBoost(data.l2cs_boost);
240
+ if (data.l2cs_boost_available !== undefined) setL2csBoostAvailable(data.l2cs_boost_available);
241
+ })
242
+ .catch((err) => console.error('Failed to fetch models:', err));
243
+ }, []);
244
+
245
+ useEffect(() => {
246
+ if (flowStep === FLOW_STEPS.ready && cameraReady && !videoManager?.isStreaming) {
247
+ startPreviewLoop();
248
+ return;
249
+ }
250
+ stopPreviewLoop();
251
+ }, [cameraReady, flowStep, videoManager?.isStreaming]);
252
+
253
+ useEffect(() => {
254
+ if (!isActive) {
255
+ stopPreviewLoop();
256
+ }
257
+ }, [isActive]);
258
+
259
+ useEffect(() => {
260
+ return () => {
261
+ stopPreviewLoop();
262
+ if (pipVideoRef.current) {
263
+ pipVideoRef.current.pause();
264
+ pipVideoRef.current.srcObject = null;
265
+ }
266
+ if (pipStreamRef.current) {
267
+ pipStreamRef.current.getTracks().forEach((t) => t.stop());
268
+ pipStreamRef.current = null;
269
+ }
270
+ };
271
+ }, []);
272
+
273
+ useEffect(() => {
274
+ const fetchSystem = () => {
275
+ fetch('/api/stats/system')
276
+ .then(res => res.json())
277
+ .then(data => setSystemStats(data))
278
+ .catch(() => setSystemStats(null));
279
+ };
280
+ fetchSystem();
281
+ const interval = setInterval(fetchSystem, 3000);
282
+ return () => clearInterval(interval);
283
+ }, []);
284
+
285
+ const handleModelChange = async (modelName) => {
286
+ try {
287
+ const res = await fetch('/api/settings', {
288
+ method: 'PUT',
289
+ headers: { 'Content-Type': 'application/json' },
290
+ body: JSON.stringify({ model_name: modelName })
291
+ });
292
+ const result = await res.json();
293
+ if (result.updated) {
294
+ setCurrentModel(modelName);
295
+ }
296
+ } catch (err) {
297
+ console.error('Failed to switch model:', err);
298
+ }
299
+ };
300
+
301
+ const closeTutorial = () => {
302
+ setFlowStep(FLOW_STEPS.ready);
303
+ setIsTutorialActive(false);
304
+ setHasSeenTutorial(true);
305
+ };
306
+
307
+ const handleEnableCamera = async () => {
308
+ if (!videoManager) return;
309
+ try {
310
+ setCameraError('');
311
+ await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
312
+ setCameraReady(true);
313
+ closeTutorial();
314
+ setFocusState(FOCUS_STATES.pending);
315
+ } catch (err) {
316
+ const errorMessage = getErrorMessage(err);
317
+ setCameraError(errorMessage);
318
+ console.error('Camera init error:', err);
319
+ }
320
+ };
321
+
322
+ const applyEyeGazeChange = async (enable, withCalibration = true) => {
323
+ try {
324
+ const res = await fetch('/api/settings', {
325
+ method: 'PUT',
326
+ headers: { 'Content-Type': 'application/json' },
327
+ body: JSON.stringify({ l2cs_boost: enable })
328
+ });
329
+ if (!res.ok) return;
330
+ setL2csBoost(enable);
331
+
332
+ if (enable && withCalibration && videoManager && videoManager.isStreaming) {
333
+ videoManager.startCalibration();
334
+ } else if (!enable && videoManager) {
335
+ videoManager.cancelCalibration();
336
+ }
337
+ } catch (err) {
338
+ console.error('Failed to toggle eye gaze:', err);
339
+ }
340
+ };
341
+
342
+ const handleEyeGazeToggle = async () => {
343
+ const next = !l2csBoost;
344
+ if (next && !eyeGazeDontShow) {
345
+ setShowEyeGazeModal(true);
346
+ return;
347
+ }
348
+ await applyEyeGazeChange(next, true);
349
+ };
350
+
351
+ const handleEyeGazeModalAction = async (withCalibration) => {
352
+ if (eyeGazeDontShow) {
353
+ localStorage.setItem('focusguard_eyegaze_noshowalert', 'true');
354
+ }
355
+ setShowEyeGazeModal(false);
356
+ await applyEyeGazeChange(true, withCalibration);
357
+ };
358
+
359
+ const handleStart = async () => {
360
+ try {
361
+ setIsStarting(true);
362
+ setSessionResult(null);
363
+ setTimelineEvents([]);
364
+ setFocusState(FOCUS_STATES.pending);
365
+ setCameraError('');
366
+
367
+ if (!cameraReady) {
368
+ await videoManager.initCamera(localVideoRef.current, displayCanvasRef.current);
369
+ setCameraReady(true);
370
+ setFlowStep(FLOW_STEPS.ready);
371
+ }
372
+
373
+ await videoManager.startStreaming();
374
+ } catch (err) {
375
+ const errorMessage = getErrorMessage(err);
376
+ setCameraError(errorMessage);
377
+ setFocusState(FOCUS_STATES.pending);
378
+ console.error('Start error:', err);
379
+ alert(`Failed to start: ${errorMessage}\n\nCheck browser console for details.`);
380
+ } finally {
381
+ setIsStarting(false);
382
+ }
383
+ };
384
+
385
+ const handleStop = async () => {
386
+ if (videoManager) {
387
+ await videoManager.stopStreaming();
388
+ }
389
+ try {
390
+ if (document.pictureInPictureElement === pipVideoRef.current) {
391
+ await document.exitPictureInPicture();
392
+ }
393
+ } catch (_) {}
394
+ if (pipVideoRef.current) {
395
+ pipVideoRef.current.pause();
396
+ pipVideoRef.current.srcObject = null;
397
+ }
398
+ if (pipStreamRef.current) {
399
+ pipStreamRef.current.getTracks().forEach((t) => t.stop());
400
+ pipStreamRef.current = null;
401
+ }
402
+ stopPreviewLoop();
403
+ setFocusState(FOCUS_STATES.pending);
404
+ setCameraReady(false);
405
+ };
406
+
407
+ const handlePiP = async () => {
408
+ try {
409
+ if (!videoManager || !videoManager.isStreaming) {
410
+ alert('Please start the video first.');
411
+ return;
412
+ }
413
+ if (!displayCanvasRef.current) {
414
+ alert('Video not ready.');
415
+ return;
416
+ }
417
+ if (document.pictureInPictureElement === pipVideoRef.current) {
418
+ await document.exitPictureInPicture();
419
+ console.log('PiP exited');
420
+ return;
421
+ }
422
+ if (!document.pictureInPictureEnabled) {
423
+ alert('Picture-in-Picture is not supported in this browser.');
424
+ return;
425
+ }
426
+
427
+ const pipVideo = pipVideoRef.current;
428
+ if (!pipVideo) {
429
+ alert('PiP video element not ready.');
430
+ return;
431
+ }
432
+
433
+ const isSafariPiP = typeof pipVideo.webkitSetPresentationMode === 'function';
434
+ let stream = pipStreamRef.current;
435
+ if (!stream) {
436
+ const capture = displayCanvasRef.current.captureStream;
437
+ if (typeof capture === 'function') {
438
+ stream = capture.call(displayCanvasRef.current, 30);
439
+ }
440
+ if (!stream || stream.getTracks().length === 0) {
441
+ const cameraStream = localVideoRef.current?.srcObject;
442
+ if (!cameraStream) {
443
+ alert('Camera stream not ready.');
444
+ return;
445
+ }
446
+ stream = cameraStream;
447
+ }
448
+ pipStreamRef.current = stream;
449
+ }
450
+
451
+ if (!stream || stream.getTracks().length === 0) {
452
+ alert('Failed to capture video stream from canvas.');
453
+ return;
454
+ }
455
+
456
+ pipVideo.srcObject = stream;
457
+ if (pipVideo.readyState < 2) {
458
+ await new Promise((resolve) => {
459
+ const onReady = () => {
460
+ pipVideo.removeEventListener('loadeddata', onReady);
461
+ pipVideo.removeEventListener('canplay', onReady);
462
+ resolve();
463
+ };
464
+ pipVideo.addEventListener('loadeddata', onReady);
465
+ pipVideo.addEventListener('canplay', onReady);
466
+ setTimeout(resolve, 600);
467
+ });
468
+ }
469
+
470
+ try {
471
+ await pipVideo.play();
472
+ } catch (_) {}
473
+
474
+ if (isSafariPiP) {
475
+ try {
476
+ pipVideo.webkitSetPresentationMode('picture-in-picture');
477
+ console.log('PiP activated (Safari)');
478
+ return;
479
+ } catch (e) {
480
+ const cameraStream = localVideoRef.current?.srcObject;
481
+ if (cameraStream && cameraStream !== pipVideo.srcObject) {
482
+ pipVideo.srcObject = cameraStream;
483
+ try {
484
+ await pipVideo.play();
485
+ } catch (_) {}
486
+ pipVideo.webkitSetPresentationMode('picture-in-picture');
487
+ console.log('PiP activated (Safari fallback)');
488
+ return;
489
+ }
490
+ throw e;
491
+ }
492
+ }
493
+
494
+ if (typeof pipVideo.requestPictureInPicture === 'function') {
495
+ await pipVideo.requestPictureInPicture();
496
+ console.log('PiP activated');
497
+ } else {
498
+ alert('Picture-in-Picture is not supported in this browser.');
499
+ }
500
+
501
+ } catch (err) {
502
+ console.error('PiP error:', err);
503
+ alert(`Failed to enter Picture-in-Picture: ${err.message}`);
504
+ }
505
+ };
506
+
507
+ const handleFloatingWindow = () => {
508
+ handlePiP();
509
+ };
510
+
511
+ const handleFrameChange = (val) => {
512
+ const rate = parseInt(val, 10);
513
+ setCurrentFrame(rate);
514
+ if (videoManager) {
515
+ videoManager.setFrameRate(rate);
516
+ }
517
+ };
518
+
519
+ const handlePreview = () => {
520
+ if (!videoManager || !videoManager.isStreaming) {
521
+ alert('Please start a session first.');
522
+ return;
523
+ }
524
+ const currentStats = videoManager.getStats();
525
+ if (!currentStats.sessionId) {
526
+ alert('No active session.');
527
+ return;
528
+ }
529
+ const sessionDuration = Math.floor((Date.now() - (videoManager.sessionStartTime || Date.now())) / 1000);
530
+ const totalFrames = currentStats.framesProcessed || 0;
531
+ const focusedFrames = currentStats.focusedFrames ?? 0;
532
+ const focusScore = totalFrames > 0 ? focusedFrames / totalFrames : 0;
533
+
534
+ setSessionResult({
535
+ duration_seconds: sessionDuration,
536
+ focus_score: focusScore,
537
+ total_frames: totalFrames,
538
+ focused_frames: focusedFrames
539
+ });
540
+ };
541
+
542
+ const handleCloseOverlay = () => {
543
+ setSessionResult(null);
544
+ };
545
+
546
+ const pageStyle = isActive
547
+ ? undefined
548
+ : {
549
+ position: 'absolute',
550
+ width: '1px',
551
+ height: '1px',
552
+ overflow: 'hidden',
553
+ opacity: 0,
554
+ pointerEvents: 'none'
555
+ };
556
+
557
+ const focusStateLabel = {
558
+ [FOCUS_STATES.pending]: 'Pending',
559
+ [FOCUS_STATES.focused]: 'Focused',
560
+ [FOCUS_STATES.notFocused]: 'Not Focused'
561
+ }[focusState];
562
+
563
+ const introHighlights = [
564
+ {
565
+ title: 'Live focus tracking',
566
+ text: 'Head pose, gaze, and eye openness are read continuously during the session.'
567
+ },
568
+ {
569
+ title: 'Quick setup',
570
+ text: 'Front-facing light and a stable camera angle give the cleanest preview.'
571
+ },
572
+ {
573
+ title: 'Private by default',
574
+ text: 'Only session metadata is stored, not the raw camera footage.'
575
+ },
576
+ {
577
+ title: 'Sync across devices',
578
+ text: 'Your history auto-saves to this browser. To switch devices, use the Data Management tools at the bottom of the My Records tab to export or import your data.'
579
+ }
580
+ ];
581
+
582
+ const permissionSteps = [
583
+ {
584
+ title: 'Allow browser access',
585
+ text: 'Approve the camera prompt so the preview can appear immediately.'
586
+ },
587
+ {
588
+ title: 'Check your framing',
589
+ text: 'Keep your face visible and centered for more stable landmark detection.'
590
+ },
591
+ {
592
+ title: 'Start when ready',
593
+ text: 'After the preview appears, use the page controls to begin or stop.'
594
+ }
595
+ ];
596
+
597
+ const renderIntroCard = () => {
598
+ if (flowStep === FLOW_STEPS.intro) {
599
+ return (
600
+ <div className="focus-flow-overlay">
601
+ <div className="focus-flow-card">
602
+ <div className="focus-flow-header">
603
+ <div>
604
+ <div className="focus-flow-eyebrow">Focus Session</div>
605
+ <h2>Before you begin</h2>
606
+ </div>
607
+ <div className="focus-flow-icon">
608
+ <HelloIcon />
609
+ </div>
610
+ </div>
611
+
612
+ <p className="focus-flow-lead">
613
+ The focus page uses your live camera preview to estimate attention in real time.
614
+ Review the setup notes below, then continue to camera access.
615
+ </p>
616
+
617
+ <div className="focus-flow-grid">
618
+ {introHighlights.map((item) => (
619
+ <article key={item.title} className="focus-flow-panel">
620
+ <h3>{item.title}</h3>
621
+ <p>{item.text}</p>
622
+ </article>
623
+ ))}
624
+ </div>
625
+
626
+ <div className="focus-flow-glasses-note">
627
+ <strong>Wearing glasses?</strong> Glasses may reduce detection accuracy on some models. If results seem inaccurate, try switching to a different model (e.g. Geometric or MLP).
628
+ </div>
629
+
630
+ <div className="focus-flow-footer">
631
+ <div className="focus-flow-note">
632
+ You can still change frame rate and available model options after the preview loads.
633
+ </div>
634
+ <div style={{ display: 'flex', gap: '10px' }}>
635
+ <button className="focus-flow-secondary" onClick={closeTutorial}>
636
+ Skip
637
+ </button>
638
+ <button className="focus-flow-button" onClick={() => setFlowStep(FLOW_STEPS.permission)}>
639
+ Continue
640
+ </button>
641
+ </div>
642
+ </div>
643
+ </div>
644
+ </div>
645
+ );
646
+ }
647
+
648
+ if (flowStep === FLOW_STEPS.permission && !cameraReady) {
649
+ return (
650
+ <div className="focus-flow-overlay">
651
+ <div className="focus-flow-card">
652
+ <div className="focus-flow-header">
653
+ <div>
654
+ <div className="focus-flow-eyebrow">Camera Setup</div>
655
+ <h2>Enable camera access</h2>
656
+ </div>
657
+ <div className="focus-flow-icon">
658
+ <CameraIcon />
659
+ </div>
660
+ </div>
661
+
662
+ <p className="focus-flow-lead">
663
+ Once access is granted, your preview appears here and the rest of the Focus page
664
+ behaves like the other dashboard screens.
665
+ </p>
666
+
667
+ <div className="focus-flow-steps">
668
+ {permissionSteps.map((item, index) => (
669
+ <div key={item.title} className="focus-flow-step">
670
+ <div className="focus-flow-step-number">{index + 1}</div>
671
+ <div className="focus-flow-step-copy">
672
+ <h3>{item.title}</h3>
673
+ <p>{item.text}</p>
674
+ </div>
675
+ </div>
676
+ ))}
677
+ </div>
678
+
679
+ {cameraError ? <div className="focus-inline-error">{cameraError}</div> : null}
680
+
681
+ <div className="focus-flow-footer">
682
+ <button
683
+ type="button"
684
+ className="focus-flow-secondary"
685
+ onClick={() => setFlowStep(FLOW_STEPS.intro)}
686
+ >
687
+ Back
688
+ </button>
689
+ <button className="focus-flow-button" onClick={handleEnableCamera}>
690
+ Enable Camera
691
+ </button>
692
+ </div>
693
+ </div>
694
+ </div>
695
+ );
696
+ }
697
+
698
+ return null;
699
+ };
700
+
701
+ const renderEyeGazeModal = () => {
702
+ if (!showEyeGazeModal) return null;
703
+ return (
704
+ <div className="focus-flow-overlay" style={{ zIndex: 2000 }}>
705
+ <div className="focus-flow-card">
706
+ <div className="focus-flow-header">
707
+ <div>
708
+ <div className="focus-flow-eyebrow">Eye Gaze Tracking</div>
709
+ <h2>Before you enable</h2>
710
+ </div>
711
+ <div className="focus-flow-icon">
712
+ <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
713
+ <ellipse cx="48" cy="48" rx="38" ry="24" fill="none" stroke="#007BFF" strokeWidth="5" />
714
+ <circle cx="48" cy="48" r="13" fill="none" stroke="#007BFF" strokeWidth="5" />
715
+ <circle cx="48" cy="48" r="5" fill="#007BFF" />
716
+ </svg>
717
+ </div>
718
+ </div>
719
+
720
+ <p className="focus-flow-lead">
721
+ Eye gaze tracking runs an additional deep neural network (L2CS-Net) alongside your current model.
722
+ Please read the notes below before proceeding.
723
+ </p>
724
+
725
+ <div className="focus-flow-grid">
726
+ <article className="focus-flow-panel focus-flow-panel-warn">
727
+ <h3>Performance impact</h3>
728
+ <p>Enabling eye gaze tracking increases CPU usage and may reduce frame rate. If the system feels sluggish, consider disabling it.</p>
729
+ </article>
730
+ <article className="focus-flow-panel">
731
+ <h3>Calibration (recommended)</h3>
732
+ <p>For best accuracy, calibrate by looking at 9 screen positions one at a time, followed by 1 validation point. The whole process takes about 30 seconds.</p>
733
+ </article>
734
+ </div>
735
+
736
+ <div className="focus-flow-steps">
737
+ <div className="focus-flow-step">
738
+ <div className="focus-flow-step-number">1</div>
739
+ <div className="focus-flow-step-copy">
740
+ <h3>Click "Start Calibration"</h3>
741
+ <p>A dot will appear on screen. Look directly at it and keep your gaze steady. It will cycle through 9 positions then show a final validation dot.</p>
742
+ </div>
743
+ </div>
744
+ <div className="focus-flow-step">
745
+ <div className="focus-flow-step-number">2</div>
746
+ <div className="focus-flow-step-copy">
747
+ <h3>Or skip for now</h3>
748
+ <p>Click "Skip" to enable eye gaze tracking without calibrating. You can recalibrate at any time using the "Recalibrate" button during a session.</p>
749
+ </div>
750
+ </div>
751
+ </div>
752
+
753
+ <label className="eye-gaze-modal-checkbox">
754
+ <input
755
+ type="checkbox"
756
+ checked={eyeGazeDontShow}
757
+ onChange={(e) => setEyeGazeDontShow(e.target.checked)}
758
+ />
759
+ Don't show this again
760
+ </label>
761
+
762
+ <div className="focus-flow-footer">
763
+ <button
764
+ type="button"
765
+ className="focus-flow-secondary"
766
+ onClick={() => handleEyeGazeModalAction(false)}
767
+ >
768
+ Skip
769
+ </button>
770
+ <button
771
+ className="focus-flow-button"
772
+ onClick={() => handleEyeGazeModalAction(true)}
773
+ >
774
+ Start Calibration
775
+ </button>
776
+ </div>
777
+ </div>
778
+ </div>
779
+ );
780
+ };
781
+
782
+ return (
783
+ <main id="page-b" className="page" style={pageStyle}>
784
+ {renderIntroCard()}
785
+ {renderEyeGazeModal()}
786
+
787
+ <section id="display-area" className="focus-display-shell">
788
+ <video
789
+ ref={pipVideoRef}
790
+ muted
791
+ playsInline
792
+ autoPlay
793
+ style={{
794
+ position: 'absolute',
795
+ width: '1px',
796
+ height: '1px',
797
+ opacity: 0,
798
+ pointerEvents: 'none'
799
+ }}
800
+ />
801
+ <video
802
+ ref={localVideoRef}
803
+ muted
804
+ playsInline
805
+ autoPlay
806
+ style={{ display: 'none' }}
807
+ />
808
+
809
+ <canvas
810
+ ref={displayCanvasRef}
811
+ width={640}
812
+ height={480}
813
+ style={{
814
+ width: '100%',
815
+ height: '100%',
816
+ objectFit: 'contain',
817
+ backgroundColor: '#101010'
818
+ }}
819
+ />
820
+
821
+ {flowStep === FLOW_STEPS.ready ? (
822
+ <>
823
+ <div className={`focus-state-pill ${focusState}`}>
824
+ <span className="focus-state-dot" />
825
+ {focusStateLabel}
826
+ </div>
827
+ {!cameraReady && !videoManager?.isStreaming ? (
828
+ <div className="focus-idle-overlay">
829
+ <p>Camera is paused.</p>
830
+ <span>Use Start to enable the camera and begin detection.</span>
831
+ </div>
832
+ ) : null}
833
+ </>
834
+ ) : null}
835
+
836
+ {sessionResult && (
837
+ <div className="session-result-overlay">
838
+ <h3>Session Complete!</h3>
839
+ <div className="result-item">
840
+ <span className="label">Duration:</span>
841
+ <span className="value">{formatDuration(sessionResult.duration_seconds)}</span>
842
+ </div>
843
+ <div className="result-item">
844
+ <span className="label">Focus Score:</span>
845
+ <span className="value">{(sessionResult.focus_score * 100).toFixed(1)}%</span>
846
+ </div>
847
+
848
+ <button
849
+ onClick={handleCloseOverlay}
850
+ style={{
851
+ marginTop: '20px',
852
+ padding: '8px 20px',
853
+ background: 'transparent',
854
+ border: '1px solid white',
855
+ color: 'white',
856
+ borderRadius: '20px',
857
+ cursor: 'pointer'
858
+ }}
859
+ >
860
+ Close
861
+ </button>
862
+ </div>
863
+ )}
864
+
865
+ </section>
866
+
867
+ {flowStep === FLOW_STEPS.ready ? (
868
+ <>
869
+ {availableModels.length > 0 ? (
870
+ <section className="focus-model-strip">
871
+ <span className="focus-model-label">Model:</span>
872
+ {MODEL_ORDER.filter((n) => availableModels.includes(n)).map((name) => (
873
+ <button
874
+ key={name}
875
+ onClick={() => handleModelChange(name)}
876
+ className={`focus-model-button ${currentModel === name ? 'active' : ''}`}
877
+ >
878
+ {MODEL_INFO[name]?.label || name}
879
+ </button>
880
+ ))}
881
+
882
+ {l2csBoostAvailable && (
883
+ <>
884
+ <span className="focus-model-sep" />
885
+ <button
886
+ onClick={handleEyeGazeToggle}
887
+ className={`eye-gaze-toggle ${l2csBoost ? 'on' : 'off'}`}
888
+ title={l2csBoost ? 'Eye gaze tracking active — click to disable' : 'Enable eye gaze tracking (requires calibration)'}
889
+ >
890
+ <svg width="16" height="16" viewBox="0 0 16 16" className="eye-gaze-icon" aria-hidden="true">
891
+ <ellipse cx="8" cy="8" rx="7" ry="4.5" fill="none" stroke="currentColor" strokeWidth="1.4" />
892
+ <circle cx="8" cy="8" r="2.2" fill="currentColor" />
893
+ </svg>
894
+ {l2csBoost ? 'Eye Gaze On' : 'Eye Gaze'}
895
+ </button>
896
+ {l2csBoost && stats && stats.isStreaming && (
897
+ <button
898
+ onClick={() => videoManager && videoManager.startCalibration()}
899
+ className="focus-model-button recalibrate"
900
+ title="Re-run gaze calibration"
901
+ >
902
+ Recalibrate
903
+ </button>
904
+ )}
905
+ </>
906
+ )}
907
+ </section>
908
+ ) : null}
909
+
910
+ {systemStats && systemStats.cpu_percent != null && (
911
+ <section className="focus-system-stats">
912
+ <span>CPU: <strong>{systemStats.cpu_percent}%</strong></span>
913
+ <span className="focus-system-stats-sep" />
914
+ <span>RAM: <strong>{systemStats.memory_percent}%</strong> ({systemStats.memory_used_mb}/{systemStats.memory_total_mb} MB)</span>
915
+ </section>
916
+ )}
917
+
918
+ <section id="timeline-area">
919
+ <div className="timeline-label">Timeline</div>
920
+ <div id="timeline-visuals">
921
+ {timelineEvents.map((event, index) => (
922
+ <div
923
+ key={index}
924
+ className="timeline-block"
925
+ style={{
926
+ backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
927
+ width: '10px',
928
+ height: '20px',
929
+ borderRadius: '2px',
930
+ flexShrink: 0
931
+ }}
932
+ title={event.isFocused ? 'Focused' : 'Distracted'}
933
+ />
934
+ ))}
935
+ </div>
936
+ <div id="timeline-line" />
937
+ </section>
938
+
939
+ <section id="control-panel">
940
+ <button id="btn-cam-start" className="action-btn green" onClick={handleStart} disabled={isStarting}>
941
+ {isStarting ? 'Starting...' : 'Start'}
942
+ </button>
943
+
944
+ <button id="btn-floating" className="action-btn blue" onClick={handlePiP}>
945
+ Floating Window
946
+ </button>
947
+
948
+ <button id="btn-preview" className="action-btn orange" onClick={handlePreview}>
949
+ Preview Result
950
+ </button>
951
+
952
+ <button id="btn-cam-stop" className="action-btn red" onClick={handleStop}>
953
+ Stop
954
+ </button>
955
+ </section>
956
+
957
+ {cameraError ? (
958
+ <div className="focus-inline-error focus-inline-error-standalone">{cameraError}</div>
959
+ ) : null}
960
+
961
+ {MODEL_INFO[currentModel] && (
962
+ <section className="model-card">
963
+ <div className="model-card-header">
964
+ <h3 className="model-card-title">{MODEL_INFO[currentModel].label}</h3>
965
+ {MODEL_INFO[currentModel].badge && (
966
+ <span className={MODEL_INFO[currentModel].badge === 'Baseline' ? 'model-card-badge-baseline' : 'model-card-badge'}>
967
+ {MODEL_INFO[currentModel].badge}
968
+ </span>
969
+ )}
970
+ </div>
971
+ <p className="model-card-tagline">{MODEL_INFO[currentModel].tagline}</p>
972
+
973
+ <div className="model-card-metrics">
974
+ <div className="model-card-metric">
975
+ <span className="model-card-metric-value">{MODEL_INFO[currentModel].accuracy}</span>
976
+ <span className="model-card-metric-label">Accuracy</span>
977
+ </div>
978
+ <div className="model-card-metric">
979
+ <span className="model-card-metric-value">{MODEL_INFO[currentModel].f1}</span>
980
+ <span className="model-card-metric-label">F1 Score</span>
981
+ </div>
982
+ <div className="model-card-metric">
983
+ <span className="model-card-metric-value">{MODEL_INFO[currentModel].auc}</span>
984
+ <span className="model-card-metric-label">ROC-AUC</span>
985
+ </div>
986
+ <div className="model-card-metric">
987
+ <span className="model-card-metric-value">{MODEL_INFO[currentModel].threshold}</span>
988
+ <span className="model-card-metric-label">Threshold</span>
989
+ </div>
990
+ </div>
991
+
992
+ <div className="model-card-details">
993
+ <div className="model-card-section">
994
+ <h4>How it works</h4>
995
+ <p>{MODEL_INFO[currentModel].how}</p>
996
+ </div>
997
+ <div className="model-card-section">
998
+ <h4>Features used</h4>
999
+ <p>{MODEL_INFO[currentModel].features}</p>
1000
+ </div>
1001
+ <div className="model-card-section">
1002
+ <h4>Strengths</h4>
1003
+ <p>{MODEL_INFO[currentModel].strengths}</p>
1004
+ </div>
1005
+ </div>
1006
+
1007
+ <div className="model-card-eval">
1008
+ Evaluated with {MODEL_INFO[currentModel].evaluation}
1009
+ </div>
1010
+ </section>
1011
+ )}
1012
+
1013
+ <section id="frame-control">
1014
+ <label htmlFor="frame-slider">Frame Rate (FPS)</label>
1015
+ <input
1016
+ type="range"
1017
+ id="frame-slider"
1018
+ min="10"
1019
+ max="30"
1020
+ value={currentFrame}
1021
+ onChange={(e) => handleFrameChange(e.target.value)}
1022
+ />
1023
+ <input
1024
+ type="number"
1025
+ id="frame-input"
1026
+ min="10"
1027
+ max="30"
1028
+ value={currentFrame}
1029
+ onChange={(e) => handleFrameChange(e.target.value)}
1030
+ />
1031
+ </section>
1032
+ </>
1033
+ ) : null}
1034
+
1035
+ <CalibrationOverlay calibration={calibration} videoManager={videoManager} />
1036
+ </main>
1037
+ );
1038
+ }
1039
+
1040
+ export default FocusPageLocal;