Yingtao-Zheng committed on
Commit
0616f67
·
1 Parent(s): a8d94d8

Fix hybrid bug, center timeline, glasses warning in intro pop-up, eye gaze warning pop-up, help page update.

Browse files
.coverage DELETED
Binary file (86 kB)
 
src/App.css CHANGED
@@ -337,6 +337,34 @@ body {
337
  line-height: 1.6;
338
  }
339
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
340
  .focus-flow-button,
341
  .focus-flow-secondary {
342
  border: none;
@@ -748,6 +776,17 @@ body {
748
  display: flex;
749
  flex-direction: column;
750
  justify-content: flex-end;
 
 
 
 
 
 
 
 
 
 
 
751
  }
752
 
753
  .timeline-label {
 
337
  line-height: 1.6;
338
  }
339
 
340
+ .focus-flow-glasses-note {
341
+ background: #fffbea;
342
+ border: 1px solid #f5c518;
343
+ border-radius: 10px;
344
+ padding: 12px 16px;
345
+ font-size: 0.9rem;
346
+ color: #5a4a00;
347
+ line-height: 1.55;
348
+ margin-top: 4px;
349
+ margin-bottom: 4px;
350
+ }
351
+
352
+ .focus-flow-panel-warn {
353
+ border-left: 3px solid #f5a623;
354
+ background: #fff9f0;
355
+ }
356
+
357
+ .eye-gaze-modal-checkbox {
358
+ display: flex;
359
+ align-items: center;
360
+ gap: 8px;
361
+ margin-top: 16px;
362
+ font-size: 0.9rem;
363
+ color: #667281;
364
+ cursor: pointer;
365
+ user-select: none;
366
+ }
367
+
368
  .focus-flow-button,
369
  .focus-flow-secondary {
370
  border: none;
 
776
  display: flex;
777
  flex-direction: column;
778
  justify-content: flex-end;
779
+ align-self: center;
780
+ margin: 0 auto;
781
+ }
782
+
783
+ #timeline-visuals {
784
+ display: flex;
785
+ justify-content: center;
786
+ flex-wrap: wrap;
787
+ align-items: flex-end;
788
+ gap: 2px;
789
+ width: 100%;
790
  }
791
 
792
  .timeline-label {
src/components/FocusPageLocal.jsx CHANGED
@@ -109,6 +109,10 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
109
  const [calibration, setCalibration] = useState(null);
110
  const [l2csBoost, setL2csBoost] = useState(false);
111
  const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
 
 
 
 
112
 
113
  const localVideoRef = useRef(null);
114
  const displayCanvasRef = useRef(null);
@@ -305,22 +309,19 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
305
  }
306
  };
307
 
308
- const handleEyeGazeToggle = async () => {
309
- const next = !l2csBoost;
310
  try {
311
  const res = await fetch('/api/settings', {
312
  method: 'PUT',
313
  headers: { 'Content-Type': 'application/json' },
314
- body: JSON.stringify({ l2cs_boost: next })
315
  });
316
  if (!res.ok) return;
317
- setL2csBoost(next);
318
 
319
- if (next && videoManager && videoManager.isStreaming) {
320
 - // Turning ON → auto-start calibration
321
  videoManager.startCalibration();
322
- } else if (!next && videoManager) {
323
 - // Turning OFF → cancel any active calibration
324
  videoManager.cancelCalibration();
325
  }
326
  } catch (err) {
@@ -328,6 +329,24 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
328
  }
329
  };
330
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
331
  const handleStart = async () => {
332
  try {
333
  setIsStarting(true);
@@ -611,6 +630,10 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
611
  ))}
612
  </div>
613
 
 
 
 
 
614
  <div className="focus-flow-footer">
615
  <div className="focus-flow-note">
616
  You can still change frame rate and available model options after the preview loads.
@@ -677,9 +700,91 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
677
  return null;
678
  };
679
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
680
  return (
681
  <main id="page-b" className="page" style={pageStyle}>
682
  {renderIntroCard()}
 
683
 
684
  <section id="display-area" className="focus-display-shell">
685
  <video
@@ -827,9 +932,8 @@ function FocusPageLocal({ videoManager, sessionResult, setSessionResult, isActiv
827
  backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
828
  width: '10px',
829
  height: '20px',
830
- display: 'inline-block',
831
- marginRight: '2px',
832
- borderRadius: '2px'
833
  }}
834
  title={event.isFocused ? 'Focused' : 'Distracted'}
835
  />
 
109
  const [calibration, setCalibration] = useState(null);
110
  const [l2csBoost, setL2csBoost] = useState(false);
111
  const [l2csBoostAvailable, setL2csBoostAvailable] = useState(false);
112
+ const [showEyeGazeModal, setShowEyeGazeModal] = useState(false);
113
+ const [eyeGazeDontShow, setEyeGazeDontShow] = useState(
114
+ () => localStorage.getItem('focusguard_eyegaze_noshowalert') === 'true'
115
+ );
116
 
117
  const localVideoRef = useRef(null);
118
  const displayCanvasRef = useRef(null);
 
309
  }
310
  };
311
 
312
+ const applyEyeGazeChange = async (enable, withCalibration = true) => {
 
313
  try {
314
  const res = await fetch('/api/settings', {
315
  method: 'PUT',
316
  headers: { 'Content-Type': 'application/json' },
317
+ body: JSON.stringify({ l2cs_boost: enable })
318
  });
319
  if (!res.ok) return;
320
+ setL2csBoost(enable);
321
 
322
+ if (enable && withCalibration && videoManager && videoManager.isStreaming) {
 
323
  videoManager.startCalibration();
324
+ } else if (!enable && videoManager) {
 
325
  videoManager.cancelCalibration();
326
  }
327
  } catch (err) {
 
329
  }
330
  };
331
 
332
+ const handleEyeGazeToggle = async () => {
333
+ const next = !l2csBoost;
334
+ if (next && !eyeGazeDontShow) {
335
+ // Show the warning/calibration modal before enabling
336
+ setShowEyeGazeModal(true);
337
+ return;
338
+ }
339
+ await applyEyeGazeChange(next, true);
340
+ };
341
+
342
+ const handleEyeGazeModalAction = async (withCalibration) => {
343
+ if (eyeGazeDontShow) {
344
+ localStorage.setItem('focusguard_eyegaze_noshowalert', 'true');
345
+ }
346
+ setShowEyeGazeModal(false);
347
+ await applyEyeGazeChange(true, withCalibration);
348
+ };
349
+
350
  const handleStart = async () => {
351
  try {
352
  setIsStarting(true);
 
630
  ))}
631
  </div>
632
 
633
+ <div className="focus-flow-glasses-note">
634
+ <strong>Wearing glasses?</strong> Glasses may reduce detection accuracy on some models. If results seem inaccurate, try switching to a different model (e.g. Geometric or MLP).
635
+ </div>
636
+
637
  <div className="focus-flow-footer">
638
  <div className="focus-flow-note">
639
  You can still change frame rate and available model options after the preview loads.
 
700
  return null;
701
  };
702
 
703
+ const renderEyeGazeModal = () => {
704
+ if (!showEyeGazeModal) return null;
705
+ return (
706
+ <div className="focus-flow-overlay" style={{ zIndex: 2000 }}>
707
+ <div className="focus-flow-card">
708
+ <div className="focus-flow-header">
709
+ <div>
710
+ <div className="focus-flow-eyebrow">Eye Gaze Tracking</div>
711
+ <h2>Before you enable</h2>
712
+ </div>
713
+ <div className="focus-flow-icon">
714
+ <svg width="96" height="96" viewBox="0 0 96 96" aria-hidden="true">
715
+ <ellipse cx="48" cy="48" rx="38" ry="24" fill="none" stroke="#007BFF" strokeWidth="5" />
716
+ <circle cx="48" cy="48" r="13" fill="none" stroke="#007BFF" strokeWidth="5" />
717
+ <circle cx="48" cy="48" r="5" fill="#007BFF" />
718
+ </svg>
719
+ </div>
720
+ </div>
721
+
722
+ <p className="focus-flow-lead">
723
+ Eye gaze tracking runs an additional deep neural network (L2CS-Net) alongside your current model.
724
+ Please read the notes below before proceeding.
725
+ </p>
726
+
727
+ <div className="focus-flow-grid">
728
+ <article className="focus-flow-panel focus-flow-panel-warn">
729
+ <h3>Performance impact</h3>
730
+ <p>Enabling eye gaze tracking increases CPU usage and may reduce frame rate. If the system feels sluggish, consider disabling it.</p>
731
+ </article>
732
+ <article className="focus-flow-panel">
733
+ <h3>Calibration (recommended)</h3>
734
+ <p>For best accuracy, calibrate by looking at 9 screen positions one at a time, followed by 1 validation point. The whole process takes about 30 seconds.</p>
735
+ </article>
736
+ </div>
737
+
738
+ <div className="focus-flow-steps">
739
+ <div className="focus-flow-step">
740
+ <div className="focus-flow-step-number">1</div>
741
+ <div className="focus-flow-step-copy">
742
+ <h3>Click "Start Calibration"</h3>
743
+ <p>A dot will appear on screen. Look directly at it and keep your gaze steady. It will cycle through 9 positions then show a final validation dot.</p>
744
+ </div>
745
+ </div>
746
+ <div className="focus-flow-step">
747
+ <div className="focus-flow-step-number">2</div>
748
+ <div className="focus-flow-step-copy">
749
+ <h3>Or skip for now</h3>
750
+ <p>Click "Skip" to enable eye gaze tracking without calibrating. You can recalibrate at any time using the "Recalibrate" button during a session.</p>
751
+ </div>
752
+ </div>
753
+ </div>
754
+
755
+ <label className="eye-gaze-modal-checkbox">
756
+ <input
757
+ type="checkbox"
758
+ checked={eyeGazeDontShow}
759
+ onChange={(e) => setEyeGazeDontShow(e.target.checked)}
760
+ />
761
+ Don't show this again
762
+ </label>
763
+
764
+ <div className="focus-flow-footer">
765
+ <button
766
+ type="button"
767
+ className="focus-flow-secondary"
768
+ onClick={() => handleEyeGazeModalAction(false)}
769
+ >
770
+ Skip
771
+ </button>
772
+ <button
773
+ className="focus-flow-button"
774
+ onClick={() => handleEyeGazeModalAction(true)}
775
+ >
776
+ Start Calibration
777
+ </button>
778
+ </div>
779
+ </div>
780
+ </div>
781
+ );
782
+ };
783
+
784
  return (
785
  <main id="page-b" className="page" style={pageStyle}>
786
  {renderIntroCard()}
787
+ {renderEyeGazeModal()}
788
 
789
  <section id="display-area" className="focus-display-shell">
790
  <video
 
932
  backgroundColor: event.isFocused ? '#28a745' : '#dc3545',
933
  width: '10px',
934
  height: '20px',
935
+ borderRadius: '2px',
936
+ flexShrink: 0
 
937
  }}
938
  title={event.isFocused ? 'Focused' : 'Distracted'}
939
  />
src/components/Help.jsx CHANGED
@@ -28,11 +28,12 @@ function Help() {
28
  <h2>How to Use Focus Guard</h2>
29
  <ol>
30
  <li>Navigate to the Focus page from the menu</li>
31
- <li>Allow camera access when prompted</li>
32
  <li>Click the green "Start" button to begin monitoring</li>
33
- <li>Position yourself in front of the camera</li>
34
  <li>The system will track your focus in real-time using face mesh analysis</li>
35
- <li>Use the model selector to switch between detection models (MLP, XGBoost, Geometric, Hybrid)</li>
 
36
  <li>Click "Stop" when you're done to save the session</li>
37
  </ol>
38
  </section>
@@ -51,20 +52,38 @@ function Help() {
51
 
52
  <section className="help-section">
53
  <h2>Available Models</h2>
54
- <p><strong>MLP:</strong> Neural network trained on extracted facial features. Good balance of speed and accuracy.</p>
55
- <p><strong>XGBoost:</strong> Gradient-boosted tree model using 10 selected features. Strong on tabular data with fast inference.</p>
56
- <p><strong>Geometric:</strong> Rule-based scoring using head pose and eye openness. No ML model needed, lightweight.</p>
57
- <p><strong>Hybrid:</strong> Combines MLP predictions with geometric scoring for robust results.</p>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
58
  </section>
59
 
60
  <section className="help-section">
61
  <h2>Adjusting Settings</h2>
62
- <p><strong>Frame Rate:</strong> Controls how many frames per second are sent for analysis. Recommended: 15-30 FPS. Minimum is 10 FPS to ensure temporal features (blink rate, PERCLOS) remain accurate.</p>
63
- <p><strong>Model Selection:</strong> Switch models in real-time using the pill buttons above the timeline. Different models may perform better depending on your lighting and setup.</p>
 
64
  </section>
65
 
66
  <section className="help-section">
67
- <h2>Privacy & Data</h2>
68
  <p>Video frames are processed in real-time on the server and are never stored. Only focus status metadata (timestamps, confidence scores) is saved to the session database. View past runs under <strong>My Records</strong>; stats and badges live under <strong>My Achievement</strong>.</p>
69
  <p style={{ marginTop: '12px' }}>
70
  <button
@@ -94,13 +113,17 @@ function Help() {
94
  <summary>Why is my focus score low?</summary>
95
  <p>Ensure good lighting so the face mesh can detect your landmarks clearly. Face the camera directly and avoid large head movements. Try switching to a different model if one isn't working well for your setup.</p>
96
  </details>
 
 
 
 
97
  <details>
98
  <summary>Can I use this without a camera?</summary>
99
  <p>No, camera access is required. The system relies on real-time face landmark detection to determine focus.</p>
100
  </details>
101
  <details>
102
  <summary>Does this work on mobile?</summary>
103
- <p>Yes, it works on mobile browsers that support camera access and WebSocket connections. Performance depends on your device and network speed.</p>
104
  </details>
105
  <details>
106
  <summary>Is my data private?</summary>
@@ -108,17 +131,23 @@ function Help() {
108
  </details>
109
  <details>
110
  <summary>Why does the face mesh lag behind my movements?</summary>
111
- <p>The face mesh overlay updates each time the server returns a detection result. The camera feed itself renders at 60fps locally. Any visible lag depends on network latency and server processing time.</p>
 
 
 
 
112
  </details>
113
  </section>
114
 
115
  <section className="help-section">
116
  <h2>Technical Info</h2>
117
  <p><strong>Face Detection:</strong> MediaPipe Face Mesh (478 landmarks)</p>
118
- <p><strong>Feature Extraction:</strong> Head pose (yaw/pitch/roll), EAR, MAR, gaze offset, PERCLOS, blink rate</p>
119
- <p><strong>ML Models:</strong> MLP (scikit-learn), XGBoost, Geometric, Hybrid</p>
120
- <p><strong>Storage:</strong> SQLite database</p>
 
121
  <p><strong>Framework:</strong> FastAPI + React (Vite) + WebSocket</p>
 
122
  </section>
123
  </div>
124
  </main>
 
28
  <h2>How to Use Focus Guard</h2>
29
  <ol>
30
  <li>Navigate to the Focus page from the menu</li>
31
+ <li>Read the setup notes in the intro screen, then allow camera access when prompted</li>
32
  <li>Click the green "Start" button to begin monitoring</li>
33
+ <li>Position yourself in front of the camera with good lighting</li>
34
  <li>The system will track your focus in real-time using face mesh analysis</li>
35
+ <li>Use the model selector to switch between detection models (Hybrid, XGBoost, MLP, Geometric)</li>
36
+ <li>Optionally enable <strong>Eye Gaze</strong> tracking for additional gaze-based focus signals</li>
37
  <li>Click "Stop" when you're done to save the session</li>
38
  </ol>
39
  </section>
 
52
 
53
  <section className="help-section">
54
  <h2>Available Models</h2>
55
+ <p><strong>Hybrid</strong> <em>(Recommended)</em>: Combines XGBoost predictions with geometric face/eye scoring using a trained logistic regression combiner. Most robust across different people. LOPO F1: 0.8409.</p>
56
+ <p><strong>XGBoost:</strong> Gradient-boosted tree model using 10 selected features. Highest raw accuracy (95.87% pooled, LOPO AUC 0.8695). Strong on tabular data with fast inference.</p>
57
+ <p><strong>MLP:</strong> Two-layer neural network (10→64→32 neurons) trained with PyTorch. Good balance of speed and accuracy (92.92% pooled, LOPO AUC 0.8624). Fastest inference.</p>
58
+ <p><strong>Geometric:</strong> Rule-based scoring using head pose and eye openness. No ML model needed — lightweight fallback when model checkpoints are unavailable. LOPO F1: 0.8195.</p>
59
+ <p style={{ marginTop: '10px', color: '#667281', fontSize: '0.9rem' }}>
60
+ <strong>Tip:</strong> If you wear glasses or have unusual lighting, try different models to find the one that works best for your setup.
61
+ </p>
62
+ </section>
63
+
64
+ <section className="help-section">
65
+ <h2>Eye Gaze Tracking</h2>
66
+ <p>The <strong>Eye Gaze</strong> button enables L2CS-Net, a deep neural network that estimates your gaze direction from the eye region. It runs alongside your selected base model and can improve focus detection accuracy.</p>
67
+ <p style={{ marginTop: '8px' }}><strong>Performance note:</strong> Eye gaze tracking increases CPU usage and may reduce frame rate. If the system feels sluggish, disable it.</p>
68
+ <h3 style={{ marginTop: '14px', fontSize: '1rem' }}>Calibration</h3>
69
+ <p>For best accuracy, calibrate when prompted:</p>
70
+ <ol>
71
+ <li>Click "Start Calibration" in the warning dialog (or use the "Recalibrate" button during a session)</li>
72
+ <li>Look directly at each dot as it appears on screen — there are <strong>9 calibration points</strong> across the screen</li>
73
+ <li>A final <strong>validation point</strong> confirms accuracy before calibration is applied</li>
74
+ </ol>
75
+ <p>You can skip calibration and recalibrate at any time using the "Recalibrate" button, which appears in the model strip when Eye Gaze is on and a session is running.</p>
76
  </section>
77
 
78
  <section className="help-section">
79
  <h2>Adjusting Settings</h2>
80
+ <p><strong>Frame Rate:</strong> Controls how many frames per second are sent for analysis. Range: 10–30 FPS. A minimum of 10 FPS is enforced to keep temporal features (blink rate, PERCLOS) accurate.</p>
81
+ <p><strong>Model Selection:</strong> Switch models in real-time using the pill buttons above the timeline. The active model is highlighted. Different models may perform better depending on your lighting, setup, and whether you wear glasses.</p>
82
+ <p><strong>Floating Window:</strong> Opens a Picture-in-Picture window with your camera feed so you can keep the video visible while working in other apps.</p>
83
  </section>
84
 
85
  <section className="help-section">
86
+ <h2>Privacy &amp; Data</h2>
87
  <p>Video frames are processed in real-time on the server and are never stored. Only focus status metadata (timestamps, confidence scores) is saved to the session database. View past runs under <strong>My Records</strong>; stats and badges live under <strong>My Achievement</strong>.</p>
88
  <p style={{ marginTop: '12px' }}>
89
  <button
 
113
  <summary>Why is my focus score low?</summary>
114
  <p>Ensure good lighting so the face mesh can detect your landmarks clearly. Face the camera directly and avoid large head movements. Try switching to a different model if one isn't working well for your setup.</p>
115
  </details>
116
+ <details>
117
+ <summary>Does wearing glasses affect accuracy?</summary>
118
+ <p>Yes, glasses can reduce accuracy — especially for eye-based features like EAR and gaze offset — because the lenses may distort landmark positions or cause reflections. If you wear glasses, try different models (e.g. Geometric or MLP may handle glasses better than XGBoost for some users). Avoid using Eye Gaze tracking with glasses as it may significantly degrade results.</p>
119
+ </details>
120
  <details>
121
  <summary>Can I use this without a camera?</summary>
122
  <p>No, camera access is required. The system relies on real-time face landmark detection to determine focus.</p>
123
  </details>
124
  <details>
125
  <summary>Does this work on mobile?</summary>
126
+ <p>Yes, it works on mobile browsers that support camera access and WebSocket connections. Performance depends on your device and network speed. Eye Gaze tracking is not recommended on mobile due to performance constraints.</p>
127
  </details>
128
  <details>
129
  <summary>Is my data private?</summary>
 
131
  </details>
132
  <details>
133
  <summary>Why does the face mesh lag behind my movements?</summary>
134
+ <p>The face mesh overlay updates each time the server returns a detection result. The camera feed itself renders locally. Any visible lag depends on network latency and server processing time. Reducing the frame rate slider can help if lag is noticeable.</p>
135
+ </details>
136
+ <details>
137
+ <summary>The Hybrid model doesn't seem to work differently from XGBoost — why?</summary>
138
+ <p>The Hybrid model uses a trained logistic regression combiner on top of XGBoost and geometric scores. If the combiner file wasn't available at startup, it falls back to a simple weighted average which behaves similarly to XGBoost. Check the server logs for "[HYBRID]" messages to confirm the combiner loaded correctly.</p>
139
  </details>
140
  </section>
141
 
142
  <section className="help-section">
143
  <h2>Technical Info</h2>
144
  <p><strong>Face Detection:</strong> MediaPipe Face Mesh (478 landmarks)</p>
145
+ <p><strong>Feature Extraction:</strong> Head pose (yaw/pitch/roll), EAR, MAR, gaze offset, PERCLOS, blink rate — 10 features selected via LOFO analysis</p>
146
+ <p><strong>ML Models:</strong> PyTorch MLP (10→64→32→2), XGBoost (600 trees), Geometric (rule-based), Hybrid (LR combiner over XGBoost+Geo)</p>
147
+ <p><strong>Eye Gaze:</strong> L2CS-Net (ResNet50 backbone, trained on Gaze360) with 9-point polynomial calibration</p>
148
+ <p><strong>Storage:</strong> SQLite database (sessions, events, settings)</p>
149
  <p><strong>Framework:</strong> FastAPI + React (Vite) + WebSocket</p>
150
+ <p><strong>Evaluation:</strong> Leave-One-Person-Out (LOPO) cross-validation on 9 participants, 144K frames</p>
151
  </section>
152
  </div>
153
  </main>
ui/pipeline.py CHANGED
@@ -442,11 +442,14 @@ class HybridFocusPipeline:
442
  if not os.path.isfile(resolved_combiner):
443
  resolved_combiner = os.path.join(_PROJECT_ROOT, combiner_path)
444
  if os.path.isfile(resolved_combiner):
445
- blob = joblib.load(resolved_combiner)
446
- self._combiner = blob.get("combiner")
447
- if self._combiner is None:
448
- self._combiner = blob
449
- print(f"[HYBRID] LR combiner loaded from {resolved_combiner}")
 
 
 
450
  else:
451
  print(f"[HYBRID] combiner_path not found: {resolved_combiner}, using heuristic weights")
452
  if self._use_xgb:
 
442
  if not os.path.isfile(resolved_combiner):
443
  resolved_combiner = os.path.join(_PROJECT_ROOT, combiner_path)
444
  if os.path.isfile(resolved_combiner):
445
+ try:
446
+ blob = joblib.load(resolved_combiner)
447
+ self._combiner = blob.get("combiner")
448
+ if self._combiner is None:
449
+ self._combiner = blob
450
+ print(f"[HYBRID] LR combiner loaded from {resolved_combiner}")
451
+ except Exception as e:
452
+ print(f"[HYBRID] Failed to load combiner ({e}); using heuristic weights")
453
  else:
454
  print(f"[HYBRID] combiner_path not found: {resolved_combiner}, using heuristic weights")
455
  if self._use_xgb: