{ "cells": [ { "cell_type": "markdown", "metadata": {}, "source": [ "# XGBoost Training + LOPO evaluation (ClearML-compatible)\n", "\n", "XGBoost for focus classification.\n", "- Single CFG dict (ClearML `task.connect(CFG)`)\n", "- 70/15/15 stratified random split with per-round loss logging\n", "- Test evaluation: accuracy, F1, ROC-AUC\n", "- ClearML scalar logging (opt-in)\n", "- LOPO comparison at the end" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 1. Imports and CFG" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "import json\n", "import os\n", "import sys\n", "import random\n", "\n", "import numpy as np\n", "from xgboost import XGBClassifier\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.metrics import (\n", " accuracy_score, f1_score, roc_auc_score,\n", " classification_report, confusion_matrix, ConfusionMatrixDisplay,\n", ")\n", "import matplotlib.pyplot as plt\n", "import warnings\n", "warnings.filterwarnings(\"ignore\")\n", "\n", "# Add project root to sys.path\n", "_cwd = os.getcwd()\n", "PROJECT_ROOT = _cwd if os.path.isdir(os.path.join(_cwd, \"models\")) else os.path.abspath(os.path.join(_cwd, \"..\"))\n", "if PROJECT_ROOT not in sys.path:\n", " sys.path.insert(0, PROJECT_ROOT)\n", "\n", "from data_preparation.prepare_dataset import load_per_person, SELECTED_FEATURES, _split_and_scale\n", "# NOTE(review): train_test_split and SELECTED_FEATURES appear unused in this notebook\n", "\n", "CFG = {\n", " \"model_name\": \"face_orientation\",\n", " \"seed\": 42,\n", " \"split_ratios\": (0.7, 0.15, 0.15),\n", " \"scale\": False, # tree-based model — scaling unnecessary\n", " # NOTE(review): hyperparameter values below look tuned — presumably from a search run; confirm provenance\n", " \"n_estimators\": 600,\n", " \"max_depth\": 8,\n", " \"learning_rate\": 0.149,\n", " \"subsample\": 0.9625,\n", " \"colsample_bytree\": 0.9013,\n", " \"reg_alpha\": 1.1407,\n", " \"reg_lambda\": 2.4181,\n", " \"eval_metric\": \"logloss\",\n", " \"checkpoints_dir\": os.path.join(PROJECT_ROOT, \"models\", \"xgboost\", \"checkpoints\"),\n", " \"logs_dir\": os.path.join(PROJECT_ROOT, 
\"evaluation\", \"logs\"),\n", "}\n", "\n", "print(f\"Project root: {PROJECT_ROOT}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 2. ClearML (optional)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "USE_CLEARML = False # set True when ClearML credentials are configured\n", "task = None\n", "\n", "if USE_CLEARML:\n", " from clearml import Task\n", " task = Task.init(\n", " project_name=\"FocusGuards Large Group Project\",\n", " task_name=\"XGBoost Training + LOPO\",\n", " tags=[\"training\", \"xgboost\"]\n", " )\n", " task.connect(CFG)\n", " print(\"[ClearML] Connected\")\n", "else:\n", " print(\"[ClearML] Disabled (set USE_CLEARML = True to enable)\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 3. Load data" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def set_seed(seed):\n", " \"\"\"Seed Python's and NumPy's global RNGs for reproducible runs.\"\"\"\n", " random.seed(seed)\n", " np.random.seed(seed)\n", "\n", "set_seed(CFG[\"seed\"])\n", "\n", "by_person, X_all, y_all = load_per_person(CFG[\"model_name\"])\n", "person_names = sorted(by_person.keys())\n", "num_features = X_all.shape[1]\n", "num_classes = int(y_all.max()) + 1\n", "print(f\"\\nPersons: {person_names}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 4. Random split (70/15/15)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "splits, _ = _split_and_scale(X_all, y_all, CFG[\"split_ratios\"], CFG[\"seed\"], CFG[\"scale\"])\n", "X_train, y_train = splits[\"X_train\"], splits[\"y_train\"]\n", "X_val, y_val = splits[\"X_val\"], splits[\"y_val\"]\n", "X_test, y_test = splits[\"X_test\"], splits[\"y_test\"]\n", "\n", "print(f\"Features: {num_features}, Classes: {num_classes}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 5. 
Model definition and training" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Build the classifier entirely from CFG so ClearML's task.connect(CFG) captures every hyperparameter\n", "model = XGBClassifier(\n", " n_estimators=CFG[\"n_estimators\"],\n", " max_depth=CFG[\"max_depth\"],\n", " learning_rate=CFG[\"learning_rate\"],\n", " subsample=CFG[\"subsample\"],\n", " colsample_bytree=CFG[\"colsample_bytree\"],\n", " reg_alpha=CFG[\"reg_alpha\"],\n", " reg_lambda=CFG[\"reg_lambda\"],\n", " eval_metric=CFG[\"eval_metric\"],\n", " # use_label_encoder was deprecated and later removed from xgboost; passing it only triggers warnings\n", " random_state=CFG[\"seed\"],\n", " verbosity=1,\n", ")\n", "\n", "# Track logloss on both splits each round; verbose=10 prints every 10th round\n", "model.fit(\n", " X_train, y_train,\n", " eval_set=[(X_train, y_train), (X_val, y_val)],\n", " verbose=10,\n", ")\n", "\n", "print(f\"\\n[TRAIN] Training complete: {CFG['n_estimators']} rounds\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 6. Per-round loss logging" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# evals_result() keys follow eval_set order: validation_0 = train split, validation_1 = val split\n", "evals = model.evals_result()\n", "train_losses = evals[\"validation_0\"][CFG[\"eval_metric\"]]\n", "val_losses = evals[\"validation_1\"][CFG[\"eval_metric\"]]\n", "rounds = list(range(1, len(train_losses) + 1))\n", "\n", "if task is not None:\n", " logger = task.get_logger() # ClearML exposes the task logger via get_logger(), not a .logger attribute\n", " for i, (tl, vl) in enumerate(zip(train_losses, val_losses)):\n", " logger.report_scalar(\"Loss\", \"Train\", tl, iteration=i + 1)\n", " logger.report_scalar(\"Loss\", \"Val\", vl, iteration=i + 1)\n", " logger.flush()\n", "\n", "print(f\"Final train logloss: {train_losses[-1]:.4f}\")\n", "print(f\"Final val logloss: {val_losses[-1]:.4f}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 7. 
Loss curve" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig, ax = plt.subplots(figsize=(8, 4))\n", "ax.plot(rounds, train_losses, label=\"Train\")\n", "ax.plot(rounds, val_losses, label=\"Val\")\n", "ax.set_xlabel(\"Boosting round\")\n", "ax.set_ylabel(\"Log loss\")\n", "ax.set_title(f\"XGBoost Training — {CFG['model_name']}\")\n", "ax.legend()\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 8. Test evaluation (random split)" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "test_preds = model.predict(X_test)\n", "test_probs = model.predict_proba(X_test)\n", "test_acc = float(accuracy_score(y_test, test_preds))\n", "test_f1 = float(f1_score(y_test, test_preds, average=\"weighted\"))\n", "# ROC-AUC: binary case scores the positive-class probability; multiclass uses one-vs-rest\n", "if num_classes > 2:\n", " test_auc = float(roc_auc_score(y_test, test_probs, multi_class=\"ovr\", average=\"weighted\"))\n", "else:\n", " test_auc = float(roc_auc_score(y_test, test_probs[:, 1]))\n", "\n", "print(f\"[TEST] Accuracy: {test_acc:.2%}\")\n", "print(f\"[TEST] F1: {test_f1:.4f}\")\n", "print(f\"[TEST] ROC-AUC: {test_auc:.4f}\")\n", "\n", "if task is not None:\n", " logger = task.get_logger() # ClearML exposes the task logger via get_logger(), not a .logger attribute\n", " logger.report_single_value(\"test_accuracy\", test_acc)\n", " logger.report_single_value(\"test_f1\", test_f1)\n", " logger.report_single_value(\"test_auc\", test_auc)\n", " logger.flush()\n", "\n", "print(\"\\nClassification report:\")\n", "# Named labels only make sense in the binary case; fall back to numeric labels otherwise\n", "label_names = [\"Unfocused (0)\", \"Focused (1)\"] if num_classes == 2 else None\n", "print(classification_report(y_test, test_preds, target_names=label_names))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 9. 
Confusion matrix" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig, ax = plt.subplots(figsize=(5, 4))\n", "cm = confusion_matrix(y_test, test_preds)\n", "ConfusionMatrixDisplay(cm, display_labels=[\"Unfocused\", \"Focused\"]).plot(ax=ax, cmap=\"Blues\")\n", "ax.set_title(f\"XGBoost confusion matrix — test acc {test_acc:.2%}\")\n", "plt.tight_layout()\n", "plt.show()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 10. Save checkpoint and JSON log" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "os.makedirs(CFG[\"checkpoints_dir\"], exist_ok=True)\n", "model_path = os.path.join(CFG[\"checkpoints_dir\"], f\"{CFG['model_name']}_best.json\")\n", "# The .json suffix makes save_model use xgboost's native JSON checkpoint format\n", "model.save_model(model_path)\n", "\n", "history = {\n", " \"model_name\": f\"xgboost_{CFG['model_name']}\",\n", " \"n_estimators\": CFG[\"n_estimators\"],\n", " \"max_depth\": CFG[\"max_depth\"],\n", " \"epochs\": rounds,\n", " \"train_loss\": [round(v, 4) for v in train_losses],\n", " \"val_loss\": [round(v, 4) for v in val_losses],\n", " \"test_acc\": round(test_acc, 4),\n", " \"test_f1\": round(test_f1, 4),\n", " \"test_auc\": round(test_auc, 4),\n", "}\n", "\n", "os.makedirs(CFG[\"logs_dir\"], exist_ok=True)\n", "log_path = os.path.join(CFG[\"logs_dir\"], f\"xgboost_{CFG['model_name']}_training_log.json\")\n", "with open(log_path, \"w\") as f:\n", " json.dump(history, f, indent=2)\n", "\n", "print(f\"[CKPT] Model: {model_path}\")\n", "print(f\"[LOG] History: {log_path}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 11. LOPO comparison (XGBoost)\n", "\n", "Train+test with Leave-One-Person-Out so we can compare fairly with MLP/RF under LOPO." 
] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "def train_xgb_on_splits(X_train, y_train, X_test, y_test, cfg):\n", " \"\"\"Fit a fresh XGBClassifier from cfg on (X_train, y_train); return accuracy/F1/ROC-AUC on (X_test, y_test).\"\"\"\n", " m = XGBClassifier(\n", " n_estimators=cfg[\"n_estimators\"],\n", " max_depth=cfg[\"max_depth\"],\n", " learning_rate=cfg[\"learning_rate\"],\n", " subsample=cfg[\"subsample\"],\n", " colsample_bytree=cfg[\"colsample_bytree\"],\n", " reg_alpha=cfg[\"reg_alpha\"],\n", " reg_lambda=cfg[\"reg_lambda\"],\n", " eval_metric=cfg[\"eval_metric\"],\n", " # use_label_encoder was deprecated and later removed from xgboost; passing it only triggers warnings\n", " random_state=cfg[\"seed\"],\n", " verbosity=0,\n", " )\n", " m.fit(X_train, y_train, verbose=False)\n", "\n", " preds = m.predict(X_test)\n", " probs = m.predict_proba(X_test)\n", " n_cls = probs.shape[1]\n", " acc = accuracy_score(y_test, preds)\n", " f1 = f1_score(y_test, preds, average=\"weighted\")\n", " # Binary: score positive-class probability; multiclass: one-vs-rest, weighted\n", " auc = roc_auc_score(y_test, probs[:, 1]) if n_cls == 2 else roc_auc_score(y_test, probs, multi_class=\"ovr\", average=\"weighted\")\n", " return {\"accuracy\": acc, \"f1\": f1, \"roc_auc\": auc}" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "print(\"XGBoost LOPO evaluation\")\n", "print(\"-\" * 60)\n", "\n", "lopo_results = []\n", "for test_person in person_names:\n", " # Train on every person except the held-out one; test on the held-out person\n", " train_persons = [p for p in person_names if p != test_person]\n", " X_tr = np.concatenate([by_person[p][0] for p in train_persons], axis=0)\n", " y_tr = np.concatenate([by_person[p][1] for p in train_persons], axis=0)\n", " X_te, y_te = by_person[test_person]\n", "\n", " set_seed(CFG[\"seed\"])\n", " metrics = train_xgb_on_splits(X_tr, y_tr, X_te, y_te, CFG)\n", " metrics[\"test_person\"] = test_person\n", " metrics[\"n_test\"] = len(y_te)\n", " lopo_results.append(metrics)\n", " print(f\" test={test_person}: acc={metrics['accuracy']:.2%} F1={metrics['f1']:.4f} AUC={metrics['roc_auc']:.4f} (n={len(y_te)})\")\n", "\n", "print(\"\\nXGBoost LOPO summary (mean +/- std):\")\n", "for m in [\"accuracy\", \"f1\", 
\"roc_auc\"]:\n", " vals = [r[m] for r in lopo_results]\n", " print(f\" {m}: {np.mean(vals):.4f} +/- {np.std(vals):.4f}\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 12. Random split vs LOPO summary" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# pandas is only used for these summary tables, hence the section-local import\n", "import pandas as pd\n", "\n", "lopo_acc = np.mean([r[\"accuracy\"] for r in lopo_results])\n", "lopo_f1 = np.mean([r[\"f1\"] for r in lopo_results])\n", "lopo_auc = np.mean([r[\"roc_auc\"] for r in lopo_results])\n", "\n", "summary = pd.DataFrame([\n", " {\"Method\": \"Random split (70/15/15)\", \"Accuracy\": f\"{test_acc:.2%}\", \"F1\": f\"{test_f1:.4f}\", \"ROC-AUC\": f\"{test_auc:.4f}\"},\n", " {\"Method\": \"LOPO (mean)\", \"Accuracy\": f\"{lopo_acc:.2%}\", \"F1\": f\"{lopo_f1:.4f}\", \"ROC-AUC\": f\"{lopo_auc:.4f}\"},\n", "])\n", "display(summary)\n", "\n", "print(\"\\nPer-fold LOPO results:\")\n", "display(pd.DataFrame(lopo_results))\n", "\n", "print(\"\\nCompare these XGBoost LOPO numbers with MLP (from mlp.ipynb).\")\n", "print(\"If XGB LOPO > MLP LOPO, XGB generalises better across unseen persons.\")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "## 13. 
Per-person accuracy bar chart" ] }, { "cell_type": "code", "execution_count": null, "metadata": {}, "outputs": [], "source": [ "fig, ax = plt.subplots(figsize=(10, 4))\n", "names_sorted = [r[\"test_person\"] for r in lopo_results]\n", "accs = [r[\"accuracy\"] for r in lopo_results]\n", "ax.bar(names_sorted, accs, color=\"steelblue\", edgecolor=\"black\")\n", "# Dashed reference line marks the mean LOPO accuracy across all folds\n", "ax.axhline(y=lopo_acc, color=\"red\", linestyle=\"--\", label=f\"Mean = {lopo_acc:.2%}\")\n", "ax.set_ylabel(\"Accuracy\")\n", "ax.set_xlabel(\"Left-out person\")\n", "ax.set_title(\"XGBoost LOPO: test accuracy per left-out person\")\n", "ax.legend()\n", "plt.xticks(rotation=45, ha=\"right\")\n", "plt.tight_layout()\n", "plt.show()" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "name": "python", "version": "3.13.0" } }, "nbformat": 4, "nbformat_minor": 4 }