Deployment Added
- Dockerfile +21 -0
- LICENSE +21 -0
- app.py +283 -0
- config.yaml +47 -0
- evaluate.py +126 -0
- logs/api.log +135 -0
- logs/best_model_confusion_matrix.png +0 -0
- logs/best_model_report.txt +12 -0
- logs/data_loader.log +92 -0
- logs/evaluate.log +58 -0
- logs/export_onnx.log +88 -0
- logs/model_comparison.png +0 -0
- logs/models.log +48 -0
- logs/predict.log +96 -0
- logs/train.log +86 -0
- onnx_models/benchmark_results.json +17 -0
- onnx_models/model_dynamic_int8.onnx +3 -0
- onnx_models/model_fp32.onnx +3 -0
- onnx_models/model_static_int8.onnx +3 -0
- predict.py +164 -0
- requirements.txt +16 -0
- save_model.py +64 -0
- saved_models/ft_best.h5 +3 -0
- src/__init__.py +0 -0
- src/data_loader.py +65 -0
- src/export_onnx.py +282 -0
- src/models.py +98 -0
- src/utils.py +321 -0
- static/index.html +679 -0
- train.py +406 -0
Dockerfile
ADDED
@@ -0,0 +1,21 @@
FROM python:3.10-slim

ENV PYTHONDONTWRITEBYTECODE=1 \
    PYTHONUNBUFFERED=1 \
    PIP_NO_CACHE_DIR=1

WORKDIR /app

RUN apt-get update && apt-get install -y --no-install-recommends \
    libgl1 \
    libglib2.0-0 \
    && rm -rf /var/lib/apt/lists/*

COPY requirements.txt /app/requirements.txt
RUN pip install --upgrade pip && pip install -r /app/requirements.txt

COPY . /app

EXPOSE 7860

CMD ["uvicorn", "app:app", "--host", "0.0.0.0", "--port", "7860"]
LICENSE
ADDED
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2026 Md Shoaib Shahriar Ibrahim

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
app.py
ADDED
@@ -0,0 +1,283 @@
# app.py — FastAPI backend
# Endpoints:
#   GET  /                 -> serve static/index.html
#   GET  /health           -> health check
#   GET  /benchmark        -> cached benchmark results
#   GET  /models/info      -> metadata about loaded models
#   POST /predict          -> TF model prediction
#   POST /predict/onnx     -> ONNX FP32 prediction
#   POST /predict/dynamic  -> Dynamic INT8 prediction
#   POST /predict/static   -> Static INT8 prediction
#   POST /predict/gradcam  -> TF prediction + Grad-CAM overlay image

import os
import io
import json
import time
import base64
import tempfile
import numpy as np
import cv2
import matplotlib
matplotlib.use("Agg")
from PIL import Image

from fastapi import FastAPI, File, UploadFile, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import FileResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel

from src.utils import load_config, get_logger
from predict import BrainTumorPredictor

logger = get_logger("api")
cfg = load_config("config.yaml")

# ---------------------------------------------------------------------------
# App setup
# ---------------------------------------------------------------------------

app = FastAPI(
    title       = "Brain Tumor Classification API",
    description = "MRI brain tumor classification with TF, ONNX FP32, Dynamic INT8, Static INT8 + Grad-CAM",
    version     = "1.0.0",
)

app.add_middleware(
    CORSMiddleware,
    allow_origins     = ["*"],
    allow_credentials = True,
    allow_methods     = ["*"],
    allow_headers     = ["*"],
)

BASE_DIR   = os.path.dirname(os.path.abspath(__file__))
STATIC_DIR = os.path.join(BASE_DIR, "static")

if os.path.exists(STATIC_DIR):
    app.mount("/static", StaticFiles(directory=STATIC_DIR), name="static")
else:
    logger.warning(f"Static directory not found: {STATIC_DIR}")

# ---------------------------------------------------------------------------
# Load predictors at startup
# ---------------------------------------------------------------------------

predictors: dict = {}
load_errors: dict = {}  # stores error messages for failed backends

@app.on_event("startup")
async def load_models():
    global predictors, load_errors
    logger.info("Loading models ...")

    # TF backend — always try first
    try:
        predictors["tensorflow"] = BrainTumorPredictor(cfg, backend="tensorflow")
        logger.info("[OK] TF model loaded.")
    except Exception as e:
        load_errors["tensorflow"] = str(e)
        logger.error(f"[FAIL] TF model failed to load: {e}")

    # ONNX backends — optional, warn if not found
    for backend in ["onnx_fp32", "onnx_dynamic", "onnx_static"]:
        try:
            predictors[backend] = BrainTumorPredictor(cfg, backend=backend)
            logger.info(f"[OK] {backend} loaded.")
        except FileNotFoundError as e:
            load_errors[backend] = str(e)
            logger.warning(f"[SKIP] {backend} — file not found. Run export_onnx.py first.")
        except Exception as e:
            load_errors[backend] = str(e)
            logger.error(f"[FAIL] {backend} failed: {e}")

    logger.info(f"Loaded backends : {list(predictors.keys())}")
    if load_errors:
        logger.warning(f"Failed backends : {list(load_errors.keys())}")


# ---------------------------------------------------------------------------
# Helpers
# ---------------------------------------------------------------------------

def save_upload_temp(file: UploadFile) -> str:
    suffix = os.path.splitext(file.filename)[-1] or ".jpg"
    tmp = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
    tmp.write(file.file.read())
    tmp.flush()
    return tmp.name


def numpy_to_b64(img_array: np.ndarray) -> str:
    img_pil = Image.fromarray(img_array.astype(np.uint8))
    buf = io.BytesIO()
    img_pil.save(buf, format="PNG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")


def predict_with_backend(backend: str, tmp_path: str) -> dict:
    if backend not in predictors:
        # Give a specific error message explaining what to do
        if backend in load_errors:
            detail = (
                f"Backend '{backend}' failed to load: {load_errors[backend]}. "
                f"Run export_onnx.py first to generate ONNX models."
            )
        else:
            detail = (
                f"Backend '{backend}' is not available. "
                f"Run export_onnx.py to generate ONNX models."
            )
        raise HTTPException(status_code=503, detail=detail)

    t0 = time.time()
    result = predictors[backend].predict(tmp_path)
    result["latency_ms"] = round((time.time() - t0) * 1000, 2)
    return result


# ---------------------------------------------------------------------------
# Pydantic response models
# ---------------------------------------------------------------------------

class PredictionResponse(BaseModel):
    predicted_class   : str
    confidence        : float
    all_probabilities : dict
    backend           : str
    latency_ms        : float


class GradCamResponse(BaseModel):
    predicted_class   : str
    confidence        : float
    all_probabilities : dict
    backend           : str
    latency_ms        : float
    gradcam_b64       : str
    heatmap_b64       : str


# ---------------------------------------------------------------------------
# Routes
# ---------------------------------------------------------------------------

@app.get("/")
async def serve_index():
    index_path = os.path.join(STATIC_DIR, "index.html")
    if not os.path.exists(index_path):
        raise HTTPException(status_code=404, detail="static/index.html not found.")
    return FileResponse(index_path)


@app.get("/health")
async def health():
    return {
        "status"          : "ok",
        "loaded_backends" : list(predictors.keys()),
        "failed_backends" : load_errors,
    }


@app.get("/models/info")
async def models_info():
    meta_path = os.path.join(cfg["models"]["save_dir"], "model_metadata.json")
    if not os.path.exists(meta_path):
        raise HTTPException(status_code=404, detail="model_metadata.json not found.")
    with open(meta_path) as f:
        return json.load(f)


@app.get("/benchmark")
async def get_benchmark():
    bench_path = os.path.join(cfg["models"]["onnx_dir"], "benchmark_results.json")
    if not os.path.exists(bench_path):
        raise HTTPException(
            status_code=404,
            detail="benchmark_results.json not found. Run export_onnx.py first.",
        )
    with open(bench_path) as f:
        return json.load(f)


@app.post("/predict", response_model=PredictionResponse)
async def predict_tf(file: UploadFile = File(...)):
    tmp = save_upload_temp(file)
    try:
        return predict_with_backend("tensorflow", tmp)
    finally:
        os.unlink(tmp)


@app.post("/predict/onnx", response_model=PredictionResponse)
async def predict_onnx_fp32(file: UploadFile = File(...)):
    tmp = save_upload_temp(file)
    try:
        return predict_with_backend("onnx_fp32", tmp)
    finally:
        os.unlink(tmp)


@app.post("/predict/dynamic", response_model=PredictionResponse)
async def predict_dynamic(file: UploadFile = File(...)):
    tmp = save_upload_temp(file)
    try:
        return predict_with_backend("onnx_dynamic", tmp)
    finally:
        os.unlink(tmp)


@app.post("/predict/static", response_model=PredictionResponse)
async def predict_static(file: UploadFile = File(...)):
    tmp = save_upload_temp(file)
    try:
        return predict_with_backend("onnx_static", tmp)
    finally:
        os.unlink(tmp)


@app.post("/predict/gradcam", response_model=GradCamResponse)
async def predict_gradcam(file: UploadFile = File(...)):
    if "tensorflow" not in predictors:
        raise HTTPException(status_code=503, detail="TF model not loaded.")

    tmp = save_upload_temp(file)
    try:
        t0 = time.time()
        result = predictors["tensorflow"].predict_with_gradcam(tmp)
        latency = round((time.time() - t0) * 1000, 2)

        gradcam_b64 = numpy_to_b64(result["gradcam_overlay"])
        heatmap_b64 = numpy_to_b64(
            cv2.applyColorMap(
                np.uint8(255 * result["heatmap"]), cv2.COLORMAP_JET
            )[:, :, ::-1]
        )

        return {
            "predicted_class"  : result["predicted_class"],
            "confidence"       : result["confidence"],
            "all_probabilities": result["all_probabilities"],
            "backend"          : result["backend"],
            "latency_ms"       : latency,
            "gradcam_b64"      : gradcam_b64,
            "heatmap_b64"      : heatmap_b64,
        }
    finally:
        os.unlink(tmp)


# ---------------------------------------------------------------------------
# Run
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    import uvicorn
    api_cfg = cfg["api"]
    uvicorn.run(
        "app:app",
        host   = api_cfg["host"],
        port   = api_cfg["port"],
        reload = api_cfg["reload"],
    )
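For reference, a minimal client sketch for these endpoints. This is a hypothetical example, not part of the commit: it assumes the server is running locally on the port set in config.yaml, uses the third-party `requests` library, and the sample image filename is made up.

# client_example.py : hypothetical usage sketch, not part of this commit
import base64
import requests

BASE_URL = "http://localhost:8000"   # api.port from config.yaml
IMAGE    = "sample_mri.jpg"          # illustrative input path

# TF prediction: every predict endpoint expects a multipart field named "file"
with open(IMAGE, "rb") as f:
    resp = requests.post(f"{BASE_URL}/predict", files={"file": f})
resp.raise_for_status()
print(resp.json())  # predicted_class, confidence, all_probabilities, backend, latency_ms

# Grad-CAM prediction: decode the base64 overlay back into a PNG file
with open(IMAGE, "rb") as f:
    resp = requests.post(f"{BASE_URL}/predict/gradcam", files={"file": f})
resp.raise_for_status()
with open("gradcam_overlay.png", "wb") as out:
    out.write(base64.b64decode(resp.json()["gradcam_b64"]))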
config.yaml
ADDED
@@ -0,0 +1,47 @@
project:
  name: "Brain Tumor Classification"
  version: "1.0.0"
  seed: 42

data:
  train_dir: "./data/Training"
  test_dir: "./data/Testing"
  image_size: [150, 150]
  batch_size: 16
  validation_split: 0.2
  classes: ["glioma", "meningioma", "notumor", "pituitary"]

augmentation:
  rotation_range: 15
  width_shift_range: 0.1
  height_shift_range: 0.1
  zoom_range: 0.1
  horizontal_flip: true
  brightness_range: [0.9, 1.1]

training:
  epochs: 20
  early_stopping_patience: 5
  reduce_lr_patience: 3
  reduce_lr_factor: 0.5
  min_lr: 0.000001

optuna:
  n_trials: 10
  direction: "maximize"

models:
  save_dir: "./saved_models"
  onnx_dir: "./onnx_models"
  logs_dir: "./logs"

mlflow:
  dagshub_username: "shoaib.shahriar01"
  dagshub_token: "2df3bdfdd248ea99d5af9a5d3e485bfaf6ca7c81"
  dagshub_repo: "brain-tumor-classification"
  experiment_name: "Brain_Tumor_Classification"

api:
  host: "0.0.0.0"
  port: 8000
  reload: true
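src/utils.py is not included in this diff, so the exact behavior of load_config is not shown; a plausible minimal implementation, assuming it simply wraps yaml.safe_load, looks like this:

# Hypothetical sketch of load_config; the real one lives in src/utils.py (not shown here).
import yaml

def load_config(path: str = "config.yaml") -> dict:
    # Returns the YAML file as a plain dict, e.g. cfg["api"]["port"] -> 8000
    with open(path, "r", encoding="utf-8") as f:
        return yaml.safe_load(f)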
evaluate.py
ADDED
@@ -0,0 +1,126 @@
# evaluate.py — confusion matrix, classification report, per-class metrics

import os
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import tensorflow as tf
from sklearn.metrics import classification_report, confusion_matrix
from src.utils import get_logger, load_config
from src.data_loader import get_data_generators

logger = get_logger("evaluate")


def evaluate_model(model, test_data, class_names: list,
                   model_name: str = "model", save_dir: str = "./logs"):
    os.makedirs(save_dir, exist_ok=True)
    logger.info(f"Evaluating {model_name} on test set ...")

    # Reset generator so it always starts from the beginning
    test_data.reset()

    y_pred_prob = model.predict(test_data, verbose=1)
    y_pred = np.argmax(y_pred_prob, axis=1)
    y_true = test_data.classes

    # Align lengths — generator may yield slightly more due to batch rounding
    min_len = min(len(y_true), len(y_pred))
    y_true = y_true[:min_len]
    y_pred = y_pred[:min_len]

    # Classification report
    report = classification_report(y_true, y_pred, target_names=class_names, digits=4)
    logger.info(f"\nClassification Report - {model_name}:\n{report}")

    report_path = os.path.join(save_dir, f"{model_name}_report.txt")
    with open(report_path, "w", encoding="utf-8") as f:
        f.write(f"Classification Report - {model_name}\n")
        f.write("=" * 60 + "\n")
        f.write(report)
    logger.info(f"Report saved -> {report_path}")

    # Confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    plt.figure(figsize=(8, 6))
    sns.heatmap(
        cm, annot=True, fmt="d", cmap="Blues",
        xticklabels=class_names, yticklabels=class_names,
        linewidths=0.5,
    )
    plt.title(f"Confusion Matrix - {model_name}")
    plt.ylabel("True Label")
    plt.xlabel("Predicted Label")
    plt.tight_layout()

    cm_path = os.path.join(save_dir, f"{model_name}_confusion_matrix.png")
    plt.savefig(cm_path, dpi=100, bbox_inches="tight")
    plt.show()
    plt.close()
    logger.info(f"Confusion matrix saved -> {cm_path}")

    # Use sklearn accuracy (more reliable than model.evaluate on generators)
    test_acc = float((y_pred == y_true).mean())
    logger.info(f"{model_name} - Test accuracy: {test_acc:.4f}")

    return {
        "test_accuracy" : test_acc,
        "y_true"        : y_true.tolist(),
        "y_pred"        : y_pred.tolist(),
    }


def evaluate_all_models(models_dict: dict, test_data, class_names: list,
                        save_dir: str = "./logs"):
    results = {}
    for name, model in models_dict.items():
        safe_name = name.lower().replace(" ", "_")
        results[name] = evaluate_model(
            model, test_data, class_names,
            model_name=safe_name, save_dir=save_dir,
        )

    logger.info("\n" + "=" * 45)
    logger.info(f"{'Model':<25} {'Test Acc':>10}")
    logger.info("=" * 45)
    for name, r in results.items():
        logger.info(f"{name:<25} {r['test_accuracy']:>10.4f}")
    logger.info("=" * 45)
    return results


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    cfg = load_config("config.yaml")
    _, _, test_data = get_data_generators(cfg)

    class_names = cfg["data"]["classes"]
    save_dir    = cfg["models"]["save_dir"]
    logs_dir    = cfg["models"]["logs_dir"]
    os.makedirs(logs_dir, exist_ok=True)

    # Load best saved model
    best_model_path = os.path.join(save_dir, "ft_best.h5")
    if not os.path.exists(best_model_path):
        logger.error(f"Model not found at {best_model_path}. Run train.py first.")
        exit(1)

    logger.info(f"Loading model from {best_model_path}")

    # compile=False avoids optimizer state errors when loading
    # MobileNetV2 or EfficientNetB0 models in TF 2.10
    model = tf.keras.models.load_model(best_model_path, compile=False)
    model.compile(
        optimizer = "adam",
        loss      = "categorical_crossentropy",
        metrics   = ["accuracy"],
    )

    results = evaluate_model(
        model, test_data, class_names,
        model_name="best_model", save_dir=logs_dir,
    )
    logger.info("Evaluation complete.")
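For context, a short usage sketch for evaluate_all_models. The model display name is illustrative; only ft_best.h5 exists in this commit, and further entries would be added the same way.

# Hypothetical usage sketch for evaluate_all_models.
import tensorflow as tf
from src.utils import load_config
from src.data_loader import get_data_generators
from evaluate import evaluate_all_models

cfg = load_config("config.yaml")
_, _, test_data = get_data_generators(cfg)

models = {
    "MobileNet Transfer": tf.keras.models.load_model("./saved_models/ft_best.h5", compile=False),
    # Add further saved models here to compare them in one summary table.
}
results = evaluate_all_models(models, test_data, cfg["data"]["classes"],
                              save_dir=cfg["models"]["logs_dir"])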
logs/api.log
ADDED
@@ -0,0 +1,135 @@
2026-04-01 00:27:54 | INFO | api | Loading models ...
2026-04-01 00:27:54 | ERROR | api | Failed to load TF model: No file or directory found at ./saved_models\best_brain_tumor_model.h5
2026-04-01 00:27:54 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:27:55 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:27:55 | INFO | api | onnx_static loaded.
2026-04-01 00:27:55 | INFO | api | Loaded backends: ['onnx_fp32', 'onnx_static']
2026-04-01 00:37:17 | INFO | api | Loading models ...
2026-04-01 00:37:17 | ERROR | api | Failed to load TF model: No file or directory found at ./saved_models\best_brain_tumor_model.h5
2026-04-01 00:37:17 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:37:17 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:37:17 | INFO | api | onnx_static loaded.
2026-04-01 00:37:17 | INFO | api | Loaded backends: ['onnx_fp32', 'onnx_static']
2026-04-01 00:42:17 | INFO | api | Loading models ...
2026-04-01 00:42:19 | INFO | api | TF model loaded.
2026-04-01 00:42:19 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:42:19 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:42:19 | INFO | api | onnx_static loaded.
2026-04-01 00:42:19 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 00:50:24 | INFO | api | Loading models ...
2026-04-01 00:50:26 | INFO | api | TF model loaded.
2026-04-01 00:50:26 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:50:26 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:50:26 | INFO | api | onnx_static loaded.
2026-04-01 00:50:26 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 00:53:52 | INFO | api | Loading models ...
2026-04-01 00:53:53 | INFO | api | TF model loaded.
2026-04-01 00:53:53 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:53:53 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:53:53 | INFO | api | onnx_static loaded.
2026-04-01 00:53:53 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 00:54:31 | INFO | api | Loading models ...
2026-04-01 00:54:32 | INFO | api | TF model loaded.
2026-04-01 00:54:32 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:54:32 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:54:32 | INFO | api | onnx_static loaded.
2026-04-01 00:54:32 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 00:56:42 | INFO | api | Loading models ...
2026-04-01 00:56:44 | INFO | api | TF model loaded.
2026-04-01 00:56:44 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:56:44 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:56:44 | INFO | api | onnx_static loaded.
2026-04-01 00:56:44 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 00:59:49 | INFO | api | Loading models ...
2026-04-01 00:59:51 | INFO | api | TF model loaded.
2026-04-01 00:59:51 | INFO | api | onnx_fp32 loaded.
2026-04-01 00:59:51 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 00:59:51 | INFO | api | onnx_static loaded.
2026-04-01 00:59:51 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 01:00:17 | INFO | api | Loading models ...
2026-04-01 01:00:18 | INFO | api | TF model loaded.
2026-04-01 01:00:18 | INFO | api | onnx_fp32 loaded.
2026-04-01 01:00:18 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 01:00:18 | INFO | api | onnx_static loaded.
2026-04-01 01:00:18 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 01:01:53 | INFO | api | Loading models ...
2026-04-01 01:01:54 | INFO | api | TF model loaded.
2026-04-01 01:01:54 | INFO | api | onnx_fp32 loaded.
2026-04-01 01:01:55 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 01:01:55 | INFO | api | onnx_static loaded.
2026-04-01 01:01:55 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:18:47 | INFO | api | [OK] tensorflow loaded.
2026-04-01 11:18:47 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:18:47 | ERROR | api | [FAIL] onnx_dynamic failed to load: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:18:47 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:18:47 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:18:47 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:18:47 | WARNING | api | onnx_dynamic: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:19:01 | INFO | api | [OK] tensorflow loaded.
2026-04-01 11:19:01 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:19:01 | ERROR | api | [FAIL] onnx_dynamic failed to load: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:19:01 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:19:01 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:19:01 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:19:01 | WARNING | api | onnx_dynamic: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:20:15 | INFO | api | [OK] tensorflow loaded.
2026-04-01 11:20:15 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:20:15 | ERROR | api | [FAIL] onnx_dynamic failed to load: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:20:15 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:20:15 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:20:15 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:20:15 | WARNING | api | onnx_dynamic: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:20:41 | INFO | api | Loading models ...
2026-04-01 11:20:43 | INFO | api | TF model loaded.
2026-04-01 11:20:43 | INFO | api | onnx_fp32 loaded.
2026-04-01 11:20:43 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:20:43 | INFO | api | onnx_static loaded.
2026-04-01 11:20:43 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:20:59 | INFO | api | Loading models ...
2026-04-01 11:21:01 | INFO | api | TF model loaded.
2026-04-01 11:21:01 | INFO | api | onnx_fp32 loaded.
2026-04-01 11:21:01 | WARNING | api | onnx_dynamic not available: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:21:01 | INFO | api | onnx_static loaded.
2026-04-01 11:21:01 | INFO | api | Loaded backends: ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:28:21 | INFO | api | Loading models ...
2026-04-01 11:28:22 | INFO | api | [OK] TF model loaded.
2026-04-01 11:28:22 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:28:23 | ERROR | api | [FAIL] onnx_dynamic failed: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:28:23 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:28:23 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:28:23 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:28:29 | INFO | api | Loading models ...
2026-04-01 11:28:31 | INFO | api | [OK] TF model loaded.
2026-04-01 11:28:31 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:28:31 | ERROR | api | [FAIL] onnx_dynamic failed: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:28:31 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:28:31 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:28:31 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:28:38 | INFO | api | Loading models ...
2026-04-01 11:28:40 | INFO | api | [OK] TF model loaded.
2026-04-01 11:28:40 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:28:40 | ERROR | api | [FAIL] onnx_dynamic failed: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:28:40 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:28:40 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:28:40 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:46:37 | INFO | api | Loading models ...
2026-04-01 11:46:39 | INFO | api | [OK] TF model loaded.
2026-04-01 11:46:39 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:46:39 | ERROR | api | [FAIL] onnx_dynamic failed: ONNX Dynamic INT8 model is not supported in this ONNX Runtime build: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:46:39 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:46:39 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:46:39 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 11:51:09 | INFO | api | Loading models ...
2026-04-01 11:51:10 | INFO | api | [OK] TF model loaded.
2026-04-01 11:51:10 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 11:51:10 | ERROR | api | [FAIL] onnx_dynamic failed: ONNX Dynamic INT8 model is not supported in this ONNX Runtime build: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:51:10 | INFO | api | [OK] onnx_static loaded.
2026-04-01 11:51:10 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 11:51:10 | WARNING | api | Failed backends : ['onnx_dynamic']
2026-04-01 12:06:18 | INFO | api | Loading models ...
2026-04-01 12:06:20 | INFO | api | [OK] TF model loaded.
2026-04-01 12:06:20 | INFO | api | [OK] onnx_fp32 loaded.
2026-04-01 12:06:20 | ERROR | api | [FAIL] onnx_dynamic failed: ONNX Dynamic INT8 model is not supported in this ONNX Runtime build: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 12:06:20 | INFO | api | [OK] onnx_static loaded.
2026-04-01 12:06:20 | INFO | api | Loaded backends : ['tensorflow', 'onnx_fp32', 'onnx_static']
2026-04-01 12:06:20 | WARNING | api | Failed backends : ['onnx_dynamic']
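The recurring onnx_dynamic failure in this log comes from ONNX Runtime's CPU execution provider having no ConvInteger kernel at this opset for the model's quantized convolutions. One commonly used workaround (a sketch only, not taken from this repo's export_onnx.py) is to restrict dynamic quantization to MatMul nodes so that no ConvInteger ops are emitted:

# Hypothetical workaround sketch; not the actual export code in this commit.
from onnxruntime.quantization import quantize_dynamic, QuantType

quantize_dynamic(
    model_input="onnx_models/model_fp32.onnx",
    model_output="onnx_models/model_dynamic_int8.onnx",
    weight_type=QuantType.QInt8,
    op_types_to_quantize=["MatMul"],  # skip Conv so no ConvInteger nodes are produced
)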
logs/best_model_confusion_matrix.png
ADDED
logs/best_model_report.txt
ADDED
@@ -0,0 +1,12 @@
Classification Report — best_model
============================================================
              precision    recall  f1-score   support

      glioma     0.9444    0.7225    0.8187       400
  meningioma     0.8414    0.7825    0.8109       400
     notumor     0.9045    0.9950    0.9476       400
   pituitary     0.8237    0.9925    0.9002       400

    accuracy                         0.8731      1600
   macro avg     0.8785    0.8731    0.8694      1600
weighted avg     0.8785    0.8731    0.8694      1600
logs/data_loader.log
ADDED
@@ -0,0 +1,92 @@
2026-03-31 12:45:57 | INFO | data_loader | Train samples : 4480
2026-03-31 12:45:57 | INFO | data_loader | Val samples   : 1120
2026-03-31 12:45:57 | INFO | data_loader | Test samples  : 1600
2026-03-31 12:45:57 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 15:59:57 | INFO | data_loader | Train samples : 4480
2026-03-31 15:59:57 | INFO | data_loader | Val samples   : 1120
2026-03-31 15:59:57 | INFO | data_loader | Test samples  : 1600
2026-03-31 15:59:57 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 16:43:07 | INFO | data_loader | Train samples : 4480
2026-03-31 16:43:07 | INFO | data_loader | Val samples   : 1120
2026-03-31 16:43:07 | INFO | data_loader | Test samples  : 1600
2026-03-31 16:43:07 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 17:36:33 | INFO | data_loader | Train samples : 4480
2026-03-31 17:36:33 | INFO | data_loader | Val samples   : 1120
2026-03-31 17:36:33 | INFO | data_loader | Test samples  : 1600
2026-03-31 17:36:33 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 20:07:21 | INFO | data_loader | Train samples : 4480
2026-03-31 20:07:21 | INFO | data_loader | Val samples   : 1120
2026-03-31 20:07:21 | INFO | data_loader | Test samples  : 1600
2026-03-31 20:07:21 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 20:34:10 | INFO | data_loader | Train samples : 4480
2026-03-31 20:34:10 | INFO | data_loader | Val samples   : 1120
2026-03-31 20:34:10 | INFO | data_loader | Test samples  : 1600
2026-03-31 20:34:10 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 21:12:50 | INFO | data_loader | Train samples : 4480
2026-03-31 21:12:50 | INFO | data_loader | Val samples   : 1120
2026-03-31 21:12:50 | INFO | data_loader | Test samples  : 1600
2026-03-31 21:12:50 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:02:54 | INFO | data_loader | Train samples : 4480
2026-03-31 23:02:54 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:02:54 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:02:54 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:05:49 | INFO | data_loader | Train samples : 4480
2026-03-31 23:05:49 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:05:49 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:05:49 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:12:49 | INFO | data_loader | Train samples : 4480
2026-03-31 23:12:49 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:12:49 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:12:49 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:13:36 | INFO | data_loader | Train samples : 4480
2026-03-31 23:13:36 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:13:36 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:13:36 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:47:29 | INFO | data_loader | Train samples : 4480
2026-03-31 23:47:29 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:47:29 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:47:29 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:48:32 | INFO | data_loader | Train samples : 4480
2026-03-31 23:48:32 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:48:32 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:48:32 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:48:45 | INFO | data_loader | Train samples : 4480
2026-03-31 23:48:45 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:48:45 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:48:45 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-03-31 23:51:06 | INFO | data_loader | Train samples : 4480
2026-03-31 23:51:06 | INFO | data_loader | Val samples   : 1120
2026-03-31 23:51:06 | INFO | data_loader | Test samples  : 1600
2026-03-31 23:51:06 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 00:13:41 | INFO | data_loader | Train samples : 4480
2026-04-01 00:13:41 | INFO | data_loader | Val samples   : 1120
2026-04-01 00:13:41 | INFO | data_loader | Test samples  : 1600
2026-04-01 00:13:41 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 00:14:18 | INFO | data_loader | Train samples : 4480
2026-04-01 00:14:18 | INFO | data_loader | Val samples   : 1120
2026-04-01 00:14:18 | INFO | data_loader | Test samples  : 1600
2026-04-01 00:14:18 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 00:20:40 | INFO | data_loader | Train samples : 4480
2026-04-01 00:20:40 | INFO | data_loader | Val samples   : 1120
2026-04-01 00:20:40 | INFO | data_loader | Test samples  : 1600
2026-04-01 00:20:40 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 00:21:49 | INFO | data_loader | Train samples : 4480
2026-04-01 00:21:49 | INFO | data_loader | Val samples   : 1120
2026-04-01 00:21:49 | INFO | data_loader | Test samples  : 1600
2026-04-01 00:21:49 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 11:32:15 | INFO | data_loader | Train samples : 4480
2026-04-01 11:32:15 | INFO | data_loader | Val samples   : 1120
2026-04-01 11:32:15 | INFO | data_loader | Test samples  : 1600
2026-04-01 11:32:15 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 11:35:30 | INFO | data_loader | Train samples : 4480
2026-04-01 11:35:30 | INFO | data_loader | Val samples   : 1120
2026-04-01 11:35:30 | INFO | data_loader | Test samples  : 1600
2026-04-01 11:35:30 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 11:45:39 | INFO | data_loader | Train samples : 4480
2026-04-01 11:45:39 | INFO | data_loader | Val samples   : 1120
2026-04-01 11:45:39 | INFO | data_loader | Test samples  : 1600
2026-04-01 11:45:39 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
2026-04-01 11:50:04 | INFO | data_loader | Train samples : 4480
2026-04-01 11:50:04 | INFO | data_loader | Val samples   : 1120
2026-04-01 11:50:04 | INFO | data_loader | Test samples  : 1600
2026-04-01 11:50:04 | INFO | data_loader | Classes       : {'glioma': 0, 'meningioma': 1, 'notumor': 2, 'pituitary': 3}
logs/evaluate.log
ADDED
@@ -0,0 +1,58 @@
2026-03-31 23:02:54 | ERROR | evaluate | Model not found at ./saved_models\best_brain_tumor_model.h5. Run train.py first.
2026-03-31 23:05:49 | INFO | evaluate | Loading model from ./saved_models\ft_best.h5
2026-03-31 23:05:51 | INFO | evaluate | Evaluating best_model on test set ...
2026-03-31 23:06:06 | INFO | evaluate |
Classification Report — best_model:
              precision    recall  f1-score   support

      glioma     0.2484    0.1900    0.2153       400
  meningioma     0.2554    0.2375    0.2461       400
     notumor     0.2568    0.2825    0.2690       400
   pituitary     0.2759    0.3325    0.3016       400

    accuracy                         0.2606      1600
   macro avg     0.2591    0.2606    0.2580      1600
weighted avg     0.2591    0.2606    0.2580      1600

2026-03-31 23:06:06 | INFO | evaluate | Report saved → ./logs\best_model_report.txt
2026-03-31 23:06:53 | INFO | evaluate | Confusion matrix saved → ./logs\best_model_confusion_matrix.png
2026-03-31 23:06:56 | INFO | evaluate | best_model — Test accuracy: 0.8731 | Test loss: 0.5124
2026-03-31 23:06:56 | INFO | evaluate | Evaluation complete.
2026-03-31 23:12:49 | INFO | evaluate | Loading model from ./saved_models\ft_best.h5
2026-03-31 23:12:51 | INFO | evaluate | Evaluating best_model on test set ...
2026-03-31 23:12:57 | INFO | evaluate |
Classification Report — best_model:
              precision    recall  f1-score   support

      glioma     0.2320    0.1775    0.2011       400
  meningioma     0.2500    0.2325    0.2409       400
     notumor     0.2409    0.2650    0.2524       400
   pituitary     0.2676    0.3225    0.2925       400

    accuracy                         0.2494      1600
   macro avg     0.2476    0.2494    0.2467      1600
weighted avg     0.2476    0.2494    0.2467      1600

2026-03-31 23:12:57 | INFO | evaluate | Report saved → ./logs\best_model_report.txt
2026-03-31 23:13:00 | INFO | evaluate | Confusion matrix saved → ./logs\best_model_confusion_matrix.png
2026-03-31 23:13:03 | INFO | evaluate | best_model — Test accuracy: 0.8731 | Test loss: 0.5124
2026-03-31 23:13:03 | INFO | evaluate | Evaluation complete.
2026-03-31 23:13:36 | INFO | evaluate | Loading model from ./saved_models\ft_best.h5
2026-03-31 23:13:38 | INFO | evaluate | Evaluating best_model on test set ...
2026-03-31 23:13:44 | INFO | evaluate |
Classification Report — best_model:
              precision    recall  f1-score   support

      glioma     0.9444    0.7225    0.8187       400
  meningioma     0.8414    0.7825    0.8109       400
     notumor     0.9045    0.9950    0.9476       400
   pituitary     0.8237    0.9925    0.9002       400

    accuracy                         0.8731      1600
   macro avg     0.8785    0.8731    0.8694      1600
weighted avg     0.8785    0.8731    0.8694      1600

2026-03-31 23:13:44 | INFO | evaluate | Report saved → ./logs\best_model_report.txt
2026-03-31 23:13:46 | INFO | evaluate | Confusion matrix saved → ./logs\best_model_confusion_matrix.png
2026-03-31 23:13:49 | INFO | evaluate | best_model — Test accuracy: 0.8731 | Test loss: 0.5124
2026-03-31 23:13:49 | INFO | evaluate | Evaluation complete.
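Note that the first two runs in this log report near-chance per-class metrics (accuracy around 0.25) while the logged test accuracy stays at 0.8731, and the third run's report matches 0.8731. A common cause of exactly this pattern is a test generator created with shuffle=True, which makes test_data.classes disagree with the order of model.predict's outputs. A minimal sketch of the usual fix, assuming Keras' flow_from_directory is used in src/data_loader.py (not shown in this diff; test_datagen is an assumed ImageDataGenerator name):

# Hypothetical sketch; src/data_loader.py is not part of this diff.
# With shuffle=True the generator reorders samples on each pass, so
# test_data.classes no longer lines up with model.predict(test_data).
test_data = test_datagen.flow_from_directory(
    cfg["data"]["test_dir"],
    target_size=tuple(cfg["data"]["image_size"]),
    batch_size=cfg["data"]["batch_size"],
    class_mode="categorical",
    shuffle=False,  # keep file order fixed so labels align with predictions
)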
logs/export_onnx.log
ADDED
@@ -0,0 +1,88 @@
2026-03-31 23:47:29 | INFO | export_onnx | Loading model from ./saved_models\best_brain_tumor_model.h5
2026-03-31 23:48:32 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-03-31 23:48:45 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-03-31 23:51:06 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 00:13:41 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 00:13:43 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 00:13:48 | INFO | export_onnx | ONNX export successful — size: 9.14 MB
2026-04-01 00:13:48 | WARNING | export_onnx | Dynamic quantization skipped: No module named 'onnx.reference'
2026-04-01 00:13:48 | WARNING | export_onnx | Static quantization skipped: No module named 'onnx.reference'
2026-04-01 00:13:48 | INFO | export_onnx |
Benchmarking model formats ...
2026-04-01 00:14:18 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 00:14:19 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 00:14:25 | INFO | export_onnx | ONNX export successful — size: 9.14 MB
2026-04-01 00:14:25 | WARNING | export_onnx | Dynamic quantization skipped: No module named 'onnx.reference'
2026-04-01 00:14:25 | WARNING | export_onnx | Static quantization skipped: No module named 'onnx.reference'
2026-04-01 00:14:25 | INFO | export_onnx |
Benchmarking model formats ...
2026-04-01 00:14:28 | INFO | export_onnx | TensorFlow (FP32) | acc=0.7000 | 10.53 ms/sample
2026-04-01 00:14:28 | INFO | export_onnx | ONNX FP32 | acc=0.7000 | 3.02 ms/sample | 9.14 MB
2026-04-01 00:14:28 | WARNING | export_onnx | Skipping ONNX Dynamic INT8 — path is None
2026-04-01 00:14:28 | WARNING | export_onnx | Skipping ONNX Static INT8 — path is None
2026-04-01 00:14:29 | INFO | export_onnx | Benchmark results saved → ./onnx_models\benchmark_results.json
2026-04-01 00:14:29 | WARNING | export_onnx | ONNX export complete. Quantization skipped: No module named 'onnx.reference'
2026-04-01 00:20:40 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 00:20:42 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 00:20:48 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 00:20:48 | INFO | export_onnx | Applying Dynamic Quantization → ./onnx_models\model_dynamic_int8.onnx
2026-04-01 00:21:49 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 00:21:51 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 00:21:57 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 00:21:57 | INFO | export_onnx | Applying Dynamic Quantization → ./onnx_models\model_dynamic_int8.onnx
2026-04-01 00:21:57 | INFO | export_onnx | Dynamic quantized model — size: 2.50 MB
2026-04-01 00:21:57 | INFO | export_onnx | Applying Static Quantization → ./onnx_models\model_static_int8.onnx
2026-04-01 00:21:59 | INFO | export_onnx | Calibration reader: 10 batches loaded
2026-04-01 00:22:02 | INFO | export_onnx | Static quantized model — size: 2.51 MB
2026-04-01 00:22:02 | INFO | export_onnx |
Benchmarking model formats ...
2026-04-01 00:22:04 | INFO | export_onnx | TensorFlow (FP32) | acc=0.7000 | 9.38 ms/sample
2026-04-01 00:22:05 | INFO | export_onnx | ONNX FP32 | acc=0.7000 | 5.42 ms/sample | 9.10 MB
2026-04-01 11:32:15 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 11:32:17 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 11:32:23 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 11:32:23 | INFO | export_onnx | Applying Dynamic Quantization -> ./onnx_models\model_dynamic_int8.onnx
2026-04-01 11:35:30 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 11:35:32 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 11:35:37 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 11:35:37 | INFO | export_onnx | Applying Dynamic Quantization → ./onnx_models\model_dynamic_int8.onnx
2026-04-01 11:35:38 | INFO | export_onnx | Dynamic quantized model — size: 2.50 MB
2026-04-01 11:35:38 | INFO | export_onnx | Applying Static Quantization → ./onnx_models\model_static_int8.onnx
2026-04-01 11:35:40 | INFO | export_onnx | Calibration reader: 10 batches loaded
2026-04-01 11:35:42 | INFO | export_onnx | Static quantized model — size: 2.51 MB
2026-04-01 11:35:42 | INFO | export_onnx |
Benchmarking model formats ...
2026-04-01 11:35:46 | INFO | export_onnx | TensorFlow (FP32) | acc=0.7000 | 9.62 ms/sample
2026-04-01 11:35:47 | INFO | export_onnx | ONNX FP32 | acc=0.7000 | 5.07 ms/sample | 9.10 MB
2026-04-01 11:45:39 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 11:45:40 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 11:45:46 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 11:45:46 | INFO | export_onnx | Applying Dynamic Quantization → ./onnx_models\model_dynamic_int8.onnx
2026-04-01 11:45:47 | INFO | export_onnx | Dynamic quantized model — size: 2.50 MB
2026-04-01 11:45:47 | INFO | export_onnx | Applying Static Quantization → ./onnx_models\model_static_int8.onnx
2026-04-01 11:45:48 | INFO | export_onnx | Calibration reader: 10 batches loaded
2026-04-01 11:45:51 | INFO | export_onnx | Static quantized model — size: 2.51 MB
2026-04-01 11:45:51 | INFO | export_onnx |
Benchmarking model formats ...
2026-04-01 11:45:53 | INFO | export_onnx | TensorFlow (FP32) | acc=0.7000 | 9.55 ms/sample
2026-04-01 11:45:54 | INFO | export_onnx | ONNX FP32 | acc=0.7000 | 5.14 ms/sample | 9.10 MB
2026-04-01 11:45:54 | WARNING | export_onnx | Skipping ONNX Dynamic INT8 due to runtime error: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
2026-04-01 11:45:55 | INFO | export_onnx | ONNX Static INT8 | acc=0.0050 | 4.53 ms/sample | 2.51 MB
2026-04-01 11:45:55 | INFO | export_onnx | Benchmark results saved → ./onnx_models\benchmark_results.json
2026-04-01 11:45:55 | INFO | export_onnx | ONNX export complete. Quantization attempted.
2026-04-01 11:50:04 | INFO | export_onnx | Loading model from ./saved_models\ft_best.h5
2026-04-01 11:50:06 | INFO | export_onnx | Exporting model to ONNX → ./onnx_models\model_fp32.onnx
2026-04-01 11:50:11 | INFO | export_onnx | ONNX export successful — size: 9.10 MB
2026-04-01 11:50:11 | INFO | export_onnx | Applying Dynamic Quantization → ./onnx_models\model_dynamic_int8.onnx
2026-04-01 11:50:12 | INFO | export_onnx | Dynamic quantized model — size: 2.50 MB
2026-04-01 11:50:12 | INFO | export_onnx | Applying Static Quantization → ./onnx_models\model_static_int8.onnx
|
| 79 |
+
2026-04-01 11:50:19 | INFO | export_onnx | Calibration reader: 50 batches loaded
|
| 80 |
+
2026-04-01 11:50:30 | INFO | export_onnx | Static quantized model — size: 2.51 MB
|
| 81 |
+
2026-04-01 11:50:30 | INFO | export_onnx |
|
| 82 |
+
Benchmarking model formats ...
|
| 83 |
+
2026-04-01 11:50:32 | INFO | export_onnx | TensorFlow (FP32) | acc=0.7000 | 9.15 ms/sample
|
| 84 |
+
2026-04-01 11:50:33 | INFO | export_onnx | ONNX FP32 | acc=0.7000 | 5.29 ms/sample | 9.10 MB
|
| 85 |
+
2026-04-01 11:50:33 | WARNING | export_onnx | Skipping ONNX Dynamic INT8 due to runtime error: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for ConvInteger(10) node with name 'mobilenet_transfer/mobilenetv2_1.00_224/Conv1/Conv2D_quant'
|
| 86 |
+
2026-04-01 11:50:34 | INFO | export_onnx | ONNX Static INT8 | acc=0.0050 | 4.19 ms/sample | 2.51 MB
|
| 87 |
+
2026-04-01 11:50:34 | INFO | export_onnx | Benchmark results saved → ./onnx_models\benchmark_results.json
|
| 88 |
+
2026-04-01 11:50:34 | INFO | export_onnx | ONNX export complete. Quantization attempted.
|
logs/model_comparison.png
ADDED

logs/models.log
ADDED
@@ -0,0 +1,48 @@
+2026-03-31 12:46:00 | INFO | models | Built Baseline CNN
+2026-03-31 12:57:32 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 13:08:18 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 13:13:19 | INFO | models | Built EfficientNetB0 Transfer Learning model
+2026-03-31 15:59:58 | INFO | models | Built Baseline CNN
+2026-03-31 16:11:02 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 16:22:01 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 16:27:01 | INFO | models | Built EfficientNetB0 Transfer Learning model
+2026-03-31 16:43:08 | INFO | models | Built Baseline CNN
+2026-03-31 16:53:39 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 17:04:25 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 17:09:23 | INFO | models | Built EfficientNetB0 Transfer Learning model
+2026-03-31 17:36:34 | INFO | models | Built Baseline CNN
+2026-03-31 17:47:07 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 17:54:34 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 18:03:37 | INFO | models | Built EfficientNetB0 Transfer Learning model
+2026-03-31 18:06:52 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 64, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.3528318576498196, 'lr': 0.0023163425192755226}
+2026-03-31 18:09:47 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.298909022597601, 'lr': 0.0012839511259207857}
+2026-03-31 18:13:54 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 64, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.29252720375744357, 'lr': 0.0001280007427686007}
+2026-03-31 18:19:00 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.4273282260839636, 'lr': 0.0011210111966337138}
+2026-03-31 18:23:41 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 128, 'dense_units': 64, 'dropout': 0.22784649015605415, 'lr': 0.005766474167607917}
+2026-03-31 18:25:45 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 256, 'dense_units': 64, 'dropout': 0.48880635124006017, 'lr': 0.0032352191388469636}
+2026-03-31 18:27:52 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 64, 'dropout': 0.41843200212884657, 'lr': 0.00017082690675758604}
+2026-03-31 18:33:06 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 64, 'filters_3': 256, 'dense_units': 64, 'dropout': 0.42332478729875544, 'lr': 0.004321482824942734}
+2026-03-31 18:35:11 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.35511716436262036, 'lr': 0.006308240405890077}
+2026-03-31 18:37:52 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.41542144036132356, 'lr': 0.00011919711923710529}
+2026-03-31 18:43:25 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.41542144036132356, 'lr': 0.00011919711923710529}
+2026-03-31 20:07:22 | INFO | models | Built Baseline CNN
+2026-03-31 20:18:34 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 20:30:54 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 20:34:10 | INFO | models | Built Baseline CNN
+2026-03-31 20:46:13 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 20:58:01 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 21:12:51 | INFO | models | Built Baseline CNN
+2026-03-31 21:24:33 | INFO | models | Built MobileNetV2 Transfer Learning model
+2026-03-31 21:33:02 | INFO | models | Fine-tuned MobileNetV2: last 20 layers unfrozen
+2026-03-31 21:44:07 | INFO | models | Built EfficientNetB0 Transfer Learning model
+2026-03-31 21:54:35 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 128, 'dense_units': 64, 'dropout': 0.32019749083776056, 'lr': 0.00013123523142015807}
+2026-03-31 21:59:44 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 128, 'dense_units': 64, 'dropout': 0.3608500202287982, 'lr': 0.006160897969691244}
+2026-03-31 22:02:36 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 64, 'filters_3': 256, 'dense_units': 64, 'dropout': 0.36105752942364244, 'lr': 0.0049551985659201875}
+2026-03-31 22:05:22 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 64, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.23703993355843925, 'lr': 0.00038430793245279804}
+2026-03-31 22:10:37 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 64, 'filters_3': 256, 'dense_units': 256, 'dropout': 0.4927392453904861, 'lr': 0.00046537997987104555}
+2026-03-31 22:15:47 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 128, 'dense_units': 128, 'dropout': 0.29917774216119697, 'lr': 0.004167362541330431}
+2026-03-31 22:18:57 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 128, 'dropout': 0.21910790415316955, 'lr': 0.001097875265927963}
+2026-03-31 22:24:20 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.2258892046075821, 'lr': 0.00022392987135907492}
+2026-03-31 22:29:43 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 256, 'dense_units': 64, 'dropout': 0.26014293287733325, 'lr': 0.0028061362157403144}
+2026-03-31 22:33:27 | INFO | models | Built Optuna CNN with params: {'filters_1': 32, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.34722069690590884, 'lr': 0.0025020125205854928}
+2026-03-31 22:39:09 | INFO | models | Built Optuna CNN with params: {'filters_1': 64, 'filters_2': 128, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.2258892046075821, 'lr': 0.00022392987135907492}
logs/predict.log
ADDED
@@ -0,0 +1,96 @@
+2026-04-01 00:27:54 | INFO | predict | Loading TF model from ./saved_models\best_brain_tumor_model.h5
+2026-04-01 00:27:54 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:27:54 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:27:55 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:37:17 | INFO | predict | Loading TF model from ./saved_models\best_brain_tumor_model.h5
+2026-04-01 00:37:17 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:37:17 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:37:17 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:42:17 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:42:19 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:42:19 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:42:19 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:50:24 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:50:26 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:50:26 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:50:26 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:53:52 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:53:53 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:53:53 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:53:53 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:54:31 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:54:32 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:54:32 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:54:32 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:56:42 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:56:44 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:56:44 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:56:44 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 00:59:49 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 00:59:51 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 00:59:51 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 00:59:51 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 01:00:17 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 01:00:18 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 01:00:18 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 01:00:18 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 01:01:53 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 01:01:54 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 01:01:54 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 01:01:55 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:18:46 | INFO | predict | best_brain_tumor_model.h5 not found, using ft_best.h5
+2026-04-01 11:18:46 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:18:47 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:18:47 | INFO | predict | onnx_fp32 loaded successfully.
+2026-04-01 11:18:47 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:18:47 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:18:47 | INFO | predict | onnx_static loaded successfully.
+2026-04-01 11:18:59 | INFO | predict | best_brain_tumor_model.h5 not found, using ft_best.h5
+2026-04-01 11:18:59 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:19:01 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:19:01 | INFO | predict | onnx_fp32 loaded successfully.
+2026-04-01 11:19:01 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:19:01 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:19:01 | INFO | predict | onnx_static loaded successfully.
+2026-04-01 11:20:13 | INFO | predict | best_brain_tumor_model.h5 not found, using ft_best.h5
+2026-04-01 11:20:13 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:20:15 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:20:15 | INFO | predict | onnx_fp32 loaded successfully.
+2026-04-01 11:20:15 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:20:15 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:20:15 | INFO | predict | onnx_static loaded successfully.
+2026-04-01 11:20:41 | INFO | predict | best_brain_tumor_model.h5 not found, using ft_best.h5
+2026-04-01 11:20:41 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:20:43 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:20:43 | INFO | predict | onnx_fp32 loaded successfully.
+2026-04-01 11:20:43 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:20:43 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:20:43 | INFO | predict | onnx_static loaded successfully.
+2026-04-01 11:20:59 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:21:01 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:21:01 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:21:01 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:28:21 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:28:22 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:28:22 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:28:23 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:28:29 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:28:31 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:28:31 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:28:31 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:28:38 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:28:40 | INFO | predict | Loading onnx_fp32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:28:40 | INFO | predict | Loading onnx_dynamic from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:28:40 | INFO | predict | Loading onnx_static from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:46:37 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:46:39 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:46:39 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:46:39 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 11:51:09 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 11:51:10 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 11:51:10 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 11:51:10 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
+2026-04-01 12:06:18 | INFO | predict | Loading TF model from ./saved_models\ft_best.h5
+2026-04-01 12:06:20 | INFO | predict | Loading ONNX FP32 from ./onnx_models\model_fp32.onnx
+2026-04-01 12:06:20 | INFO | predict | Loading ONNX Dynamic INT8 from ./onnx_models\model_dynamic_int8.onnx
+2026-04-01 12:06:20 | INFO | predict | Loading ONNX Static INT8 from ./onnx_models\model_static_int8.onnx
logs/train.log
ADDED
@@ -0,0 +1,86 @@
+2026-03-31 12:45:57 | INFO | train | ==================================================
+2026-03-31 12:45:57 | INFO | train | Starting training pipeline
+2026-03-31 12:45:57 | INFO | train | ==================================================
+2026-03-31 12:57:30 | INFO | train | Baseline CNN training complete.
+2026-03-31 13:08:17 | INFO | train | Transfer Learning training complete.
+2026-03-31 13:13:09 | INFO | train | Fine-Tuned training complete.
+2026-03-31 15:59:57 | INFO | train | ==================================================
+2026-03-31 15:59:57 | INFO | train | Starting full training pipeline
+2026-03-31 15:59:57 | INFO | train | ==================================================
+2026-03-31 16:11:00 | INFO | train | Baseline CNN training complete.
+2026-03-31 16:22:00 | INFO | train | Transfer Learning training complete.
+2026-03-31 16:26:58 | INFO | train | Fine-Tuned training complete.
+2026-03-31 16:43:07 | INFO | train | ==================================================
+2026-03-31 16:43:07 | INFO | train | Starting full training pipeline
+2026-03-31 16:43:07 | INFO | train | ==================================================
+2026-03-31 16:53:37 | INFO | train | Baseline CNN training complete.
+2026-03-31 17:04:24 | INFO | train | Transfer Learning training complete.
+2026-03-31 17:09:20 | INFO | train | Fine-Tuned training complete.
+2026-03-31 17:09:57 | INFO | train | EfficientNet weights saved (val_acc=0.2500)
+2026-03-31 17:12:19 | INFO | train | Best EfficientNet weights restored.
+2026-03-31 17:36:33 | INFO | train | ==================================================
+2026-03-31 17:36:33 | INFO | train | Starting full training pipeline
+2026-03-31 17:36:33 | INFO | train | ==================================================
+2026-03-31 17:47:05 | INFO | train | Baseline CNN training complete.
+2026-03-31 17:54:34 | INFO | train | Transfer Learning training complete.
+2026-03-31 18:03:34 | INFO | train | Fine-Tuned training complete.
+2026-03-31 18:04:11 | INFO | train | EfficientNet weights saved (val_acc=0.2500)
+2026-03-31 18:06:31 | INFO | train | Best EfficientNet weights restored.
+2026-03-31 18:06:49 | INFO | train | EfficientNetB0 training complete.
+2026-03-31 18:43:24 | INFO | train | Optuna best val_accuracy : 0.8446
+2026-03-31 18:43:24 | INFO | train | Optuna best params : {'filters_1': 64, 'filters_2': 128, 'filters_3': 128, 'dense_units': 256, 'dropout': 0.41542144036132356, 'lr': 0.00011919711923710529}
+2026-03-31 18:56:54 | INFO | train | Optuna Best CNN training complete.
+2026-03-31 18:56:54 | INFO | train |
+=============================================
+2026-03-31 18:56:54 | INFO | train | Model Val Acc
+2026-03-31 18:56:54 | INFO | train | =============================================
+2026-03-31 18:56:54 | INFO | train | Baseline CNN 0.9045
+2026-03-31 18:56:54 | INFO | train | Transfer Learning 0.9250
+2026-03-31 18:56:54 | INFO | train | EfficientNetB0 0.2500
+2026-03-31 18:56:54 | INFO | train | Optuna Best CNN 0.9089
+2026-03-31 18:56:54 | INFO | train | =============================================
+2026-03-31 19:32:15 | INFO | train | Training pipeline complete.
+2026-03-31 20:07:21 | INFO | train | MLflow → DagsHub: https://dagshub.com/shoaib.shahriar01/brain-tumor-classification.mlflow
+2026-03-31 20:07:21 | INFO | train | ==================================================
+2026-03-31 20:07:21 | INFO | train | Starting full training pipeline
+2026-03-31 20:07:21 | INFO | train | ==================================================
+2026-03-31 20:18:32 | INFO | train | Baseline CNN training complete.
+2026-03-31 20:30:53 | INFO | train | Transfer Learning training complete.
+2026-03-31 20:34:09 | INFO | train | MLflow → DagsHub: https://dagshub.com/shoaib.shahriar01/brain-tumor-classification.mlflow
+2026-03-31 20:34:10 | INFO | train | ==================================================
+2026-03-31 20:34:10 | INFO | train | Starting full training pipeline
+2026-03-31 20:34:10 | INFO | train | ==================================================
+2026-03-31 20:46:11 | INFO | train | Baseline CNN training complete.
+2026-03-31 20:58:00 | INFO | train | Transfer Learning training complete.
+2026-03-31 21:04:20 | INFO | train | Fine-Tuned training complete.
+2026-03-31 21:12:50 | INFO | train | MLflow → DagsHub: https://dagshub.com/shoaib.shahriar01/brain-tumor-classification.mlflow
+2026-03-31 21:12:50 | INFO | train | ==================================================
+2026-03-31 21:12:50 | INFO | train | Starting full training pipeline
+2026-03-31 21:12:50 | INFO | train | ==================================================
+2026-03-31 21:24:31 | INFO | train | Baseline CNN training complete.
+2026-03-31 21:33:01 | INFO | train | Transfer Learning training complete.
+2026-03-31 21:44:03 | INFO | train | Fine-Tuned training complete.
+2026-03-31 21:44:42 | INFO | train | EfficientNet weights saved (val_acc=0.8643)
+2026-03-31 21:45:11 | INFO | train | EfficientNet weights saved (val_acc=0.8991)
+2026-03-31 21:46:35 | INFO | train | EfficientNet weights saved (val_acc=0.9107)
+2026-03-31 21:48:03 | INFO | train | EfficientNet weights saved (val_acc=0.9152)
+2026-03-31 21:49:39 | INFO | train | EfficientNet weights saved (val_acc=0.9295)
+2026-03-31 21:51:05 | INFO | train | EfficientNet weights saved (val_acc=0.9357)
+2026-03-31 21:52:02 | INFO | train | EfficientNet weights saved (val_acc=0.9393)
+2026-03-31 21:54:11 | INFO | train | Best EfficientNet weights restored.
+2026-03-31 21:54:32 | INFO | train | EfficientNet weights logged to MLflow → ./saved_models\effnet_final_weights.h5
+2026-03-31 21:54:32 | INFO | train | EfficientNetB0 training complete.
+2026-03-31 22:39:09 | INFO | train | Optuna best val_accuracy : 0.8348
+2026-03-31 22:39:09 | INFO | train | Optuna best params : {'filters_1': 64, 'filters_2': 128, 'filters_3': 256, 'dense_units': 128, 'dropout': 0.2258892046075821, 'lr': 0.00022392987135907492}
+2026-03-31 22:53:50 | INFO | train | Optuna Best CNN training complete.
+2026-03-31 22:53:50 | INFO | train |
+=============================================
+2026-03-31 22:53:50 | INFO | train | Model Val Acc
+2026-03-31 22:53:50 | INFO | train | =============================================
+2026-03-31 22:53:50 | INFO | train | Baseline CNN 0.9009
+2026-03-31 22:53:50 | INFO | train | Transfer Learning 0.9214
+2026-03-31 22:53:50 | INFO | train | Fine-Tuned 0.9366
+2026-03-31 22:53:50 | INFO | train | EfficientNetB0 0.9393 << best
+2026-03-31 22:53:50 | INFO | train | Optuna Best CNN 0.9054
+2026-03-31 22:53:50 | INFO | train | =============================================
+2026-03-31 22:54:07 | INFO | train | Training pipeline complete.
onnx_models/benchmark_results.json
ADDED
@@ -0,0 +1,17 @@
+{
+    "TensorFlow (FP32)": {
+        "latency_ms": 9.146038293838501,
+        "accuracy": 0.7,
+        "size_mb": null
+    },
+    "ONNX FP32": {
+        "latency_ms": 5.294694900512695,
+        "accuracy": 0.7,
+        "size_mb": 9.096985816955566
+    },
+    "ONNX Static INT8": {
+        "latency_ms": 4.193357229232788,
+        "accuracy": 0.005,
+        "size_mb": 2.507014274597168
+    }
+}
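For quick inspection outside the export script, a minimal sketch that re-reads this file and prints one row per format (the path matches the committed location; the formatting choices are my own):

import json

with open("./onnx_models/benchmark_results.json") as f:
    results = json.load(f)

for name, r in results.items():
    # size_mb is null for the TensorFlow entry, since only ONNX files are sized
    size = f"{r['size_mb']:.2f} MB" if r["size_mb"] is not None else "n/a"
    print(f"{name:<24} acc={r['accuracy']:.4f}  {r['latency_ms']:.2f} ms/sample  {size}")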
onnx_models/model_dynamic_int8.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5a6349759f4250ba9b0f0930271eb07ed1c66c759bb729260d5e24ee877448bc
+size 2626089
onnx_models/model_fp32.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:158b718dd58d4e906d1b72c2d84b8554c22c41530c73273991b541a687590f1f
+size 9538881
onnx_models/model_static_int8.onnx
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9a387d14351cb6f199c04aff5f7256a246c6fc7040d5b9b9fafc1309862ee098
+size 2628795
predict.py
ADDED
@@ -0,0 +1,164 @@
+# predict.py — inference engine: TF | ONNX FP32 | Dynamic INT8 | Static INT8
+
+import os
+import numpy as np
+import cv2
+import tensorflow as tf
+import onnxruntime as ort
+from tensorflow.keras.preprocessing.image import load_img
+
+from src.utils import get_logger, get_gradcam_heatmap, get_last_conv_layer
+
+logger = get_logger("predict")
+
+
+class BrainTumorPredictor:
+    """
+    Unified predictor supporting TF model, ONNX FP32,
+    ONNX Dynamic INT8, and ONNX Static INT8.
+    """
+
+    BACKENDS = ["tensorflow", "onnx_fp32", "onnx_dynamic", "onnx_static"]
+
+    def __init__(self, cfg: dict, backend: str = "tensorflow"):
+        if backend not in self.BACKENDS:
+            raise ValueError(f"backend must be one of {self.BACKENDS}")
+
+        self.backend = backend
+        self.image_size = tuple(cfg["data"]["image_size"])
+        self.class_names = cfg["data"]["classes"]
+        self.save_dir = cfg["models"]["save_dir"]
+        self.onnx_dir = cfg["models"]["onnx_dir"]
+
+        self.tf_model = None
+        self.ort_session = None
+        self._load(backend)
+
+    def _load(self, backend: str):
+        if backend == "tensorflow":
+            path = os.path.join(self.save_dir, "ft_best.h5")
+            logger.info(f"Loading TF model from {path}")
+            self.tf_model = tf.keras.models.load_model(path, compile=False)
+
+        elif backend == "onnx_fp32":
+            path = os.path.join(self.onnx_dir, "model_fp32.onnx")
+            logger.info(f"Loading ONNX FP32 from {path}")
+            self.ort_session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])
+
+        elif backend == "onnx_dynamic":
+            path = os.path.join(self.onnx_dir, "model_dynamic_int8.onnx")
+            logger.info(f"Loading ONNX Dynamic INT8 from {path}")
+            try:
+                self.ort_session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])
+            except Exception as e:
+                raise RuntimeError(
+                    f"ONNX Dynamic INT8 model is not supported in this ONNX Runtime build: {e}"
+                )
+
+        elif backend == "onnx_static":
+            path = os.path.join(self.onnx_dir, "model_static_int8.onnx")
+            logger.info(f"Loading ONNX Static INT8 from {path}")
+            try:
+                self.ort_session = ort.InferenceSession(path, providers=["CPUExecutionProvider"])
+            except Exception as e:
+                raise RuntimeError(
+                    f"ONNX Static INT8 model is not supported in this ONNX Runtime build: {e}"
+                )
+
+    def preprocess(self, image_path: str) -> tuple:
+        img = load_img(image_path, target_size=self.image_size)
+        arr = np.array(img) / 255.0
+        img_input = np.expand_dims(arr, axis=0).astype(np.float32)
+        return img, arr, img_input
+
+    def predict(self, image_path: str) -> dict:
+        _, _, img_input = self.preprocess(image_path)
+
+        if self.backend == "tensorflow":
+            probs = self.tf_model.predict(img_input, verbose=0)[0]
+        else:
+            inp_name = self.ort_session.get_inputs()[0].name
+            out_name = self.ort_session.get_outputs()[0].name
+            probs = self.ort_session.run([out_name], {inp_name: img_input})[0][0]
+
+        pred_idx = int(np.argmax(probs))
+        pred_class = self.class_names[pred_idx]
+        confidence = float(probs[pred_idx]) * 100
+
+        all_probs = {cls: float(p) * 100 for cls, p in zip(self.class_names, probs)}
+
+        return {
+            "predicted_class": pred_class,
+            "confidence": round(confidence, 2),
+            "all_probabilities": all_probs,
+            "backend": self.backend,
+        }
+
+    def predict_with_gradcam(self, image_path: str) -> dict:
+        if self.backend != "tensorflow":
+            raise RuntimeError("Grad-CAM is only supported with tensorflow backend.")
+
+        result = self.predict(image_path)
+        _, arr, img_input = self.preprocess(image_path)
+
+        last_conv = get_last_conv_layer(self.tf_model)
+        heatmap, _ = get_gradcam_heatmap(self.tf_model, img_input, last_conv)
+
+        heatmap_resized = cv2.resize(heatmap, self.image_size)
+        heatmap_colored = cv2.cvtColor(
+            cv2.applyColorMap(np.uint8(255 * heatmap_resized), cv2.COLORMAP_JET),
+            cv2.COLOR_BGR2RGB
+        )
+        overlay = cv2.addWeighted(np.uint8(255 * arr), 0.6, heatmap_colored, 0.4, 0)
+
+        result["gradcam_overlay"] = overlay
+        result["heatmap"] = heatmap_resized
+        return result
+
+
+if __name__ == "__main__":
+    import argparse
+    import matplotlib.pyplot as plt
+    from src.utils import load_config
+
+    parser = argparse.ArgumentParser(description="Brain Tumor MRI Predictor")
+    parser.add_argument("--image", required=True)
+    parser.add_argument("--backend", default="tensorflow", choices=BrainTumorPredictor.BACKENDS)
+    parser.add_argument("--gradcam", action="store_true")
+    args = parser.parse_args()
+
+    cfg = load_config("config.yaml")
+    predictor = BrainTumorPredictor(cfg, backend=args.backend)
+
+    if args.gradcam and args.backend == "tensorflow":
+        result = predictor.predict_with_gradcam(args.image)
+
+        fig, axes = plt.subplots(1, 3, figsize=(13, 4))
+        img = load_img(args.image, target_size=tuple(cfg["data"]["image_size"]))
+        axes[0].imshow(img)
+        axes[0].set_title("Input MRI")
+        axes[0].axis("off")
+
+        axes[1].imshow(result["heatmap"], cmap="jet")
+        axes[1].set_title("Grad-CAM")
+        axes[1].axis("off")
+
+        axes[2].imshow(result["gradcam_overlay"])
+        axes[2].set_title(f"Pred: {result['predicted_class']} ({result['confidence']:.1f}%)")
+        axes[2].axis("off")
+
+        plt.tight_layout()
+        plt.show()
+    else:
+        result = predictor.predict(args.image)
+
+    print("\n" + "=" * 42)
+    print(f" PREDICTION : {result['predicted_class'].upper()}")
+    print(f" CONFIDENCE : {result['confidence']:.2f}%")
+    print(f" BACKEND    : {result['backend']}")
+    print("=" * 42)
+    print(" All probabilities:")
+    for cls, prob in sorted(result["all_probabilities"].items(), key=lambda x: -x[1]):
+        bar = "█" * int(prob / 4)
+        marker = " ← predicted" if cls == result["predicted_class"] else ""
+        print(f"  {cls:<15} {prob:5.1f}% {bar}{marker}")
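Besides the CLI entry point above, the class can be driven directly from Python; a minimal sketch (the image filename is a hypothetical placeholder, not a file in this repo):

from predict import BrainTumorPredictor
from src.utils import load_config

cfg = load_config("config.yaml")
# Any value from BrainTumorPredictor.BACKENDS works here.
predictor = BrainTumorPredictor(cfg, backend="onnx_fp32")
result = predictor.predict("sample_mri.jpg")  # hypothetical input image
print(result["predicted_class"], result["confidence"])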
requirements.txt
ADDED
@@ -0,0 +1,16 @@
+fastapi==0.115.0
+uvicorn[standard]==0.30.6
+python-multipart==0.0.9
+pydantic==2.8.2
+
+tensorflow==2.10.0
+numpy==1.23.5
+protobuf==3.19.6
+
+onnxruntime==1.18.1
+
+opencv-python-headless==4.7.0.72
+Pillow==10.3.0
+PyYAML==6.0.1
+
+matplotlib==3.7.5
save_model.py
ADDED
@@ -0,0 +1,64 @@
+# save_model.py — save best model, metadata, and Grad-CAM artifacts
+
+import os
+import json
+import numpy as np
+import tensorflow as tf
+import mlflow
+
+from src.utils import get_logger, load_config, generate_gradcam_overlay, get_last_conv_layer
+from src.data_loader import get_data_generators
+
+logger = get_logger("save_model")
+
+
+def save_best_model(model, model_name: str, results: dict,
+                    train_data, cfg: dict):
+    save_dir = cfg["models"]["save_dir"]
+    os.makedirs(save_dir, exist_ok=True)
+    image_size = tuple(cfg["data"]["image_size"])
+    class_names = cfg["data"]["classes"]
+
+    # H5 format
+    h5_path = os.path.join(save_dir, "best_brain_tumor_model.h5")
+    model.save(h5_path)
+    logger.info(f"Model saved (H5) → {h5_path}")
+
+    # SavedModel format
+    sm_path = os.path.join(save_dir, "best_brain_tumor_model")
+    model.save(sm_path)
+    logger.info(f"Model saved (SavedModel) → {sm_path}")
+
+    # Metadata
+    metadata = {
+        "best_model"   : model_name,
+        "class_names"  : class_names,
+        "class_indices": train_data.class_indices,
+        "image_size"   : list(image_size),
+        "all_results"  : {k: float(v) for k, v in results.items()},
+    }
+    meta_path = os.path.join(save_dir, "model_metadata.json")
+    with open(meta_path, "w") as f:
+        json.dump(metadata, f, indent=4)
+    logger.info(f"Metadata saved → {meta_path}")
+
+    return h5_path, sm_path, meta_path
+
+
+def log_gradcam_artifacts(model, train_dir: str, class_names: list,
+                          image_size: tuple, run_id: str, logs_dir: str):
+    last_conv = get_last_conv_layer(model)
+    logger.info(f"Generating Grad-CAM for all {len(class_names)} classes ...")
+
+    for class_name in class_names:
+        folder = os.path.join(train_dir, class_name)
+        sample_img = os.path.join(folder, os.listdir(folder)[0])
+        save_path = os.path.join(logs_dir, f"gradcam_{class_name}.png")
+
+        generate_gradcam_overlay(model, sample_img, last_conv,
+                                 image_size, class_names, save_path=save_path)
+
+        with mlflow.start_run(run_id=run_id):
+            mlflow.log_artifact(save_path, artifact_path=f"gradcam/{class_name}")
+
+        logger.info(f"  {class_name} Grad-CAM logged.")
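A minimal usage sketch for save_best_model, assuming a trained Keras model and the generators from src.data_loader; the results dict below reuses one validation score from logs/train.log purely for illustration:

import tensorflow as tf
from src.utils import load_config
from src.data_loader import get_data_generators
from save_model import save_best_model

cfg = load_config("config.yaml")
train_data, val_data, test_data = get_data_generators(cfg)

# Stand-in for a freshly trained model: the committed fine-tuned checkpoint.
model = tf.keras.models.load_model("./saved_models/ft_best.h5", compile=False)

results = {"Fine-Tuned": 0.9366}  # illustrative subset of the logged scores
h5_path, sm_path, meta_path = save_best_model(model, "Fine-Tuned", results, train_data, cfg)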
saved_models/ft_best.h5
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f32645b86be23839c743381c985e606eaa709a01d4a19dd5aba10dd225eddfb8
+size 21024368
src/__init__.py
ADDED
File without changes
src/data_loader.py
ADDED
@@ -0,0 +1,65 @@
+# data_loader.py — dataset loading and augmentation
+
+from tensorflow.keras.preprocessing.image import ImageDataGenerator
+from src.utils import get_logger
+
+logger = get_logger("data_loader")
+
+
+def get_data_generators(cfg: dict):
+    data_cfg = cfg["data"]
+    aug_cfg = cfg["augmentation"]
+
+    image_size = tuple(data_cfg["image_size"])
+    batch_size = data_cfg["batch_size"]
+    val_split = data_cfg["validation_split"]
+    train_dir = data_cfg["train_dir"]
+    test_dir = data_cfg["test_dir"]
+
+    train_gen = ImageDataGenerator(
+        rescale=1./255,
+        validation_split=val_split,
+        rotation_range=aug_cfg["rotation_range"],
+        width_shift_range=aug_cfg["width_shift_range"],
+        height_shift_range=aug_cfg["height_shift_range"],
+        zoom_range=aug_cfg["zoom_range"],
+        horizontal_flip=aug_cfg["horizontal_flip"],
+        brightness_range=aug_cfg["brightness_range"],
+    )
+
+    test_gen = ImageDataGenerator(rescale=1./255)
+
+    train_data = train_gen.flow_from_directory(
+        train_dir,
+        target_size=image_size,
+        batch_size=batch_size,
+        class_mode="categorical",
+        subset="training",
+        seed=cfg["project"]["seed"],
+        shuffle=True
+    )
+
+    val_data = train_gen.flow_from_directory(
+        train_dir,
+        target_size=image_size,
+        batch_size=batch_size,
+        class_mode="categorical",
+        subset="validation",
+        seed=cfg["project"]["seed"],
+        shuffle=False
+    )
+
+    test_data = test_gen.flow_from_directory(
+        test_dir,
+        target_size=image_size,
+        batch_size=batch_size,
+        class_mode="categorical",
+        shuffle=False
+    )
+
+    logger.info(f"Train samples : {train_data.samples}")
+    logger.info(f"Val samples   : {val_data.samples}")
+    logger.info(f"Test samples  : {test_data.samples}")
+    logger.info(f"Classes       : {train_data.class_indices}")
+
+    return train_data, val_data, test_data
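A minimal sketch of calling the loader, assuming config.yaml provides the data, augmentation, and project.seed keys read above:

from src.utils import load_config
from src.data_loader import get_data_generators

cfg = load_config("config.yaml")
train_data, val_data, test_data = get_data_generators(cfg)

# Each generator yields (images, one-hot labels); image shape follows
# cfg["data"]["image_size"] and batch size follows cfg["data"]["batch_size"].
batch_x, batch_y = train_data[0]
print(batch_x.shape, batch_y.shape)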
src/export_onnx.py
ADDED
@@ -0,0 +1,282 @@
| 1 |
+
# export_onnx.py — ONNX export + optional quantization + benchmark
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
import json
|
| 6 |
+
import numpy as np
|
| 7 |
+
import tensorflow as tf
|
| 8 |
+
import onnx
|
| 9 |
+
import onnxruntime as ort
|
| 10 |
+
|
| 11 |
+
from src.utils import get_logger, load_config
|
| 12 |
+
from data_loader import get_data_generators
|
| 13 |
+
|
| 14 |
+
logger = get_logger("export_onnx")
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
# ---------------------------------------------------------------------------
|
| 18 |
+
# Optional ONNX Runtime quantization import
|
| 19 |
+
# ---------------------------------------------------------------------------
|
| 20 |
+
|
| 21 |
+
QUANTIZATION_AVAILABLE = True
|
| 22 |
+
QUANT_IMPORT_ERROR = None
|
| 23 |
+
|
| 24 |
+
try:
|
| 25 |
+
from onnxruntime.quantization import (
|
| 26 |
+
quantize_dynamic,
|
| 27 |
+
quantize_static,
|
| 28 |
+
QuantType,
|
| 29 |
+
CalibrationDataReader,
|
| 30 |
+
QuantFormat,
|
| 31 |
+
)
|
| 32 |
+
except Exception as e:
|
| 33 |
+
QUANTIZATION_AVAILABLE = False
|
| 34 |
+
QUANT_IMPORT_ERROR = str(e)
|
| 35 |
+
|
| 36 |
+
class CalibrationDataReader:
|
| 37 |
+
"""Fallback placeholder when quantization imports are unavailable."""
|
| 38 |
+
pass
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# ---------------------------------------------------------------------------
|
| 42 |
+
# TF → ONNX export
|
| 43 |
+
# ---------------------------------------------------------------------------
|
| 44 |
+
|
| 45 |
+
def export_to_onnx(model, onnx_path: str, image_size: tuple = (150, 150)):
|
| 46 |
+
import tf2onnx
|
| 47 |
+
import tf2onnx.convert
|
| 48 |
+
|
| 49 |
+
os.makedirs(os.path.dirname(onnx_path), exist_ok=True)
|
| 50 |
+
|
| 51 |
+
input_signature = [
|
| 52 |
+
tf.TensorSpec(
|
| 53 |
+
shape=(None, *image_size, 3),
|
| 54 |
+
dtype=tf.float32,
|
| 55 |
+
name="input",
|
| 56 |
+
)
|
| 57 |
+
]
|
| 58 |
+
|
| 59 |
+
logger.info(f"Exporting model to ONNX → {onnx_path}")
|
| 60 |
+
tf2onnx.convert.from_keras(
|
| 61 |
+
model,
|
| 62 |
+
input_signature=input_signature,
|
| 63 |
+
opset=13,
|
| 64 |
+
output_path=onnx_path,
|
| 65 |
+
)
|
| 66 |
+
|
| 67 |
+
onnx_model = onnx.load(onnx_path)
|
| 68 |
+
onnx.checker.check_model(onnx_model)
|
| 69 |
+
|
| 70 |
+
size_mb = os.path.getsize(onnx_path) / (1024 * 1024)
|
| 71 |
+
logger.info(f"ONNX export successful — size: {size_mb:.2f} MB")
|
| 72 |
+
return onnx_path
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# ---------------------------------------------------------------------------
|
| 76 |
+
# Dynamic Quantization (optional)
|
| 77 |
+
# ---------------------------------------------------------------------------
|
| 78 |
+
|
| 79 |
+
def dynamic_quantize(onnx_path: str, output_path: str):
|
| 80 |
+
if not QUANTIZATION_AVAILABLE:
|
| 81 |
+
logger.warning(f"Dynamic quantization skipped: {QUANT_IMPORT_ERROR}")
|
| 82 |
+
return None
|
| 83 |
+
|
| 84 |
+
logger.info(f"Applying Dynamic Quantization → {output_path}")
|
| 85 |
+
quantize_dynamic(
|
| 86 |
+
model_input=onnx_path,
|
| 87 |
+
model_output=output_path,
|
| 88 |
+
weight_type=QuantType.QInt8,
|
| 89 |
+
)
|
| 90 |
+
|
| 91 |
+
size_mb = os.path.getsize(output_path) / (1024 * 1024)
|
| 92 |
+
logger.info(f"Dynamic quantized model — size: {size_mb:.2f} MB")
|
| 93 |
+
return output_path
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# ---------------------------------------------------------------------------
|
| 97 |
+
# Static Quantization (optional)
|
| 98 |
+
# ---------------------------------------------------------------------------
|
| 99 |
+
|
| 100 |
+
class MRICalibrationReader(CalibrationDataReader):
|
| 101 |
+
"""Feeds calibration batches to the static quantizer."""
|
| 102 |
+
|
| 103 |
+
def __init__(self, data_generator, n_batches: int = 10):
|
| 104 |
+
self.data = []
|
| 105 |
+
self.index = 0
|
| 106 |
+
|
| 107 |
+
for i, (batch_x, _) in enumerate(data_generator):
|
| 108 |
+
if i >= n_batches:
|
| 109 |
+
break
|
| 110 |
+
self.data.append(batch_x.astype(np.float32))
|
| 111 |
+
|
| 112 |
+
logger.info(f"Calibration reader: {len(self.data)} batches loaded")
|
| 113 |
+
|
| 114 |
+
def get_next(self):
|
| 115 |
+
if self.index >= len(self.data):
|
| 116 |
+
return None
|
| 117 |
+
|
| 118 |
+
batch = {"input": self.data[self.index]}
|
| 119 |
+
self.index += 1
|
| 120 |
+
return batch
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def static_quantize(onnx_path: str, output_path: str, train_data, n_batches: int = 10):
|
| 124 |
+
if not QUANTIZATION_AVAILABLE:
|
| 125 |
+
logger.warning(f"Static quantization skipped: {QUANT_IMPORT_ERROR}")
|
| 126 |
+
return None
|
| 127 |
+
|
| 128 |
+
logger.info(f"Applying Static Quantization → {output_path}")
|
| 129 |
+
reader = MRICalibrationReader(train_data, n_batches=n_batches)
|
| 130 |
+
|
| 131 |
+
quantize_static(
|
| 132 |
+
model_input=onnx_path,
|
| 133 |
+
model_output=output_path,
|
| 134 |
+
calibration_data_reader=reader,
|
| 135 |
+
quant_format=QuantFormat.QDQ,
|
| 136 |
+
activation_type=QuantType.QInt8,
|
| 137 |
+
weight_type=QuantType.QInt8,
|
| 138 |
+
)
|
| 139 |
+
|
| 140 |
+
size_mb = os.path.getsize(output_path) / (1024 * 1024)
|
| 141 |
+
logger.info(f"Static quantized model — size: {size_mb:.2f} MB")
|
| 142 |
+
return output_path
|
# ---------------------------------------------------------------------------
# ONNX Runtime inference helper
# ---------------------------------------------------------------------------

def onnx_predict(onnx_path: str, img_array: np.ndarray) -> np.ndarray:
    sess = ort.InferenceSession(onnx_path, providers=["CPUExecutionProvider"])
    input_name = sess.get_inputs()[0].name
    output_name = sess.get_outputs()[0].name
    return sess.run([output_name], {input_name: img_array.astype(np.float32)})[0]


# ---------------------------------------------------------------------------
# Benchmark
# ---------------------------------------------------------------------------

def benchmark_models(tf_model, paths: dict, test_data, n_samples: int = 200) -> dict:
    logger.info("\nBenchmarking model formats ...")

    X_all, y_all = [], []
    total = 0

    for batch_x, batch_y in test_data:
        X_all.append(batch_x)
        y_all.append(batch_y)
        total += len(batch_x)
        if total >= n_samples:
            break

    X = np.concatenate(X_all, axis=0)[:n_samples].astype(np.float32)
    y = np.concatenate(y_all, axis=0)[:n_samples]
    y_true = np.argmax(y, axis=1)

    results = {}

    # TensorFlow model
    t0 = time.time()
    preds = tf_model.predict(X, verbose=0)
    tf_ms = (time.time() - t0) * 1000 / len(X)
    tf_acc = (np.argmax(preds, axis=1) == y_true).mean()

    results["TensorFlow (FP32)"] = {
        "latency_ms": float(tf_ms),
        "accuracy": float(tf_acc),
        "size_mb": None,
    }
    logger.info(f"TensorFlow (FP32) | acc={tf_acc:.4f} | {tf_ms:.2f} ms/sample")

    # ONNX models
    for name, path in paths.items():
        if path is None:
            logger.warning(f"Skipping {name} — path is None")
            continue
        if not os.path.exists(path):
            logger.warning(f"Skipping {name} — file not found: {path}")
            continue

        try:
            t0 = time.time()
            preds = onnx_predict(path, X)
            ms = (time.time() - t0) * 1000 / len(X)
            acc = (np.argmax(preds, axis=1) == y_true).mean()
            size = os.path.getsize(path) / (1024 * 1024)

            results[name] = {
                "latency_ms": float(ms),
                "accuracy": float(acc),
                "size_mb": float(size),
            }
            logger.info(f"{name:<24} | acc={acc:.4f} | {ms:.2f} ms/sample | {size:.2f} MB")

        except Exception as e:
            logger.warning(f"Skipping {name} due to runtime error: {e}")

    return results


def print_benchmark_table(results: dict):
    print("\n" + "=" * 70)
    print(f"{'Format':<26} {'Accuracy':>10} {'Latency(ms)':>13} {'Size(MB)':>12}")
    print("=" * 70)
    for name, r in results.items():
        size = f"{r['size_mb']:.2f}" if r["size_mb"] is not None else "—"
        print(f"{name:<26} {r['accuracy']:>10.4f} {r['latency_ms']:>13.2f} {size:>12}")
    print("=" * 70)


# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

if __name__ == "__main__":
    cfg = load_config("config.yaml")
    image_size = tuple(cfg["data"]["image_size"])
    onnx_dir = cfg["models"]["onnx_dir"]
    save_dir = cfg["models"]["save_dir"]

    os.makedirs(onnx_dir, exist_ok=True)

    train_data, val_data, test_data = get_data_generators(cfg)

    # Load best saved model
    model_path = os.path.join(save_dir, "ft_best.h5")
    logger.info(f"Loading model from {model_path}")
    model = tf.keras.models.load_model(model_path, compile=False)

    # Output paths
    onnx_fp32_path = os.path.join(onnx_dir, "model_fp32.onnx")
    onnx_dynamic_path = os.path.join(onnx_dir, "model_dynamic_int8.onnx")
    onnx_static_path = os.path.join(onnx_dir, "model_static_int8.onnx")

    # Export FP32 ONNX
    export_to_onnx(model, onnx_fp32_path, image_size)

    # Quantization
    dynamic_path = dynamic_quantize(onnx_fp32_path, onnx_dynamic_path)
    static_path = static_quantize(onnx_fp32_path, onnx_static_path, train_data, n_batches=50)

    # Benchmark available formats
    paths = {
        "ONNX FP32": onnx_fp32_path,
        "ONNX Dynamic INT8": dynamic_path,
        "ONNX Static INT8": static_path,
    }

    results = benchmark_models(model, paths, test_data)
    print_benchmark_table(results)

    # Save benchmark results
    bench_path = os.path.join(onnx_dir, "benchmark_results.json")
    with open(bench_path, "w") as f:
        json.dump(results, f, indent=4)

    logger.info(f"Benchmark results saved → {bench_path}")

    if QUANTIZATION_AVAILABLE:
        logger.info("ONNX export complete. Quantization attempted.")
    else:
        logger.warning(f"ONNX export complete. Quantization skipped: {QUANT_IMPORT_ERROR}")
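As a quick usage check of the exported graph, a single scan can be classified with the `onnx_predict` helper above — a minimal sketch, assuming a 150×150 input, a [0,1]-scaled RGB image, and the glioma/meningioma/notumor/pituitary class order used by the web UI; the sample file path is hypothetical:

import numpy as np
from PIL import Image

from src.export_onnx import onnx_predict

CLASSES = ["glioma", "meningioma", "notumor", "pituitary"]  # assumed order

img = Image.open("sample_mri.jpg").convert("RGB").resize((150, 150))  # hypothetical file
x = np.asarray(img, dtype=np.float32)[None, ...] / 255.0  # shape (1, 150, 150, 3)

probs = onnx_predict("onnx_models/model_fp32.onnx", x)[0]
print(CLASSES[int(np.argmax(probs))], f"{probs.max():.3f}")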
src/models.py
ADDED
@@ -0,0 +1,98 @@
# models.py — all model architectures

from tensorflow.keras import layers, models, optimizers
from tensorflow.keras.applications import MobileNetV2, EfficientNetB0
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout, BatchNormalization
from src.utils import get_logger
import tensorflow as tf

logger = get_logger("models")


def build_baseline_cnn(num_classes: int = 4, image_size: tuple = (150, 150)) -> models.Sequential:
    model = models.Sequential([
        layers.Input(shape=(*image_size, 3)),
        layers.Conv2D(32, (3, 3), activation="relu"),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(64, (3, 3), activation="relu"),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(128, (3, 3), activation="relu"),
        layers.MaxPooling2D(2, 2),
        layers.Flatten(),
        layers.Dense(128, activation="relu"),
        layers.Dense(num_classes, activation="softmax"),
    ], name="baseline_cnn")
    logger.info("Built Baseline CNN")
    return model


def build_mobilenet_tl(num_classes: int = 4, image_size: tuple = (150, 150)) -> models.Sequential:
    base = MobileNetV2(weights="imagenet", include_top=False, input_shape=(*image_size, 3))
    base.trainable = False
    model = models.Sequential([
        base,
        GlobalAveragePooling2D(),
        Dense(128, activation="relu"),
        Dense(num_classes, activation="softmax"),
    ], name="mobilenet_transfer")
    logger.info("Built MobileNetV2 Transfer Learning model")
    return model


def build_mobilenet_finetuned(base_model, unfreeze_last: int = 20) -> models.Sequential:
    base_model.trainable = True
    for layer in base_model.layers[:-unfreeze_last]:
        layer.trainable = False
    logger.info(f"Fine-tuned MobileNetV2: last {unfreeze_last} layers unfrozen")
    return base_model


def build_efficientnet(num_classes: int = 4, image_size: tuple = (150, 150)) -> models.Model:
    inputs = tf.keras.Input(shape=(*image_size, 3))
    x = tf.keras.layers.Rescaling(scale=255.0)(inputs)  # if generator gives [0,1]

    base = EfficientNetB0(
        weights="imagenet",
        include_top=False,
        input_shape=(*image_size, 3)
    )
    base.trainable = False

    x = base(x, training=False)
    x = GlobalAveragePooling2D()(x)
    x = Dense(256, activation="relu")(x)
    x = Dropout(0.3)(x)
    output = Dense(num_classes, activation="softmax")(x)

    model = models.Model(inputs=inputs, outputs=output, name="efficientnet_b0")
    logger.info("Built EfficientNetB0 Transfer Learning model")
    return model


def build_optuna_cnn(params: dict, num_classes: int = 4, image_size: tuple = (150, 150)) -> models.Sequential:
    model = models.Sequential([
        layers.Input(shape=(*image_size, 3)),
        layers.Conv2D(params["filters_1"], (3, 3), activation="relu"),
        BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(params["filters_2"], (3, 3), activation="relu"),
        BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Conv2D(params["filters_3"], (3, 3), activation="relu"),
        BatchNormalization(),
        layers.MaxPooling2D(2, 2),
        layers.Flatten(),
        Dense(params["dense_units"], activation="relu"),
        Dropout(params["dropout"]),
        Dense(num_classes, activation="softmax"),
    ], name="optuna_best_cnn")
    logger.info(f"Built Optuna CNN with params: {params}")
    return model


def compile_model(model, lr: float = 1e-3):
    model.compile(
        optimizer=optimizers.Adam(learning_rate=lr),
        loss="categorical_crossentropy",
        metrics=["accuracy"]
    )
    return model
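Each builder pairs with `compile_model`; a minimal sketch of the intended flow, using the 4-class, 150×150 defaults these signatures encode:

from src.models import build_baseline_cnn, compile_model

model = build_baseline_cnn(num_classes=4, image_size=(150, 150))
model = compile_model(model, lr=1e-3)  # Adam + categorical cross-entropy + accuracy
model.summary()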
src/utils.py
ADDED
@@ -0,0 +1,321 @@
# utils.py — shared helpers used across all modules

import os
import yaml
import logging
import numpy as np
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
from tensorflow.keras.preprocessing.image import load_img

# ---------------------------------------------------------------------------
# Logger
# ---------------------------------------------------------------------------

def get_logger(name: str, log_dir: str = "./logs") -> logging.Logger:
    os.makedirs(log_dir, exist_ok=True)
    logger = logging.getLogger(name)
    if logger.handlers:
        return logger

    logger.setLevel(logging.INFO)
    fmt = logging.Formatter(
        "%(asctime)s | %(levelname)s | %(name)s | %(message)s",
        datefmt="%Y-%m-%d %H:%M:%S"
    )

    ch = logging.StreamHandler()
    ch.setFormatter(fmt)
    logger.addHandler(ch)

    fh = logging.FileHandler(
        os.path.join(log_dir, f"{name}.log"), encoding="utf-8"
    )
    fh.setFormatter(fmt)
    logger.addHandler(fh)

    return logger


# ---------------------------------------------------------------------------
# Config loader
# ---------------------------------------------------------------------------

def load_config(path: str = "config.yaml") -> dict:
    with open(path) as f:
        return yaml.safe_load(f)


# ---------------------------------------------------------------------------
# Plotting
# ---------------------------------------------------------------------------

def plot_history(history, title: str, save_path: str = None):
    fig, axes = plt.subplots(1, 2, figsize=(13, 4))

    axes[0].plot(history.history["accuracy"], label="Train")
    axes[0].plot(history.history["val_accuracy"], label="Val")
    axes[0].set_title(f"{title} - Accuracy")
    axes[0].set_xlabel("Epoch")
    axes[0].legend()

    axes[1].plot(history.history["loss"], label="Train")
    axes[1].plot(history.history["val_loss"], label="Val")
    axes[1].set_title(f"{title} - Loss")
    axes[1].set_xlabel("Epoch")
    axes[1].legend()

    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, bbox_inches="tight", dpi=100)
    plt.show()
    plt.close()


def plot_comparison(results: dict, save_path: str = None):
    plt.figure(figsize=(10, 4))
    colors = [
        "crimson" if v == max(results.values()) else "steelblue"
        for v in results.values()
    ]
    bars = plt.bar(results.keys(), results.values(), color=colors)
    plt.bar_label(bars, fmt="%.4f", padding=3)
    plt.ylim(min(results.values()) - 0.05, 1.0)
    plt.title("Model Comparison - Validation Accuracy (red = best)")
    plt.ylabel("Val Accuracy")
    plt.xticks(rotation=15)
    plt.tight_layout()
    if save_path:
        plt.savefig(save_path, bbox_inches="tight", dpi=100)
    plt.show()
    plt.close()


# ---------------------------------------------------------------------------
# Grad-CAM helpers
# ---------------------------------------------------------------------------

def _collect_all_layers(model) -> list:
    """
    Flatten all layers from a model, including layers inside nested sub-models.
    Returns a flat list of layer objects.
    """
    result = []

    def _recurse(m):
        for layer in m.layers:
            result.append(layer)
            if hasattr(layer, "layers") and len(layer.layers) > 0:
                _recurse(layer)

    _recurse(model)
    return result


def get_last_conv_layer(model) -> str:
    """
    Return the name of the last Conv2D layer found anywhere inside the model,
    including inside nested sub-models (MobileNetV2, EfficientNetB0, etc.).
    """
    all_layers = _collect_all_layers(model)
    conv_layers = [l for l in all_layers if isinstance(l, tf.keras.layers.Conv2D)]

    if not conv_layers:
        raise ValueError("No Conv2D layer found in model.")

    return conv_layers[-1].name


def _build_gradcam_model(model, last_conv_layer_name: str):
    """
    Build a Grad-CAM sub-model that outputs:
        [conv_layer_output, final_model_predictions]

    Works for both:
    - Plain CNNs: Conv2D layers are direct children of the model
    - Nested models: Conv2D is inside a sub-model (MobileNetV2, EfficientNetB0)

    Strategy: find which sub-model owns the target conv layer, build a
    feature extractor from that sub-model's input to [conv_output, sub_output],
    then chain it with the remaining head layers of the outer model.
    """
    all_layers = _collect_all_layers(model)

    # Find the layer object
    target_layer = None
    for layer in all_layers:
        if layer.name == last_conv_layer_name:
            target_layer = layer
            break

    if target_layer is None:
        raise ValueError(f"Layer '{last_conv_layer_name}' not found in model.")

    # Check if the conv layer is a direct child of the outer model
    direct_names = [l.name for l in model.layers]

    if last_conv_layer_name in direct_names:
        # Plain CNN — simple case
        grad_model = tf.keras.models.Model(
            inputs=model.input,
            outputs=[model.get_layer(last_conv_layer_name).output, model.output]
        )
        return grad_model, None  # None = no separate head needed

    # Nested model case — find which direct child sub-model contains the layer
    owner_submodel = None
    for layer in model.layers:
        if hasattr(layer, "layers"):
            sub_names = [l.name for l in _collect_all_layers(layer)]
            if last_conv_layer_name in sub_names:
                owner_submodel = layer
                break

    if owner_submodel is None:
        raise ValueError(
            f"Could not find parent sub-model for layer '{last_conv_layer_name}'."
        )

    # Build: sub-model input -> [conv_output, sub_model_output]
    sub_grad_model = tf.keras.models.Model(
        inputs=owner_submodel.input,
        outputs=[
            owner_submodel.get_layer(last_conv_layer_name).output,
            owner_submodel.output,
        ]
    )

    # Collect head layers (everything after the sub-model in the outer model)
    head_layers = []
    found = False
    for layer in model.layers:
        if found:
            head_layers.append(layer)
        if layer.name == owner_submodel.name:
            found = True

    return sub_grad_model, head_layers


def get_gradcam_heatmap(model, img_array: np.ndarray, last_conv_layer_name: str):
    """
    Compute Grad-CAM heatmap.

    Parameters
    ----------
    model : compiled Keras model
    img_array : preprocessed image, shape (1, H, W, 3), values in [0,1]
    last_conv_layer_name : name of the target Conv2D layer

    Returns
    -------
    heatmap : np.ndarray shape (H_conv, W_conv), values in [0,1]
    pred_idx : int, predicted class index
    """
    grad_model, head_layers = _build_gradcam_model(model, last_conv_layer_name)

    # Forward pass inside the tape so gradients can flow back to the conv output
    with tf.GradientTape() as tape:
        if head_layers is None:
            # Plain CNN — single forward pass
            conv_out_val, preds = grad_model(img_array)
        else:
            # Nested model — two-stage forward pass: sub-model, then head layers
            conv_out_val, sub_out = grad_model(img_array)
            x = sub_out
            for layer in head_layers:
                x = layer(x)
            preds = x

        # Watch conv_out_val so we can compute gradients w.r.t. it
        tape.watch(conv_out_val)
        pred_idx = int(tf.argmax(preds[0]))
        class_loss = preds[:, pred_idx]

    grads = tape.gradient(class_loss, conv_out_val)

    if grads is None:
        raise ValueError(
            "Gradients are None. The conv layer output is not part of the "
            "computation graph. Try a different layer name."
        )

    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    heatmap = conv_out_val[0] @ pooled_grads[..., tf.newaxis]
    heatmap = tf.squeeze(heatmap)
    heatmap = tf.maximum(heatmap, 0)
    heatmap = heatmap / (tf.math.reduce_max(heatmap) + 1e-8)

    return heatmap.numpy(), pred_idx


# ---------------------------------------------------------------------------
# Full Grad-CAM visualisation
# ---------------------------------------------------------------------------

def generate_gradcam_overlay(model, img_path: str, last_conv_layer: str,
                             image_size: tuple, class_names: list,
                             save_path: str = None):
    img = load_img(img_path, target_size=image_size)
    img_array = np.array(img) / 255.0
    img_input = np.expand_dims(img_array, axis=0).astype(np.float32)

    heatmap, pred_idx = get_gradcam_heatmap(model, img_input, last_conv_layer)

    heatmap_resized = cv2.resize(heatmap, image_size)
    heatmap_colored = cv2.cvtColor(
        cv2.applyColorMap(np.uint8(255 * heatmap_resized), cv2.COLORMAP_JET),
        cv2.COLOR_BGR2RGB
    )
    overlay = cv2.addWeighted(
        np.uint8(255 * img_array), 0.6, heatmap_colored, 0.4, 0
    )

    probs = model.predict(img_input, verbose=0)[0]
    conf = probs[pred_idx] * 100

    fig, axes = plt.subplots(1, 3, figsize=(13, 4))
    axes[0].imshow(img)
    axes[0].set_title("Original MRI")
    axes[0].axis("off")

    axes[1].imshow(heatmap_resized, cmap="jet")
    axes[1].set_title("Grad-CAM Heatmap")
    axes[1].axis("off")

    axes[2].imshow(overlay)
    axes[2].set_title(f"Pred: {class_names[pred_idx]} ({conf:.1f}%)")
    axes[2].axis("off")

    plt.suptitle(
        f"Grad-CAM - {class_names[pred_idx].upper()}",
        fontsize=14, fontweight="bold"
    )
    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, bbox_inches="tight", dpi=100)
    plt.show()
    plt.close()

    return pred_idx, conf, overlay
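Taken together, the Grad-CAM helpers compose into a single call — a minimal sketch, assuming the fine-tuned checkpoint from this repo and a hypothetical sample image:

import tensorflow as tf
from src.utils import get_last_conv_layer, generate_gradcam_overlay

model = tf.keras.models.load_model("saved_models/ft_best.h5", compile=False)
layer_name = get_last_conv_layer(model)  # deepest Conv2D, even inside MobileNetV2

pred_idx, conf, overlay = generate_gradcam_overlay(
    model,
    "sample_mri.jpg",  # hypothetical input path
    last_conv_layer=layer_name,
    image_size=(150, 150),
    class_names=["glioma", "meningioma", "notumor", "pituitary"],
    save_path="logs/gradcam_demo.png",
)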
static/index.html
ADDED
@@ -0,0 +1,679 @@
| 1 |
+
<!DOCTYPE html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8"/>
|
| 5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0"/>
|
| 6 |
+
<title>NeuroScan — Brain Tumor Classification</title>
|
| 7 |
+
<link href="https://fonts.googleapis.com/css2?family=DM+Mono:ital,wght@0,300;0,400;0,500;1,300&family=Syne:wght@400;600;700;800&display=swap" rel="stylesheet"/>
|
| 8 |
+
<style>
|
| 9 |
+
*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
|
| 10 |
+
:root{
|
| 11 |
+
--bg:#0a0c10;
|
| 12 |
+
--bg2:#11141a;
|
| 13 |
+
--bg3:#181c24;
|
| 14 |
+
--bg4:#1e2330;
|
| 15 |
+
--border:#ffffff12;
|
| 16 |
+
--border2:#ffffff20;
|
| 17 |
+
--text:#e8eaf0;
|
| 18 |
+
--text2:#8b90a0;
|
| 19 |
+
--text3:#555b6e;
|
| 20 |
+
--accent:#00e5ff;
|
| 21 |
+
--accent2:#00b4cc;
|
| 22 |
+
--accent-dim:#00e5ff18;
|
| 23 |
+
--green:#00e676;
|
| 24 |
+
--green-dim:#00e67615;
|
| 25 |
+
--amber:#ffab40;
|
| 26 |
+
--amber-dim:#ffab4015;
|
| 27 |
+
--red:#ff5252;
|
| 28 |
+
--red-dim:#ff525215;
|
| 29 |
+
--purple:#ce93d8;
|
| 30 |
+
--purple-dim:#ce93d815;
|
| 31 |
+
--radius:10px;
|
| 32 |
+
--shadow:0 4px 24px #00000060;
|
| 33 |
+
}
|
| 34 |
+
html{scroll-behavior:smooth}
|
| 35 |
+
body{background:var(--bg);color:var(--text);font-family:'DM Mono',monospace;font-size:14px;line-height:1.7;min-height:100vh;overflow-x:hidden}
|
| 36 |
+
body::before{
|
| 37 |
+
content:'';position:fixed;inset:0;
|
| 38 |
+
background:repeating-linear-gradient(0deg,transparent,transparent 2px,#00000018 2px,#00000018 4px);
|
| 39 |
+
pointer-events:none;z-index:9999;opacity:.35
|
| 40 |
+
}
|
| 41 |
+
.shell{max-width:1100px;margin:0 auto;padding:0 24px 80px}
|
| 42 |
+
header{
|
| 43 |
+
padding:40px 0 32px;
|
| 44 |
+
border-bottom:1px solid var(--border);
|
| 45 |
+
display:flex;align-items:center;justify-content:space-between;gap:16px;
|
| 46 |
+
flex-wrap:wrap
|
| 47 |
+
}
|
| 48 |
+
.logo{display:flex;align-items:center;gap:14px}
|
| 49 |
+
.logo-mark{
|
| 50 |
+
width:42px;height:42px;border-radius:8px;
|
| 51 |
+
background:linear-gradient(135deg,var(--accent),#0066ff);
|
| 52 |
+
display:grid;place-items:center;font-size:20px;flex-shrink:0
|
| 53 |
+
}
|
| 54 |
+
.logo-text{font-family:'Syne',sans-serif;font-size:22px;font-weight:800;
|
| 55 |
+
background:linear-gradient(90deg,var(--accent),#80d8ff);
|
| 56 |
+
-webkit-background-clip:text;-webkit-text-fill-color:transparent;background-clip:text
|
| 57 |
+
}
|
| 58 |
+
.logo-sub{font-size:11px;color:var(--text3);letter-spacing:.12em;text-transform:uppercase;margin-top:-2px}
|
| 59 |
+
.header-badges{display:flex;gap:8px;flex-wrap:wrap}
|
| 60 |
+
.badge{
|
| 61 |
+
padding:4px 10px;border-radius:20px;font-size:11px;letter-spacing:.06em;
|
| 62 |
+
border:1px solid var(--border2);color:var(--text2);background:var(--bg3)
|
| 63 |
+
}
|
| 64 |
+
.badge.live{border-color:#00e67640;color:var(--green);background:var(--green-dim)}
|
| 65 |
+
.badge.live::before{content:'● ';font-size:8px}
|
| 66 |
+
.section-title{
|
| 67 |
+
font-family:'Syne',sans-serif;font-size:12px;font-weight:600;
|
| 68 |
+
letter-spacing:.14em;text-transform:uppercase;
|
| 69 |
+
color:var(--accent);margin-bottom:16px;
|
| 70 |
+
display:flex;align-items:center;gap:8px
|
| 71 |
+
}
|
| 72 |
+
.section-title::before{content:'';width:20px;height:1px;background:var(--accent)}
|
| 73 |
+
.grid-2{display:grid;grid-template-columns:1fr 1fr;gap:20px}
|
| 74 |
+
.grid-3{display:grid;grid-template-columns:repeat(3,1fr);gap:16px}
|
| 75 |
+
@media(max-width:700px){.grid-2,.grid-3{grid-template-columns:1fr}}
|
| 76 |
+
.card{
|
| 77 |
+
background:var(--bg2);border:1px solid var(--border);
|
| 78 |
+
border-radius:var(--radius);padding:24px;
|
| 79 |
+
}
|
| 80 |
+
.card-sm{padding:16px 20px}
|
| 81 |
+
#upload-zone{
|
| 82 |
+
border:2px dashed var(--border2);border-radius:var(--radius);
|
| 83 |
+
padding:48px 24px;text-align:center;cursor:pointer;
|
| 84 |
+
transition:border-color .2s,background .2s;position:relative
|
| 85 |
+
}
|
| 86 |
+
#upload-zone:hover,#upload-zone.drag{border-color:var(--accent);background:var(--accent-dim)}
|
| 87 |
+
#upload-zone input{position:absolute;inset:0;opacity:0;cursor:pointer;width:100%;height:100%}
|
| 88 |
+
.upload-icon{font-size:36px;margin-bottom:12px;opacity:.5}
|
| 89 |
+
.upload-label{font-family:'Syne',sans-serif;font-size:15px;font-weight:600;color:var(--text)}
|
| 90 |
+
.upload-hint{font-size:12px;color:var(--text3);margin-top:6px}
|
| 91 |
+
#preview-area{display:none;gap:12px;flex-direction:column}
|
| 92 |
+
.preview-img-wrap{position:relative;border-radius:8px;overflow:hidden;background:var(--bg3)}
|
| 93 |
+
.preview-img-wrap img{width:100%;display:block;border-radius:8px;max-height:260px;object-fit:contain}
|
| 94 |
+
.img-filename{font-size:11px;color:var(--text3);margin-top:6px;word-break:break-all}
|
| 95 |
+
.backend-grid{display:grid;grid-template-columns:1fr 1fr;gap:8px;margin-bottom:20px}
|
| 96 |
+
.backend-btn{
|
| 97 |
+
padding:10px 12px;border-radius:8px;border:1px solid var(--border2);
|
| 98 |
+
background:var(--bg3);color:var(--text2);cursor:pointer;
|
| 99 |
+
font-family:'DM Mono',monospace;font-size:12px;text-align:left;
|
| 100 |
+
transition:border-color .15s,background .15s,color .15s;line-height:1.4
|
| 101 |
+
}
|
| 102 |
+
.backend-btn:hover{border-color:var(--accent2);color:var(--text)}
|
| 103 |
+
.backend-btn.active{border-color:var(--accent);background:var(--accent-dim);color:var(--accent)}
|
| 104 |
+
.backend-btn.disabled{
|
| 105 |
+
opacity:.35;cursor:not-allowed;pointer-events:none
|
| 106 |
+
}
|
| 107 |
+
.backend-btn .b-name{font-weight:500;display:block}
|
| 108 |
+
.backend-btn .b-tag{font-size:10px;opacity:.6;margin-top:2px}
|
| 109 |
+
#analyse-btn{
|
| 110 |
+
width:100%;padding:14px;border-radius:8px;border:none;cursor:pointer;
|
| 111 |
+
font-family:'Syne',sans-serif;font-size:14px;font-weight:700;
|
| 112 |
+
letter-spacing:.08em;text-transform:uppercase;
|
| 113 |
+
background:linear-gradient(90deg,var(--accent),#0080ff);
|
| 114 |
+
color:#000;transition:opacity .2s,transform .1s
|
| 115 |
+
}
|
| 116 |
+
#analyse-btn:hover{opacity:.9}
|
| 117 |
+
#analyse-btn:active{transform:scale(.98)}
|
| 118 |
+
#analyse-btn:disabled{opacity:.35;cursor:not-allowed}
|
| 119 |
+
#results-panel{display:none}
|
| 120 |
+
.prediction-hero{
|
| 121 |
+
display:flex;align-items:center;gap:16px;
|
| 122 |
+
padding:20px;border-radius:8px;margin-bottom:20px
|
| 123 |
+
}
|
| 124 |
+
.prediction-hero.glioma{background:var(--red-dim);border:1px solid #ff525230}
|
| 125 |
+
.prediction-hero.meningioma{background:var(--amber-dim);border:1px solid #ffab4030}
|
| 126 |
+
.prediction-hero.notumor{background:var(--green-dim);border:1px solid #00e67630}
|
| 127 |
+
.prediction-hero.pituitary{background:var(--purple-dim);border:1px solid #ce93d830}
|
| 128 |
+
.pred-class{
|
| 129 |
+
font-family:'Syne',sans-serif;font-size:22px;font-weight:800;
|
| 130 |
+
text-transform:uppercase;letter-spacing:.06em
|
| 131 |
+
}
|
| 132 |
+
.pred-class.glioma{color:var(--red)}
|
| 133 |
+
.pred-class.meningioma{color:var(--amber)}
|
| 134 |
+
.pred-class.notumor{color:var(--green)}
|
| 135 |
+
.pred-class.pituitary{color:var(--purple)}
|
| 136 |
+
.pred-conf{font-size:13px;color:var(--text2);margin-top:2px}
|
| 137 |
+
.pred-badge{
|
| 138 |
+
margin-left:auto;padding:6px 14px;border-radius:20px;
|
| 139 |
+
font-size:11px;font-weight:500;flex-shrink:0
|
| 140 |
+
}
|
| 141 |
+
.pred-badge.high{background:var(--green-dim);color:var(--green);border:1px solid #00e67640}
|
| 142 |
+
.pred-badge.mid{background:var(--amber-dim);color:var(--amber);border:1px solid #ffab4040}
|
| 143 |
+
.pred-badge.low{background:var(--red-dim);color:var(--red);border:1px solid #ff525240}
|
| 144 |
+
.prob-row{margin-bottom:10px}
|
| 145 |
+
.prob-header{display:flex;justify-content:space-between;margin-bottom:4px;font-size:12px}
|
| 146 |
+
.prob-name{color:var(--text2)}
|
| 147 |
+
.prob-val{color:var(--text)}
|
| 148 |
+
.prob-track{height:6px;background:var(--bg4);border-radius:3px;overflow:hidden}
|
| 149 |
+
.prob-fill{height:100%;border-radius:3px;transition:width .8s cubic-bezier(.4,0,.2,1)}
|
| 150 |
+
.prob-fill.glioma{background:var(--red)}
|
| 151 |
+
.prob-fill.meningioma{background:var(--amber)}
|
| 152 |
+
.prob-fill.notumor{background:var(--green)}
|
| 153 |
+
.prob-fill.pituitary{background:var(--purple)}
|
| 154 |
+
.stats-row{display:grid;grid-template-columns:repeat(3,1fr);gap:12px;margin-top:16px}
|
| 155 |
+
.stat-card{
|
| 156 |
+
background:var(--bg3);border:1px solid var(--border);
|
| 157 |
+
border-radius:8px;padding:14px;text-align:center
|
| 158 |
+
}
|
| 159 |
+
.stat-val{font-family:'Syne',sans-serif;font-size:18px;font-weight:700;color:var(--accent)}
|
| 160 |
+
.stat-lbl{font-size:11px;color:var(--text3);margin-top:2px;text-transform:uppercase;letter-spacing:.08em}
|
| 161 |
+
.gradcam-grid{display:grid;grid-template-columns:1fr 1fr;gap:12px;margin-top:16px}
|
| 162 |
+
.gradcam-item{text-align:center}
|
| 163 |
+
.gradcam-item img{width:100%;border-radius:8px;display:block}
|
| 164 |
+
.gradcam-lbl{font-size:11px;color:var(--text3);margin-top:6px;text-transform:uppercase;letter-spacing:.08em}
|
| 165 |
+
.model-table{width:100%;border-collapse:collapse;font-size:12px}
|
| 166 |
+
.model-table th{color:var(--text3);font-weight:400;text-align:left;padding:8px 10px;
|
| 167 |
+
border-bottom:1px solid var(--border);text-transform:uppercase;letter-spacing:.08em}
|
| 168 |
+
.model-table td{padding:10px;border-bottom:1px solid var(--border)}
|
| 169 |
+
.model-table tr:last-child td{border-bottom:none}
|
| 170 |
+
.model-table tr:hover td{background:var(--bg3)}
|
| 171 |
+
.tag{
|
| 172 |
+
padding:2px 8px;border-radius:4px;font-size:10px;
|
| 173 |
+
background:var(--accent-dim);color:var(--accent);border:1px solid var(--accent)30
|
| 174 |
+
}
|
| 175 |
+
.tag.green{background:var(--green-dim);color:var(--green);border-color:#00e67630}
|
| 176 |
+
.tag.amber{background:var(--amber-dim);color:var(--amber);border-color:#ffab4030}
|
| 177 |
+
.spinner{display:none;width:20px;height:20px;border:2px solid #ffffff20;
|
| 178 |
+
border-top-color:var(--accent);border-radius:50%;animation:spin .7s linear infinite}
|
| 179 |
+
@keyframes spin{to{transform:rotate(360deg)}}
|
| 180 |
+
.btn-inner{display:flex;align-items:center;justify-content:center;gap:10px}
|
| 181 |
+
#toast{
|
| 182 |
+
position:fixed;bottom:28px;right:28px;z-index:1000;
|
| 183 |
+
padding:12px 20px;border-radius:8px;font-size:13px;
|
| 184 |
+
background:var(--bg3);border:1px solid var(--border2);
|
| 185 |
+
transform:translateY(80px);opacity:0;transition:transform .3s,opacity .3s
|
| 186 |
+
}
|
| 187 |
+
#toast.show{transform:translateY(0);opacity:1}
|
| 188 |
+
#toast.error{border-color:#ff525240;color:var(--red)}
|
| 189 |
+
#toast.success{border-color:#00e67640;color:var(--green)}
|
| 190 |
+
.tabs{display:flex;gap:4px;border-bottom:1px solid var(--border);margin-bottom:20px}
|
| 191 |
+
.tab{
|
| 192 |
+
padding:8px 16px;cursor:pointer;font-size:12px;
|
| 193 |
+
color:var(--text3);border-bottom:2px solid transparent;
|
| 194 |
+
margin-bottom:-1px;transition:color .15s,border-color .15s;
|
| 195 |
+
text-transform:uppercase;letter-spacing:.08em
|
| 196 |
+
}
|
| 197 |
+
.tab.active{color:var(--accent);border-bottom-color:var(--accent)}
|
| 198 |
+
.tab-panel{display:none}
|
| 199 |
+
.tab-panel.active{display:block}
|
| 200 |
+
.info-block{
|
| 201 |
+
background:var(--bg3);border:1px solid var(--border);
|
| 202 |
+
border-radius:8px;padding:14px 16px;font-size:12px;color:var(--text2);
|
| 203 |
+
line-height:1.8;margin-top:16px
|
| 204 |
+
}
|
| 205 |
+
.info-block strong{color:var(--text)}
|
| 206 |
+
.divider{height:1px;background:var(--border);margin:28px 0}
|
| 207 |
+
.compare-row{
|
| 208 |
+
display:flex;align-items:center;gap:12px;padding:10px 0;
|
| 209 |
+
border-bottom:1px solid var(--border)
|
| 210 |
+
}
|
| 211 |
+
.compare-row:last-child{border-bottom:none}
|
| 212 |
+
.compare-name{width:180px;flex-shrink:0;font-size:12px;color:var(--text2)}
|
| 213 |
+
.compare-bar-wrap{flex:1;height:8px;background:var(--bg4);border-radius:4px;overflow:hidden}
|
| 214 |
+
.compare-bar{height:100%;border-radius:4px;background:var(--accent);transition:width .9s}
|
| 215 |
+
.compare-val{width:60px;text-align:right;font-size:12px;color:var(--text);flex-shrink:0}
|
| 216 |
+
@keyframes fadeUp{from{opacity:0;transform:translateY(12px)}to{opacity:1;transform:none}}
|
| 217 |
+
.fade-in{animation:fadeUp .4s ease both}
|
| 218 |
+
</style>
|
| 219 |
+
</head>
|
| 220 |
+
<body>
|
| 221 |
+
<div class="shell">
|
| 222 |
+
|
| 223 |
+
<header>
|
| 224 |
+
<div class="logo">
|
| 225 |
+
<div class="logo-mark">🧠</div>
|
| 226 |
+
<div>
|
| 227 |
+
<div class="logo-text">NeuroScan</div>
|
| 228 |
+
<div class="logo-sub">Brain Tumor Classification · v1.0</div>
|
| 229 |
+
</div>
|
| 230 |
+
</div>
|
| 231 |
+
<div class="header-badges">
|
| 232 |
+
<span class="badge live" id="api-status">API Connected</span>
|
| 233 |
+
<span class="badge">TF 2.10</span>
|
| 234 |
+
<span class="badge">ONNX FP32</span>
|
| 235 |
+
<span class="badge">Grad-CAM XAI</span>
|
| 236 |
+
</div>
|
| 237 |
+
</header>
|
| 238 |
+
|
| 239 |
+
<div style="height:36px"></div>
|
| 240 |
+
|
| 241 |
+
<div class="grid-2" style="align-items:start">
|
| 242 |
+
|
| 243 |
+
<div>
|
| 244 |
+
<div class="section-title">MRI Input</div>
|
| 245 |
+
<div class="card" style="margin-bottom:20px">
|
| 246 |
+
<div id="upload-zone">
|
| 247 |
+
<input type="file" id="file-input" accept="image/*"/>
|
| 248 |
+
<div class="upload-icon">⬆</div>
|
| 249 |
+
<div class="upload-label">Drop MRI image here</div>
|
| 250 |
+
<div class="upload-hint">PNG, JPG, JPEG — any brain MRI scan</div>
|
| 251 |
+
</div>
|
| 252 |
+
<div id="preview-area" style="margin-top:16px">
|
| 253 |
+
<div class="preview-img-wrap">
|
| 254 |
+
<img id="preview-img" src="" alt="MRI preview"/>
|
| 255 |
+
</div>
|
| 256 |
+
<div class="img-filename" id="img-filename"></div>
|
| 257 |
+
</div>
|
| 258 |
+
</div>
|
| 259 |
+
|
| 260 |
+
<div class="section-title">Inference Backend</div>
|
| 261 |
+
<div class="card" style="margin-bottom:20px">
|
| 262 |
+
<div class="backend-grid">
|
| 263 |
+
<button class="backend-btn active" data-backend="tensorflow" onclick="selectBackend(this)">
|
| 264 |
+
<span class="b-name">TensorFlow</span>
|
| 265 |
+
<span class="b-tag">FP32 · Grad-CAM</span>
|
| 266 |
+
</button>
|
| 267 |
+
<button class="backend-btn" data-backend="onnx_fp32" onclick="selectBackend(this)">
|
| 268 |
+
<span class="b-name">ONNX FP32</span>
|
| 269 |
+
<span class="b-tag">Full precision</span>
|
| 270 |
+
</button>
|
| 271 |
+
<button class="backend-btn" id="dynamic-btn" data-backend="onnx_dynamic" onclick="selectBackend(this)">
|
| 272 |
+
<span class="b-name">Dynamic INT8</span>
|
| 273 |
+
<span class="b-tag">Quantized · faster</span>
|
| 274 |
+
</button>
|
| 275 |
+
<button class="backend-btn" data-backend="onnx_static" onclick="selectBackend(this)">
|
| 276 |
+
<span class="b-name">Static INT8</span>
|
| 277 |
+
<span class="b-tag">Calibrated · smallest</span>
|
| 278 |
+
</button>
|
| 279 |
+
</div>
|
| 280 |
+
|
| 281 |
+
<div id="gradcam-toggle-wrap" style="display:flex;align-items:center;gap:10px;margin-bottom:20px">
|
| 282 |
+
<label style="display:flex;align-items:center;gap:8px;cursor:pointer;font-size:12px;color:var(--text2)">
|
| 283 |
+
<div style="position:relative;width:36px;height:20px">
|
| 284 |
+
<input type="checkbox" id="gradcam-toggle" style="opacity:0;position:absolute;width:100%;height:100%;cursor:pointer;margin:0" onchange="updateGradcamUI()"/>
|
| 285 |
+
<div id="toggle-track" style="width:36px;height:20px;background:var(--bg4);border-radius:10px;border:1px solid var(--border2);transition:background .2s"></div>
|
| 286 |
+
<div id="toggle-thumb" style="position:absolute;top:3px;left:3px;width:14px;height:14px;background:var(--text3);border-radius:50%;transition:transform .2s,background .2s"></div>
|
| 287 |
+
</div>
|
| 288 |
+
Show Grad-CAM heatmap
|
| 289 |
+
</label>
|
| 290 |
+
<span id="gradcam-note" style="font-size:11px;color:var(--text3)">(TF backend only)</span>
|
| 291 |
+
</div>
|
| 292 |
+
|
| 293 |
+
<button id="analyse-btn" onclick="analyse()" disabled>
|
| 294 |
+
<div class="btn-inner">
|
| 295 |
+
<div class="spinner" id="spinner"></div>
|
| 296 |
+
<span id="btn-label">Upload an image first</span>
|
| 297 |
+
</div>
|
| 298 |
+
</button>
|
| 299 |
+
</div>
|
| 300 |
+
|
| 301 |
+
<div class="section-title">Quantization Info</div>
|
| 302 |
+
<div class="card card-sm">
|
| 303 |
+
<div class="tabs">
|
| 304 |
+
<div class="tab active" onclick="switchTab(this,'q-dynamic')">Dynamic INT8</div>
|
| 305 |
+
<div class="tab" onclick="switchTab(this,'q-static')">Static INT8</div>
|
| 306 |
+
<div class="tab" onclick="switchTab(this,'q-compare')">Compare</div>
|
| 307 |
+
</div>
|
| 308 |
+
<div id="q-dynamic" class="tab-panel active">
|
| 309 |
+
<div class="info-block">
|
| 310 |
+
<strong>Dynamic Quantization</strong> converts model <strong>weights</strong> to INT8 at export time. Activations remain FP32 at runtime and are quantized on-the-fly. No calibration data needed.
|
| 311 |
+
</div>
|
| 312 |
+
</div>
|
| 313 |
+
<div id="q-static" class="tab-panel">
|
| 314 |
+
<div class="info-block">
|
| 315 |
+
<strong>Static Quantization</strong> quantizes both <strong>weights and activations</strong> to INT8. Requires calibration data. It can be smaller and faster, but accuracy may vary by architecture.
|
| 316 |
+
</div>
|
| 317 |
+
</div>
|
| 318 |
+
<div id="q-compare" class="tab-panel">
|
| 319 |
+
<div id="benchmark-data" style="padding:4px 0">
|
| 320 |
+
<div style="color:var(--text3);font-size:12px">Loading benchmark...</div>
|
| 321 |
+
</div>
|
| 322 |
+
</div>
|
| 323 |
+
</div>
|
| 324 |
+
</div>
|
| 325 |
+
|
| 326 |
+
<div>
|
| 327 |
+
<div class="section-title">Analysis Results</div>
|
| 328 |
+
|
| 329 |
+
<div id="placeholder" class="card" style="text-align:center;padding:60px 24px">
|
| 330 |
+
<div style="font-size:48px;opacity:.2;margin-bottom:16px">🔬</div>
|
| 331 |
+
<div style="font-family:'Syne',sans-serif;font-size:15px;color:var(--text3)">
|
| 332 |
+
Upload an MRI scan and click analyse
|
| 333 |
+
</div>
|
| 334 |
+
<div style="font-size:12px;color:var(--text3);margin-top:8px;opacity:.6">
|
| 335 |
+
Supports glioma · meningioma · no tumor · pituitary
|
| 336 |
+
</div>
|
| 337 |
+
</div>
|
| 338 |
+
|
| 339 |
+
<div id="results-panel" class="fade-in">
|
| 340 |
+
<div id="pred-hero" class="prediction-hero card">
|
| 341 |
+
<div>
|
| 342 |
+
<div class="pred-class" id="pred-class-text"></div>
|
| 343 |
+
<div class="pred-conf" id="pred-conf-text"></div>
|
| 344 |
+
</div>
|
| 345 |
+
<div class="pred-badge" id="pred-badge"></div>
|
| 346 |
+
</div>
|
| 347 |
+
|
| 348 |
+
<div class="card" style="margin-bottom:16px">
|
| 349 |
+
<div class="section-title">Class Probabilities</div>
|
| 350 |
+
<div id="prob-bars"></div>
|
| 351 |
+
<div class="stats-row" id="stats-row">
|
| 352 |
+
<div class="stat-card">
|
| 353 |
+
<div class="stat-val" id="stat-conf">—</div>
|
| 354 |
+
<div class="stat-lbl">Confidence</div>
|
| 355 |
+
</div>
|
| 356 |
+
<div class="stat-card">
|
| 357 |
+
<div class="stat-val" id="stat-latency">—</div>
|
| 358 |
+
<div class="stat-lbl">Latency</div>
|
| 359 |
+
</div>
|
| 360 |
+
<div class="stat-card">
|
| 361 |
+
<div class="stat-val" id="stat-backend">—</div>
|
| 362 |
+
<div class="stat-lbl">Backend</div>
|
| 363 |
+
</div>
|
| 364 |
+
</div>
|
| 365 |
+
</div>
|
| 366 |
+
|
| 367 |
+
<div class="card" id="gradcam-card" style="display:none;margin-bottom:16px">
|
| 368 |
+
<div class="section-title">Grad-CAM Explanation</div>
|
| 369 |
+
<div class="gradcam-grid">
|
| 370 |
+
<div class="gradcam-item">
|
| 371 |
+
<img id="gradcam-overlay-img" src="" alt="Grad-CAM overlay"/>
|
| 372 |
+
<div class="gradcam-lbl">Activation Overlay</div>
|
| 373 |
+
</div>
|
| 374 |
+
<div class="gradcam-item">
|
| 375 |
+
<img id="gradcam-heatmap-img" src="" alt="Heatmap"/>
|
| 376 |
+
<div class="gradcam-lbl">Raw Heatmap</div>
|
| 377 |
+
</div>
|
| 378 |
+
</div>
|
| 379 |
+
<div class="info-block" style="margin-top:12px">
|
| 380 |
+
<strong>Grad-CAM</strong> highlights regions the model focused on.
|
| 381 |
+
</div>
|
| 382 |
+
</div>
|
| 383 |
+
|
| 384 |
+
<div class="card card-sm">
|
| 385 |
+
<div class="section-title">Class Reference</div>
|
| 386 |
+
<table class="model-table">
|
| 387 |
+
<thead><tr>
|
| 388 |
+
<th>Class</th><th>Description</th><th>Severity</th>
|
| 389 |
+
</tr></thead>
|
| 390 |
+
<tbody>
|
| 391 |
+
<tr><td><span style="color:var(--red)">Glioma</span></td>
|
| 392 |
+
<td style="color:var(--text2)">Tumor of glial cells</td>
|
| 393 |
+
<td><span class="tag">High</span></td></tr>
|
| 394 |
+
<tr><td><span style="color:var(--amber)">Meningioma</span></td>
|
| 395 |
+
<td style="color:var(--text2)">Tumor of meninges</td>
|
| 396 |
+
<td><span class="tag amber">Medium</span></td></tr>
|
| 397 |
+
<tr><td><span style="color:var(--green)">No Tumor</span></td>
|
| 398 |
+
<td style="color:var(--text2)">Normal brain MRI</td>
|
| 399 |
+
<td><span class="tag green">None</span></td></tr>
|
| 400 |
+
<tr><td><span style="color:var(--purple)">Pituitary</span></td>
|
| 401 |
+
<td style="color:var(--text2)">Pituitary gland tumor</td>
|
| 402 |
+
<td><span class="tag amber">Medium</span></td></tr>
|
| 403 |
+
</tbody>
|
| 404 |
+
</table>
|
| 405 |
+
</div>
|
| 406 |
+
</div>
|
| 407 |
+
</div>
|
| 408 |
+
|
| 409 |
+
</div>
|
| 410 |
+
|
| 411 |
+
<div class="divider"></div>
|
| 412 |
+
|
| 413 |
+
<div class="section-title">Model Performance Comparison</div>
|
| 414 |
+
<div class="card">
|
| 415 |
+
<div id="model-compare">
|
| 416 |
+
<div style="color:var(--text3);font-size:12px">Loading model info...</div>
|
| 417 |
+
</div>
|
| 418 |
+
</div>
|
| 419 |
+
|
| 420 |
+
</div>
|
| 421 |
+
|
| 422 |
+
<div id="toast"></div>
|
| 423 |
+
|
| 424 |
+
<script>
|
| 425 |
+
const API = '';
|
| 426 |
+
let selectedBackend = 'tensorflow';
|
| 427 |
+
let selectedFile = null;
|
| 428 |
+
let healthInfo = null;
|
| 429 |
+
|
| 430 |
+
async function checkHealth(){
|
| 431 |
+
try{
|
| 432 |
+
const r = await fetch(`${API}/health`);
|
| 433 |
+
if(!r.ok) throw new Error();
|
| 434 |
+
const d = await r.json();
|
| 435 |
+
healthInfo = d;
|
| 436 |
+
document.getElementById('api-status').textContent = `API · ${d.loaded_backends.length} backends`;
|
| 437 |
+
|
| 438 |
+
const failed = d.failed_backends || {};
|
| 439 |
+
if (failed.onnx_dynamic) {
|
| 440 |
+
const btn = document.getElementById('dynamic-btn');
|
| 441 |
+
if (btn) {
|
| 442 |
+
btn.classList.add('disabled');
|
| 443 |
+
btn.title = 'Dynamic INT8 is not available in this runtime';
|
| 444 |
+
}
|
| 445 |
+
if (selectedBackend === 'onnx_dynamic') {
|
| 446 |
+
selectedBackend = 'tensorflow';
|
| 447 |
+
document.querySelectorAll('.backend-btn').forEach(b => b.classList.remove('active'));
|
| 448 |
+
document.querySelector('.backend-btn[data-backend="tensorflow"]').classList.add('active');
|
| 449 |
+
}
|
| 450 |
+
}
|
| 451 |
+
} catch(e){
|
| 452 |
+
const el = document.getElementById('api-status');
|
| 453 |
+
el.textContent = 'API Offline';
|
| 454 |
+
el.classList.remove('live');
|
| 455 |
+
}
|
| 456 |
+
}
|
| 457 |
+
|
| 458 |
+
async function loadModelInfo(){
|
| 459 |
+
try{
|
| 460 |
+
const r = await fetch(`${API}/models/info`);
|
| 461 |
+
if(!r.ok) throw new Error();
|
| 462 |
+
const d = await r.json();
|
| 463 |
+
const results = d.all_results || {};
|
| 464 |
+
const values = Object.values(results);
|
| 465 |
+
const max = values.length ? Math.max(...values) : null;
|
| 466 |
+
let html = '';
|
| 467 |
+
for(const [name, acc] of Object.entries(results)){
|
| 468 |
+
const pct = (acc*100).toFixed(2);
|
| 469 |
+
const isB = acc === max;
|
| 470 |
+
html += `<div class="compare-row">
|
| 471 |
+
<div class="compare-name">${name}${isB ? ' <span class="tag green">best</span>' : ''}</div>
|
| 472 |
+
<div class="compare-bar-wrap"><div class="compare-bar" style="width:${pct}%;background:${isB?'var(--green)':'var(--accent)'}"></div></div>
|
| 473 |
+
<div class="compare-val">${pct}%</div>
|
| 474 |
+
</div>`;
|
| 475 |
+
}
|
| 476 |
+
document.getElementById('model-compare').innerHTML = html || '<div style="color:var(--text3);font-size:12px">No data</div>';
|
| 477 |
+
} catch(e){
|
| 478 |
+
document.getElementById('model-compare').innerHTML = '<div style="color:var(--text3);font-size:12px">Run train.py to generate model info.</div>';
|
| 479 |
+
}
|
| 480 |
+
}
|
| 481 |
+
|
| 482 |
+
async function loadBenchmark(){
|
| 483 |
+
try{
|
| 484 |
+
const r = await fetch(`${API}/benchmark`);
|
| 485 |
+
if(!r.ok) throw new Error();
|
| 486 |
+
const d = await r.json();
|
| 487 |
+
let html = '<table class="model-table"><thead><tr><th>Format</th><th>Accuracy</th><th>Latency</th><th>Size</th></tr></thead><tbody>';
|
| 488 |
+
for(const [name, v] of Object.entries(d)){
|
| 489 |
+
const acc = v.accuracy !== undefined ? (v.accuracy*100).toFixed(2)+'%' : '—';
|
| 490 |
+
const lat = v.latency_ms !== undefined ? v.latency_ms.toFixed(1)+' ms' : '—';
|
| 491 |
+
const size = v.size_mb !== null && v.size_mb !== undefined ? v.size_mb.toFixed(1)+' MB' : '—';
|
| 492 |
+
html += `<tr><td>${name}</td><td>${acc}</td><td>${lat}</td><td>${size}</td></tr>`;
|
| 493 |
+
}
|
| 494 |
+
html += '</tbody></table>';
|
| 495 |
+
document.getElementById('benchmark-data').innerHTML = html;
|
| 496 |
+
} catch(e){
|
| 497 |
+
document.getElementById('benchmark-data').innerHTML = '<div style="color:var(--text3);font-size:12px">Run export_onnx.py to generate benchmark data.</div>';
|
| 498 |
+
}
|
| 499 |
+
}
|
| 500 |
+
|
| 501 |
+
const fileInput = document.getElementById('file-input');
|
| 502 |
+
const uploadZone = document.getElementById('upload-zone');
|
| 503 |
+
|
| 504 |
+
fileInput.addEventListener('change', e => {
|
| 505 |
+
if(e.target.files[0]) handleFile(e.target.files[0]);
|
| 506 |
+
});
|
| 507 |
+
uploadZone.addEventListener('dragover', e => { e.preventDefault(); uploadZone.classList.add('drag'); });
|
| 508 |
+
uploadZone.addEventListener('dragleave', () => uploadZone.classList.remove('drag'));
|
| 509 |
+
uploadZone.addEventListener('drop', e => {
|
| 510 |
+
e.preventDefault();
|
| 511 |
+
uploadZone.classList.remove('drag');
|
| 512 |
+
if(e.dataTransfer.files[0]) handleFile(e.dataTransfer.files[0]);
|
| 513 |
+
});
|
| 514 |
+
|
| 515 |
+
function handleFile(file){
|
| 516 |
+
selectedFile = file;
|
| 517 |
+
const reader = new FileReader();
|
| 518 |
+
reader.onload = ev => {
|
| 519 |
+
document.getElementById('preview-img').src = ev.target.result;
|
| 520 |
+
document.getElementById('img-filename').textContent = file.name;
|
| 521 |
+
document.getElementById('preview-area').style.display = 'flex';
|
| 522 |
+
document.getElementById('analyse-btn').disabled = false;
|
| 523 |
+
document.getElementById('btn-label').textContent = 'Analyse MRI';
|
| 524 |
+
};
|
| 525 |
+
reader.readAsDataURL(file);
|
| 526 |
+
}
|
| 527 |
+
|
| 528 |
+
function selectBackend(btn){
|
| 529 |
+
if (btn.classList.contains('disabled')) return;
|
| 530 |
+
document.querySelectorAll('.backend-btn').forEach(b => b.classList.remove('active'));
|
| 531 |
+
btn.classList.add('active');
|
| 532 |
+
selectedBackend = btn.dataset.backend;
|
| 533 |
+
updateGradcamUI();
|
| 534 |
+
}
|
| 535 |
+
|
| 536 |
+
function updateGradcamUI(){
|
| 537 |
+
const isTF = selectedBackend === 'tensorflow';
|
| 538 |
+
const toggle = document.getElementById('gradcam-toggle');
|
| 539 |
+
const track = document.getElementById('toggle-track');
|
| 540 |
+
const thumb = document.getElementById('toggle-thumb');
|
| 541 |
+
const note = document.getElementById('gradcam-note');
|
| 542 |
+
note.style.opacity = isTF ? '0' : '1';
|
| 543 |
+
if(!isTF){ toggle.checked = false; }
|
| 544 |
+
toggle.disabled = !isTF;
|
| 545 |
+
const on = toggle.checked && isTF;
|
| 546 |
+
track.style.background = on ? 'var(--accent2)' : 'var(--bg4)';
|
| 547 |
+
thumb.style.transform = on ? 'translateX(16px)' : 'none';
|
| 548 |
+
thumb.style.background = on ? '#000' : 'var(--text3)';
|
| 549 |
+
}
|
| 550 |
+
|
| 551 |
+
document.getElementById('gradcam-toggle').addEventListener('change', updateGradcamUI);

async function analyse(){
    if(!selectedFile) return;

    const btn     = document.getElementById('analyse-btn');
    const spinner = document.getElementById('spinner');
    const label   = document.getElementById('btn-label');
    btn.disabled = true;
    spinner.style.display = 'block';
    label.textContent = 'Analysing...';

    const useGradcam = document.getElementById('gradcam-toggle').checked && selectedBackend === 'tensorflow';

    const formData = new FormData();
    formData.append('file', selectedFile);

    // Route the request to the endpoint matching the selected backend:
    // tensorflow → /predict, onnx_fp32 → /predict/onnx,
    // onnx_dynamic → /predict/dynamic, otherwise → /predict/static;
    // Grad-CAM requests always go to /predict/gradcam.
    const endpoint = useGradcam
        ? `${API}/predict/gradcam`
        : selectedBackend === 'tensorflow'   ? `${API}/predict`
        : selectedBackend === 'onnx_fp32'    ? `${API}/predict/onnx`
        : selectedBackend === 'onnx_dynamic' ? `${API}/predict/dynamic`
        : `${API}/predict/static`;

    try{
        const r = await fetch(endpoint, { method:'POST', body: formData });
        if(!r.ok){
            let message = 'API error';
            try{
                const err = await r.json();
                message = err.detail || message;
            } catch(_) {}
            throw new Error(message);
        }
        const data = await r.json();
        renderResults(data, useGradcam);
        showToast('Analysis complete', 'success');
    } catch(e){
        showToast(`Error: ${e.message}`, 'error');
    } finally{
        btn.disabled = false;
        spinner.style.display = 'none';
        label.textContent = 'Analyse MRI';
    }
}

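// For reference: renderResults() below consumes a JSON payload of roughly this
// shape (a sketch; field values are illustrative, confidence and
// all_probabilities are percentages, and gradcam_b64 / heatmap_b64 are only
// returned by the /predict/gradcam route):
//
//   {
//     "predicted_class": "glioma",
//     "confidence": 97.3,
//     "backend": "tensorflow",
//     "latency_ms": 42,
//     "all_probabilities": { "glioma": 97.3, "meningioma": 1.9, ... },
//     "gradcam_b64": "<base64 PNG>",
//     "heatmap_b64": "<base64 PNG>"
//   }
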
function renderResults(data, hasGradcam){
    const cls     = data.predicted_class.toLowerCase().replace(' ','');
    const conf    = data.confidence;
    const confPct = `${conf.toFixed(1)}%`;

    document.getElementById('placeholder').style.display = 'none';
    document.getElementById('results-panel').style.display = 'block';
    document.getElementById('results-panel').classList.add('fade-in');

    const hero = document.getElementById('pred-hero');
    hero.className = `prediction-hero card ${cls}`;

    const classText = document.getElementById('pred-class-text');
    classText.className   = `pred-class ${cls}`;
    classText.textContent = data.predicted_class.toUpperCase();

    document.getElementById('pred-conf-text').textContent =
        `Confidence: ${confPct} · via ${data.backend}`;

    const badge = document.getElementById('pred-badge');
    const level = conf >= 80 ? 'high' : conf >= 50 ? 'mid' : 'low';
    badge.className   = `pred-badge ${level}`;
    badge.textContent = conf >= 80 ? 'High confidence' : conf >= 50 ? 'Moderate' : 'Low confidence';

    const probs = data.all_probabilities;
    let barsHtml = '';
    for(const [name, p] of Object.entries(probs).sort((a,b) => b[1]-a[1])){
        const clsKey = name.toLowerCase().replace(' ','');
        const pct    = p.toFixed(1);
        barsHtml += `<div class="prob-row">
            <div class="prob-header">
                <span class="prob-name">${name}</span>
                <span class="prob-val">${pct}%</span>
            </div>
            <div class="prob-track">
                <div class="prob-fill ${clsKey}" style="width:${p}%"></div>
            </div>
        </div>`;
    }
    document.getElementById('prob-bars').innerHTML = barsHtml;

    document.getElementById('stat-conf').textContent    = confPct;
    document.getElementById('stat-latency').textContent = data.latency_ms ? `${data.latency_ms}ms` : '—';
    const backendShort = {
        tensorflow:   'TF FP32',
        onnx_fp32:    'ONNX FP32',
        onnx_dynamic: 'Dynamic INT8',
        onnx_static:  'Static INT8'
    }[data.backend] || data.backend;
    document.getElementById('stat-backend').textContent = backendShort;

    const gcCard = document.getElementById('gradcam-card');
    if(hasGradcam && data.gradcam_b64){
        document.getElementById('gradcam-overlay-img').src = `data:image/png;base64,${data.gradcam_b64}`;
        document.getElementById('gradcam-heatmap-img').src = `data:image/png;base64,${data.heatmap_b64}`;
        gcCard.style.display = 'block';
    } else {
        gcCard.style.display = 'none';
    }
}

function switchTab(tab, panelId){
    const parent = tab.closest('.card');
    parent.querySelectorAll('.tab').forEach(t => t.classList.remove('active'));
    parent.querySelectorAll('.tab-panel').forEach(p => p.classList.remove('active'));
    tab.classList.add('active');
    document.getElementById(panelId).classList.add('active');
}

function showToast(msg, type='success'){
    const t = document.getElementById('toast');
    t.textContent = msg;
    t.className = `show ${type}`;
    setTimeout(() => t.className = '', 3500);
}

checkHealth();
loadModelInfo();
loadBenchmark();
updateGradcamUI();
</script>
</body>
</html>
train.py
ADDED
@@ -0,0 +1,406 @@
# train.py — training loop with MLflow + DagsHub tracking

import os
import numpy as np
import tensorflow as tf
import mlflow
import mlflow.keras
from mlflow.models.signature import infer_signature
from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau, ModelCheckpoint
from tensorflow.keras import optimizers

from src.utils import get_logger, load_config, plot_history, plot_comparison
from src.data_loader import get_data_generators
from src.models import (build_baseline_cnn, build_mobilenet_tl, build_mobilenet_finetuned,
                        build_efficientnet, build_optuna_cnn, compile_model)
import optuna

logger = get_logger("train")


# ---------------------------------------------------------------------------
# MLflow setup
# ---------------------------------------------------------------------------

def setup_mlflow(cfg: dict):
    ml = cfg["mlflow"]
    os.environ["MLFLOW_TRACKING_USERNAME"] = ml["dagshub_username"]
    os.environ["MLFLOW_TRACKING_PASSWORD"] = ml["dagshub_token"]
    uri = f"https://dagshub.com/{ml['dagshub_username']}/{ml['dagshub_repo']}.mlflow"
    mlflow.set_tracking_uri(uri)
    mlflow.set_experiment(ml["experiment_name"])
    logger.info(f"MLflow → DagsHub: {uri}")

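# setup_mlflow() above reads exactly these keys from config.yaml; the block
# below is a sketch with placeholder values (keep the real dagshub_token out
# of version control):
#
#   mlflow:
#     dagshub_username: "<username>"
#     dagshub_repo:     "<repo>"
#     dagshub_token:    "<token>"
#     experiment_name:  "<experiment>"
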
def log_epoch_metrics(history):
    """Log per-epoch metrics to the active MLflow run."""
    for epoch, (ta, va, tl, vl) in enumerate(zip(
        history.history["accuracy"], history.history["val_accuracy"],
        history.history["loss"],     history.history["val_loss"],
    )):
        mlflow.log_metrics({
            "train_accuracy" : float(ta),
            "val_accuracy"   : float(va),
            "train_loss"     : float(tl),
            "val_loss"       : float(vl),
        }, step=epoch)


def get_callbacks(cfg: dict, checkpoint_path: str):
    """
    Standard callbacks for all models.
    save_format='h5' is required to avoid EagerTensor JSON
    serialization crash with EfficientNet in TF 2.10.
    """
    t = cfg["training"]
    return [
        EarlyStopping(
            monitor              = "val_accuracy",
            patience             = t["early_stopping_patience"],
            restore_best_weights = True,
            verbose              = 1,
        ),
        ReduceLROnPlateau(
            monitor  = "val_loss",
            factor   = t["reduce_lr_factor"],
            patience = t["reduce_lr_patience"],
            min_lr   = t["min_lr"],
            verbose  = 1,
        ),
        ModelCheckpoint(
            filepath       = checkpoint_path,
            monitor        = "val_accuracy",
            save_best_only = True,
            save_format    = "h5",   # ← fixes EagerTensor crash
            verbose        = 0,
        ),
    ]

# ---------------------------------------------------------------------------
# Training functions
# ---------------------------------------------------------------------------

def train_baseline(cfg, train_data, val_data):
    image_size = tuple(cfg["data"]["image_size"])
    epochs     = cfg["training"]["epochs"]
    save_dir   = cfg["models"]["save_dir"]
    os.makedirs(save_dir, exist_ok=True)

    model = compile_model(build_baseline_cnn(image_size=image_size))

    with mlflow.start_run(run_name="Baseline_CNN"):
        mlflow.log_params({
            "model_type" : "Baseline CNN",
            "filters"    : "32-64-128",
            "optimizer"  : "adam",
            "lr"         : 0.001,
            "epochs"     : epochs,
            "batch_size" : cfg["data"]["batch_size"],
        })
        history = model.fit(
            train_data, epochs=epochs, validation_data=val_data,
            callbacks=get_callbacks(cfg, f"{save_dir}/baseline_best.h5")
        )
        log_epoch_metrics(history)
        mlflow.log_metrics({
            "best_val_accuracy" : float(max(history.history["val_accuracy"])),
            "best_val_loss"     : float(min(history.history["val_loss"])),
        })
        mlflow.keras.log_model(model, "baseline_cnn")
    logger.info("Baseline CNN training complete.")
    return model, history

def train_transfer_learning(cfg, train_data, val_data):
    image_size = tuple(cfg["data"]["image_size"])
    epochs     = cfg["training"]["epochs"]
    save_dir   = cfg["models"]["save_dir"]

    model = compile_model(build_mobilenet_tl(image_size=image_size))

    with mlflow.start_run(run_name="Transfer_Learning_MobileNetV2"):
        mlflow.log_params({
            "model_type"  : "MobileNetV2 TL",
            "base_frozen" : True,
            "optimizer"   : "adam",
            "lr"          : 0.001,
            "epochs"      : epochs,
        })
        history = model.fit(
            train_data, epochs=epochs, validation_data=val_data,
            callbacks=get_callbacks(cfg, f"{save_dir}/tl_best.h5")
        )
        log_epoch_metrics(history)
        mlflow.log_metrics({
            "best_val_accuracy" : float(max(history.history["val_accuracy"])),
            "best_val_loss"     : float(min(history.history["val_loss"])),
        })
        mlflow.keras.log_model(model, "transfer_learning")
    logger.info("Transfer Learning training complete.")
    return model, history

def train_finetuned(cfg, tl_model, train_data, val_data):
    epochs   = cfg["training"]["epochs"]
    save_dir = cfg["models"]["save_dir"]

    # Both calls below mutate tl_model in place (the frozen base model is
    # tl_model.layers[0]): unfreeze its last 20 layers, then re-compile the
    # whole model at a lower learning rate. Their return values are unused.
    build_mobilenet_finetuned(tl_model.layers[0], unfreeze_last=20)
    compile_model(tl_model, lr=1e-5)

    with mlflow.start_run(run_name="Fine_Tuned_MobileNetV2"):
        mlflow.log_params({
            "model_type"      : "MobileNetV2 Fine-Tuned",
            "unfrozen_layers" : 20,
            "lr"              : 1e-5,
            "epochs"          : epochs,
        })
        history = tl_model.fit(
            train_data, epochs=epochs, validation_data=val_data,
            callbacks=get_callbacks(cfg, f"{save_dir}/ft_best.h5")
        )
        log_epoch_metrics(history)
        mlflow.log_metrics({
            "best_val_accuracy" : float(max(history.history["val_accuracy"])),
            "best_val_loss"     : float(min(history.history["val_loss"])),
        })
        mlflow.keras.log_model(tl_model, "fine_tuned")
    logger.info("Fine-Tuned training complete.")
    return tl_model, history

def train_efficientnet(cfg, train_data, val_data):
    """
    EfficientNetB0 in TF 2.10 crashes ModelCheckpoint because its internal
    rescaling layer stores weights as EagerTensors, which cannot be
    JSON-serialized during checkpoint saving.

    Fix: use a custom callback that calls model.save_weights() instead of
    model.save() — weights-only saving never touches the model config JSON,
    so EagerTensors are never serialized.
    """
    image_size = tuple(cfg["data"]["image_size"])
    epochs     = cfg["training"]["epochs"]
    save_dir   = cfg["models"]["save_dir"]
    t          = cfg["training"]

    model = build_efficientnet(image_size=image_size)
    model.compile(
        optimizer = optimizers.Adam(learning_rate=0.001),
        loss      = "categorical_crossentropy",
        metrics   = ["accuracy"],
    )

    # ── Custom checkpoint: saves weights only (no JSON config serialization) ──
    weights_path = os.path.join(save_dir, "effnet_best_weights.h5")
    best_val_acc = [0.0]   # mutable container so the inner class can write to it

    class WeightsCheckpoint(tf.keras.callbacks.Callback):
        def on_epoch_end(self, epoch, logs=None):
            va = float(logs.get("val_accuracy", 0.0))
            if va > best_val_acc[0]:
                best_val_acc[0] = va
                self.model.save_weights(weights_path)
                logger.info(f"  EfficientNet weights saved (val_acc={va:.4f})")

    with mlflow.start_run(run_name="EfficientNetB0_TL"):
        mlflow.log_params({
            "model_type"  : "EfficientNetB0",
            "base_frozen" : True,
            "lr"          : 0.001,
            "epochs"      : epochs,
        })

        history = model.fit(
            train_data,
            epochs          = epochs,
            validation_data = val_data,
            callbacks       = [
                EarlyStopping(
                    monitor              = "val_accuracy",
                    patience             = t["early_stopping_patience"],
                    restore_best_weights = True,
                    verbose              = 1,
                ),
                ReduceLROnPlateau(
                    monitor  = "val_loss",
                    factor   = t["reduce_lr_factor"],
                    patience = t["reduce_lr_patience"],
                    min_lr   = t["min_lr"],
                    verbose  = 1,
                ),
                WeightsCheckpoint(),   # ← replaces ModelCheckpoint entirely
            ],
        )

        log_epoch_metrics(history)
        mlflow.log_metrics({
            "best_val_accuracy" : float(max(history.history["val_accuracy"])),
            "best_val_loss"     : float(min(history.history["val_loss"])),
        })

        # Restore best weights
        if os.path.exists(weights_path):
            model.load_weights(weights_path)
            logger.info("Best EfficientNet weights restored.")

        # mlflow.keras.log_model crashes for EfficientNetB0 in TF 2.10 because
        # mlflow internally calls model.save(), which hits the same EagerTensor
        # JSON bug. Workaround: save weights manually and log them as an artifact.
        final_weights_path = os.path.join(save_dir, "effnet_final_weights.h5")
        model.save_weights(final_weights_path)
        mlflow.log_artifact(final_weights_path, artifact_path="efficientnet_weights")
        mlflow.set_tag("efficientnet_note",
                       "Logged as weights-only artifact due to TF2.10 EagerTensor bug")
        logger.info(f"EfficientNet weights logged to MLflow → {final_weights_path}")
    logger.info("EfficientNetB0 training complete.")
    return model, history

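# Because train_efficientnet() checkpoints and logs weights only, inference
# code has to rebuild the architecture before loading them. A minimal sketch,
# assuming the same config used for training:
#
#   model = build_efficientnet(image_size=tuple(cfg["data"]["image_size"]))
#   model.load_weights(os.path.join(cfg["models"]["save_dir"],
#                                   "effnet_best_weights.h5"))
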
# ---------------------------------------------------------------------------
# Optuna hyperparameter search
# ---------------------------------------------------------------------------

def run_optuna(cfg, train_data, val_data):
    image_size = tuple(cfg["data"]["image_size"])
    n_trials   = cfg["optuna"]["n_trials"]

    def objective(trial):
        params = {
            "filters_1"   : trial.suggest_categorical("filters_1", [32, 64]),
            "filters_2"   : trial.suggest_categorical("filters_2", [64, 128]),
            "filters_3"   : trial.suggest_categorical("filters_3", [128, 256]),
            "dense_units" : trial.suggest_categorical("dense_units", [64, 128, 256]),
            "dropout"     : trial.suggest_float("dropout", 0.2, 0.5),
            "lr"          : trial.suggest_float("lr", 1e-4, 1e-2, log=True),
        }

        with mlflow.start_run(run_name=f"trial_{trial.number:02d}", nested=True):
            mlflow.log_params(params)

            m = compile_model(
                build_optuna_cnn(params, image_size=image_size),
                lr=params["lr"]
            )
            h = m.fit(
                train_data,
                epochs          = 10,
                validation_data = val_data,
                callbacks       = [EarlyStopping(monitor="val_accuracy",
                                                 patience=3,
                                                 restore_best_weights=True)],
                verbose         = 0,
            )

            best_val = float(max(h.history["val_accuracy"]))
            mlflow.log_metric("best_val_accuracy", best_val)
            log_epoch_metrics(h)   # same per-epoch logging as the full runs

        return best_val

    optuna.logging.set_verbosity(optuna.logging.WARNING)
    study = optuna.create_study(direction="maximize")

    with mlflow.start_run(run_name="Optuna_Search_Parent"):
        mlflow.log_params({
            "n_trials"  : n_trials,
            "direction" : "maximize",
            "objective" : "val_accuracy",
        })
        study.optimize(objective, n_trials=n_trials)
        mlflow.log_metric("best_val_accuracy", float(study.best_value))
        mlflow.log_metric("best_trial_number", study.best_trial.number)
        mlflow.log_params({f"best_{k}": v for k, v in study.best_params.items()})

    logger.info(f"Optuna best val_accuracy : {study.best_value:.4f}")
    logger.info(f"Optuna best params       : {study.best_params}")
    return study

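# run_optuna() returns the finished study, so the winning configuration can be
# inspected directly before retraining, e.g.:
#
#   study = run_optuna(cfg, train_data, val_data)
#   print(study.best_value)    # best val_accuracy across trials
#   print(study.best_params)   # dict fed to train_optuna_best()
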
def train_optuna_best(cfg, study, train_data, val_data):
    image_size = tuple(cfg["data"]["image_size"])
    epochs     = cfg["training"]["epochs"]
    save_dir   = cfg["models"]["save_dir"]
    p          = study.best_params

    model = compile_model(
        build_optuna_cnn(p, image_size=image_size),
        lr=p["lr"]
    )

    with mlflow.start_run(run_name="Optuna_Best_CNN_Final") as run:
        mlflow.log_params({
            **p,
            "model_type" : "Optuna Best CNN",
            "epochs"     : epochs,
        })
        history = model.fit(
            train_data, epochs=epochs, validation_data=val_data,
            callbacks=get_callbacks(cfg, f"{save_dir}/optuna_best.h5")
        )
        log_epoch_metrics(history)
        mlflow.log_metrics({
            "best_val_accuracy" : float(max(history.history["val_accuracy"])),
            "best_val_loss"     : float(min(history.history["val_loss"])),
        })
        sample    = train_data[0][0][:1]
        signature = infer_signature(sample, model.predict(sample))
        mlflow.keras.log_model(model, "optuna_best_cnn", signature=signature)
        best_run_id = run.info.run_id

    logger.info("Optuna Best CNN training complete.")
    return model, history, best_run_id

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

# Run from the repo root (python train.py) so the src/ imports and the
# relative config.yaml path resolve.
if __name__ == "__main__":
    cfg = load_config("config.yaml")
    setup_mlflow(cfg)

    train_data, val_data, test_data = get_data_generators(cfg)

    logger.info("=" * 50)
    logger.info("Starting full training pipeline")
    logger.info("=" * 50)

    baseline_model, history_base = train_baseline(cfg, train_data, val_data)
    tl_model, history_tl         = train_transfer_learning(cfg, train_data, val_data)
    ft_model, history_ft         = train_finetuned(cfg, tl_model, train_data, val_data)
    eff_model, history_eff       = train_efficientnet(cfg, train_data, val_data)
    study                        = run_optuna(cfg, train_data, val_data)
    optuna_model, history_optuna, best_run_id = train_optuna_best(
        cfg, study, train_data, val_data
    )

    results = {
        "Baseline CNN"      : float(max(history_base.history["val_accuracy"])),
        "Transfer Learning" : float(max(history_tl.history["val_accuracy"])),
        "Fine-Tuned"        : float(max(history_ft.history["val_accuracy"])),
        "EfficientNetB0"    : float(max(history_eff.history["val_accuracy"])),
        "Optuna Best CNN"   : float(max(history_optuna.history["val_accuracy"])),
    }

    logger.info("\n" + "=" * 45)
    logger.info(f"{'Model':<25} {'Val Acc':>10}")
    logger.info("=" * 45)
    for name, acc in results.items():
        mark = " << best" if acc == max(results.values()) else ""
        logger.info(f"{name:<25} {acc:>10.4f}{mark}")
    logger.info("=" * 45)

    os.makedirs("./logs", exist_ok=True)
    plot_comparison(results, save_path="./logs/model_comparison.png")
    logger.info("Training pipeline complete.")