Refactor email content generation and update email sending logic
Browse files- Simplified OTP and password reset email content generation by removing HTML rendering and returning plain text only.
- Updated the `_send_email` function to send only plain text emails.
- Adjusted the registration, login, and password reset flows to use the new email content functions.
- Added a new `report_payload` column to the `ScreeningReport` model for storing additional report data.
- Introduced Celery tasks for asynchronous DICOM processing and batch handling, including progress tracking via Redis.
- Enhanced error handling in user loading and added logging for SQLAlchemy errors.
- Updated various templates to reflect the new branding as "AI Medical Intelligence Pipeline" and improved UI elements.
- Added ground truth update functionality in the detail view.
- Implemented LLM information display in the report detail view.
Co-authored-by: Copilot <copilot@github.com>
- .env.example +9 -0
- README.md +3 -3
- app_new.py +565 -225
- auth_routes.py +18 -83
- auth_utils.py +17 -1
- models.py +1 -0
- requirements.txt +5 -0
- run_interface.py +5 -0
- static/js/batch.js +11 -0
- tasks.py +260 -0
- templates/404.html +2 -2
- templates/500.html +2 -2
- templates/about.html +4 -4
- templates/auth/login.html +5 -5
- templates/auth/reset_password.html +2 -2
- templates/auth/verify_otp.html +3 -3
- templates/base.html +6 -4
- templates/batch_progress.html +6 -1
- templates/detail.html +29 -0
- templates/evaluation.html +30 -1
- templates/home.html +5 -3
- templates/logs.html +1 -1
- templates/reports.html +1 -1
- templates/upload.html +1 -1
|
@@ -57,6 +57,15 @@ ICH_PUBLIC_BASE_URL=
|
|
| 57 |
# Optional local debugging for auth emails (prints OTP/reset link to server logs)
|
| 58 |
ICH_DEBUG_AUTH_EMAILS=false
|
| 59 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 61 |
# LOGGING & MONITORING
|
| 62 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
| 57 |
# Optional local debugging for auth emails (prints OTP/reset link to server logs)
|
| 58 |
ICH_DEBUG_AUTH_EMAILS=false
|
| 59 |
|
| 60 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 61 |
+
# CELERY + REDIS (ASYNC TASK QUEUE FOR BATCH PROCESSING)
|
| 62 |
+
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 63 |
+
# Redis broker and result backend for Celery workers.
|
| 64 |
+
# Local dev: redis://localhost:6379/0
|
| 65 |
+
# Upstash (cloud): rediss://default:<auth_token>@<hostname>:<port>
|
| 66 |
+
# Leave empty to use default local Redis
|
| 67 |
+
REDIS_URL=
|
| 68 |
+
|
| 69 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 70 |
# LOGGING & MONITORING
|
| 71 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
-
#
|
| 2 |
|
| 3 |
-
AI
|
| 4 |
|
| 5 |
This project provides a Flask web interface for:
|
| 6 |
|
|
@@ -12,7 +12,7 @@ This project provides a Flask web interface for:
|
|
| 12 |
|
| 13 |
## Project Overview
|
| 14 |
|
| 15 |
-
Intracranial hemorrhage is a time-critical emergency finding in neuroimaging. This repository focuses on a practical
|
| 16 |
|
| 17 |
The system is built for decision support and triage assistance, not standalone diagnosis.
|
| 18 |
|
|
|
|
| 1 |
+
# AI Medical Intelligence Pipeline for CT Scan Analysis
|
| 2 |
|
| 3 |
+
AI medical intelligence pipeline for intracranial hemorrhage (ICH) analysis from head CT (DICOM) images.
|
| 4 |
|
| 5 |
This project provides a Flask web interface for:
|
| 6 |
|
|
|
|
| 12 |
|
| 13 |
## Project Overview
|
| 14 |
|
| 15 |
+
Intracranial hemorrhage is a time-critical emergency finding in neuroimaging. This repository focuses on a practical intelligence pipeline with explainability and structured report output.
|
| 16 |
|
| 17 |
The system is built for decision support and triage assistance, not standalone diagnosis.
|
| 18 |
|
|
@@ -25,12 +25,12 @@ import os
|
|
| 25 |
import shutil
|
| 26 |
import sys
|
| 27 |
import tempfile
|
| 28 |
-
import threading
|
| 29 |
import time
|
| 30 |
import uuid
|
| 31 |
import zipfile
|
| 32 |
import math
|
| 33 |
from dataclasses import dataclass
|
|
|
|
| 34 |
from pathlib import Path
|
| 35 |
from typing import Any
|
| 36 |
|
|
@@ -66,9 +66,12 @@ except Exception:
|
|
| 66 |
bbr = _NoopRecorder()
|
| 67 |
|
| 68 |
from flask import (
|
| 69 |
-
Flask, abort, flash, g, jsonify, redirect, render_template, request,
|
| 70 |
send_from_directory, url_for
|
| 71 |
)
|
|
|
|
|
|
|
|
|
|
| 72 |
from werkzeug.utils import secure_filename
|
| 73 |
from werkzeug.middleware.proxy_fix import ProxyFix
|
| 74 |
from flask_login import current_user, login_required
|
|
@@ -121,6 +124,7 @@ DATABASE_URL = os.environ.get("DATABASE_URL", "").strip()
|
|
| 121 |
HF_MODEL_REPO = os.environ.get("ICH_HF_MODEL_REPO", "").strip()
|
| 122 |
HF_TOKEN = os.environ.get("ICH_HF_TOKEN", "").strip()
|
| 123 |
LOCAL_MODE = _env_bool("ICH_LOCAL_MODE", True)
|
|
|
|
| 124 |
|
| 125 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 126 |
# FLASK APP SETUP
|
|
@@ -135,6 +139,10 @@ app.config.update(
|
|
| 135 |
SECRET_KEY=SECRET_KEY or os.urandom(32).hex(),
|
| 136 |
DEBUG=APP_DEBUG and os.environ.get("FLASK_ENV") == "development",
|
| 137 |
SQLALCHEMY_DATABASE_URI=DATABASE_URL or "sqlite:///ich_app.db",
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
SQLALCHEMY_TRACK_MODIFICATIONS=False,
|
| 139 |
SESSION_COOKIE_SECURE=not APP_DEBUG,
|
| 140 |
SESSION_COOKIE_HTTPONLY=True,
|
|
@@ -150,6 +158,16 @@ init_security(app)
|
|
| 150 |
# Register blueprints
|
| 151 |
app.register_blueprint(auth_bp)
|
| 152 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 153 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 154 |
# LOGGING
|
| 155 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
@@ -194,9 +212,6 @@ _MODEL: dict[str, Any] = {
|
|
| 194 |
"inference_mod": None,
|
| 195 |
}
|
| 196 |
|
| 197 |
-
_BATCHES: dict[str, dict[str, Any]] = {}
|
| 198 |
-
_BATCHES_LOCK = threading.Lock()
|
| 199 |
-
|
| 200 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 201 |
# MODEL LOADING
|
| 202 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
@@ -305,14 +320,18 @@ def _ensure_model_loaded() -> bool:
|
|
| 305 |
# INFERENCE & BATCH PROCESSING
|
| 306 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 307 |
|
| 308 |
-
def _run_inference_on_dcm(
|
|
|
|
|
|
|
|
|
|
|
|
|
| 309 |
"""Run inference on a single DICOM file"""
|
| 310 |
if not _ensure_model_loaded():
|
| 311 |
return None, None
|
| 312 |
|
| 313 |
ri_mod = _MODEL["inference_mod"]
|
| 314 |
image_id = dcm_path.stem
|
| 315 |
-
user_reports_dir = UserDataManager(
|
| 316 |
|
| 317 |
bbr.start()
|
| 318 |
|
|
@@ -338,26 +357,24 @@ def _run_inference_on_dcm(dcm_path: Path, user_id: int) -> tuple[dict[str, Any]
|
|
| 338 |
pred.setdefault("calibrated_probability", inference.get("cal_prob_any"))
|
| 339 |
pred.setdefault("decision_threshold", pred.get("decision_threshold_any"))
|
| 340 |
report["prediction"] = pred
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 341 |
|
| 342 |
report_path = user_reports_dir / f"{image_id}_report.json"
|
| 343 |
with open(report_path, "w") as f:
|
| 344 |
json.dump(report, f, indent=2)
|
| 345 |
|
| 346 |
-
upload = ScreeningUpload(
|
| 347 |
-
user_id=user_id,
|
| 348 |
-
file_name=dcm_path.name,
|
| 349 |
-
original_filename=dcm_path.name,
|
| 350 |
-
file_size=dcm_path.stat().st_size if dcm_path.exists() else 0,
|
| 351 |
-
file_path=str(dcm_path.relative_to(BASE_DIR)) if dcm_path.is_relative_to(BASE_DIR) else str(dcm_path),
|
| 352 |
-
processing_status='completed'
|
| 353 |
-
)
|
| 354 |
-
db.session.add(upload)
|
| 355 |
-
db.session.flush()
|
| 356 |
-
|
| 357 |
# Save to database
|
|
|
|
|
|
|
| 358 |
screening_report = ScreeningReport(
|
| 359 |
user_id=user_id,
|
| 360 |
-
upload_id=
|
| 361 |
image_id=image_id,
|
| 362 |
screening_outcome=pred.get("screening_outcome"),
|
| 363 |
raw_probability=pred.get("raw_probability"),
|
|
@@ -366,9 +383,10 @@ def _run_inference_on_dcm(dcm_path: Path, user_id: int) -> tuple[dict[str, Any]
|
|
| 366 |
decision_threshold=pred.get("decision_threshold"),
|
| 367 |
triage_action=report.get("triage", {}).get("action"),
|
| 368 |
urgency=report.get("triage", {}).get("urgency"),
|
|
|
|
|
|
|
| 369 |
llm_summary=report.get("llm_summary"),
|
| 370 |
-
|
| 371 |
-
gradcam_image_path=report.get("cloudinary_heatmap_url") or str((user_reports_dir / f"{image_id}_gradcam.png").relative_to(BASE_DIR)),
|
| 372 |
generated_at=datetime.datetime.utcnow(),
|
| 373 |
)
|
| 374 |
db.session.add(screening_report)
|
|
@@ -378,6 +396,7 @@ def _run_inference_on_dcm(dcm_path: Path, user_id: int) -> tuple[dict[str, Any]
|
|
| 378 |
resource_id=screening_report.id, status="success")
|
| 379 |
|
| 380 |
except Exception as e:
|
|
|
|
| 381 |
bbr.stop()
|
| 382 |
logger.error(f"Inference failed: {e}", exc_info=True)
|
| 383 |
log_audit("inference_failed", user_id=user_id, status="failure", details=str(e))
|
|
@@ -396,87 +415,139 @@ def _run_inference_on_dcm(dcm_path: Path, user_id: int) -> tuple[dict[str, Any]
|
|
| 396 |
|
| 397 |
return report, {"timestamp": ts, "image_id": image_id}
|
| 398 |
|
| 399 |
-
def
|
| 400 |
-
"""
|
| 401 |
-
batch_id = uuid.uuid4().hex[:12]
|
| 402 |
-
|
| 403 |
-
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
|
| 407 |
-
"
|
| 408 |
-
|
| 409 |
-
|
| 410 |
-
|
| 411 |
-
|
| 412 |
-
|
| 413 |
-
|
| 414 |
-
|
| 415 |
-
|
| 416 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 417 |
return batch_id
|
| 418 |
|
| 419 |
-
def _batch_update(batch_id: str, **kw: Any) -> None:
|
| 420 |
-
"""Update batch job status"""
|
| 421 |
-
with _BATCHES_LOCK:
|
| 422 |
-
if batch_id in _BATCHES:
|
| 423 |
-
_BATCHES[batch_id].update(kw)
|
| 424 |
|
| 425 |
-
def
|
| 426 |
-
"""
|
| 427 |
-
|
| 428 |
-
|
| 429 |
-
|
| 430 |
-
|
| 431 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 432 |
image_id = path.stem
|
| 433 |
-
|
| 434 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 435 |
try:
|
| 436 |
-
report, _ = _run_inference_on_dcm(path, user_id)
|
| 437 |
if report:
|
|
|
|
|
|
|
| 438 |
succeeded_ids.append(image_id)
|
| 439 |
else:
|
|
|
|
|
|
|
| 440 |
failed_ids.append(image_id)
|
| 441 |
-
except Exception as
|
| 442 |
-
logger.error(f"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 443 |
failed_ids.append(image_id)
|
| 444 |
-
|
| 445 |
-
|
| 446 |
-
|
| 447 |
-
|
| 448 |
-
|
| 449 |
-
|
| 450 |
-
|
| 451 |
-
|
| 452 |
-
|
| 453 |
-
|
| 454 |
-
|
| 455 |
-
|
| 456 |
-
|
| 457 |
-
|
| 458 |
-
|
| 459 |
-
|
| 460 |
-
_batch_update(
|
| 461 |
-
batch_id,
|
| 462 |
-
status="completed",
|
| 463 |
-
current_file="",
|
| 464 |
-
finished_at=datetime.datetime.now().isoformat(),
|
| 465 |
-
)
|
| 466 |
-
logger.info(f"Batch {batch_id} complete: {len(succeeded_ids)}/{len(dcm_paths)}, {len(failed_ids)} failed")
|
| 467 |
-
|
| 468 |
-
def _start_batch(dcm_paths: list[Path], user_id: int, temp_dir: str | None = None) -> str:
|
| 469 |
-
"""Start async batch processing"""
|
| 470 |
-
batch_id = _new_batch(user_id, len(dcm_paths), temp_dir)
|
| 471 |
-
t = threading.Thread(
|
| 472 |
-
target=_run_batch_worker,
|
| 473 |
-
args=(batch_id, dcm_paths, user_id),
|
| 474 |
-
daemon=True,
|
| 475 |
-
name=f"batch-{batch_id}",
|
| 476 |
)
|
| 477 |
-
t.start()
|
| 478 |
-
return batch_id
|
| 479 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 480 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 481 |
# DATA MODEL & UTILITIES
|
| 482 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
@@ -493,7 +564,15 @@ class CaseRow:
|
|
| 493 |
urgency: str = "N/A"
|
| 494 |
generated_at: str = ""
|
| 495 |
report_file: str | None = None
|
| 496 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 497 |
|
| 498 |
@property
|
| 499 |
def date_display(self) -> str:
|
|
@@ -517,13 +596,6 @@ def _load_user_cases(user_id: int) -> list[CaseRow]:
|
|
| 517 |
|
| 518 |
cases = []
|
| 519 |
for r in reports:
|
| 520 |
-
# Fallback for old records with missing gradcam_image_path
|
| 521 |
-
g_url = r.gradcam_image_path
|
| 522 |
-
if not g_url:
|
| 523 |
-
fallback_path = UserDataManager(UPLOAD_BASE_DIR).get_user_reports_dir(current_user.id) / f"{r.image_id}_gradcam.png"
|
| 524 |
-
if fallback_path.exists():
|
| 525 |
-
g_url = url_for('serve_gradcam', filename=fallback_path.name)
|
| 526 |
-
|
| 527 |
cases.append(CaseRow(
|
| 528 |
image_id=r.image_id,
|
| 529 |
outcome=r.screening_outcome or "Unknown",
|
|
@@ -534,11 +606,50 @@ def _load_user_cases(user_id: int) -> list[CaseRow]:
|
|
| 534 |
urgency=r.urgency or "N/A",
|
| 535 |
generated_at=r.generated_at.isoformat() if r.generated_at else "",
|
| 536 |
report_file=Path(r.report_json_path).name if r.report_json_path else None,
|
| 537 |
-
|
| 538 |
))
|
| 539 |
|
| 540 |
return cases
|
| 541 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 542 |
def compute_stats(rows: list[CaseRow]) -> dict[str, Any]:
|
| 543 |
"""Compute statistics for dashboard"""
|
| 544 |
total = len(rows)
|
|
@@ -555,7 +666,53 @@ def compute_stats(rows: list[CaseRow]) -> dict[str, Any]:
|
|
| 555 |
"urgent": urgent,
|
| 556 |
"avg_cal_prob": avg_cal,
|
| 557 |
"pos_rate": pos_rate,
|
| 558 |
-
"heatmaps": sum(1 for r in rows if r.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 559 |
}
|
| 560 |
|
| 561 |
|
|
@@ -658,7 +815,7 @@ def analyze():
|
|
| 658 |
flash("No files were uploaded.", "error")
|
| 659 |
return redirect(url_for("upload"))
|
| 660 |
|
| 661 |
-
user_upload_dir = UserDataManager(
|
| 662 |
user_upload_dir.mkdir(parents=True, exist_ok=True)
|
| 663 |
|
| 664 |
dcm_paths: list[Path] = []
|
|
@@ -700,12 +857,28 @@ def analyze():
|
|
| 700 |
if len(dcm_paths) == 1 and temp_dir is None:
|
| 701 |
path = dcm_paths[0]
|
| 702 |
try:
|
| 703 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 704 |
if not report:
|
| 705 |
flash("Model failed to load. Check server logs.", "error")
|
| 706 |
return redirect(url_for("upload"))
|
|
|
|
|
|
|
|
|
|
| 707 |
return redirect(url_for("case_detail", image_id=path.stem))
|
| 708 |
except Exception as e:
|
|
|
|
| 709 |
logger.error(f"Analysis failed: {e}")
|
| 710 |
log_audit("analysis_failed", user_id=current_user.id, status="failure", details=str(e))
|
| 711 |
flash(f"Analysis failed: {e}", "error")
|
|
@@ -715,10 +888,23 @@ def analyze():
|
|
| 715 |
path.unlink()
|
| 716 |
|
| 717 |
# Multiple files - async batch
|
| 718 |
-
|
| 719 |
-
|
| 720 |
-
|
| 721 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 722 |
|
| 723 |
|
| 724 |
@app.route("/analyze/directory", methods=["POST"])
|
|
@@ -728,22 +914,14 @@ def analyze_directory():
|
|
| 728 |
if not LOCAL_MODE:
|
| 729 |
abort(403)
|
| 730 |
|
| 731 |
-
dir_path_str = request.form.get("dir_path", "").strip()
|
| 732 |
if not dir_path_str:
|
| 733 |
flash("Please enter a directory path.", "error")
|
| 734 |
return redirect(url_for("upload"))
|
| 735 |
|
| 736 |
-
|
| 737 |
-
|
| 738 |
-
|
| 739 |
-
|
| 740 |
-
scan_dir = Path(dir_path_str).expanduser().resolve()
|
| 741 |
-
try:
|
| 742 |
-
if not scan_dir.is_dir():
|
| 743 |
-
flash(f"Directory not found: {dir_path_str}", "error")
|
| 744 |
-
return redirect(url_for("upload"))
|
| 745 |
-
except OSError:
|
| 746 |
-
flash("Invalid directory path format.", "error")
|
| 747 |
return redirect(url_for("upload"))
|
| 748 |
|
| 749 |
dcm_paths = sorted(scan_dir.rglob("*.dcm"))
|
|
@@ -751,32 +929,125 @@ def analyze_directory():
|
|
| 751 |
flash(f"No .dcm files found in: {dir_path_str}", "error")
|
| 752 |
return redirect(url_for("upload"))
|
| 753 |
|
| 754 |
-
|
| 755 |
-
|
| 756 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 757 |
|
| 758 |
@app.route("/batch/<batch_id>")
|
| 759 |
@login_required
|
| 760 |
def batch_progress(batch_id):
|
| 761 |
"""Batch processing progress page"""
|
| 762 |
-
|
| 763 |
-
|
| 764 |
-
|
| 765 |
-
abort(404)
|
| 766 |
-
batch_copy = dict(batch)
|
| 767 |
|
| 768 |
-
return render_template("batch_progress.html", batch=
|
| 769 |
|
| 770 |
@app.route("/batch/<batch_id>/status")
|
| 771 |
@login_required
|
| 772 |
def batch_status(batch_id):
|
| 773 |
"""Get batch status (JSON API)"""
|
| 774 |
-
|
| 775 |
-
|
| 776 |
-
|
| 777 |
-
|
| 778 |
-
return jsonify(batch)
|
| 779 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 780 |
@app.route("/reports")
|
| 781 |
@login_required
|
| 782 |
def reports():
|
|
@@ -854,58 +1125,171 @@ def reports():
|
|
| 854 |
data_cache_hit=False,
|
| 855 |
)
|
| 856 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 857 |
@app.route("/case/<image_id>")
|
| 858 |
@login_required
|
| 859 |
def case_detail(image_id):
|
| 860 |
"""View screening report details"""
|
| 861 |
-
|
|
|
|
|
|
|
| 862 |
|
| 863 |
-
user_reports_dir = UserDataManager(UPLOAD_BASE_DIR).get_user_reports_dir(current_user.id)
|
| 864 |
report_data = None
|
| 865 |
-
if
|
| 866 |
-
|
| 867 |
-
|
| 868 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 869 |
report_data = json.load(f)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 870 |
|
| 871 |
-
|
| 872 |
-
if report_record:
|
| 873 |
-
g_url = report_record.gradcam_image_path
|
| 874 |
-
if not g_url:
|
| 875 |
-
fallback_path = user_reports_dir / f"{image_id}_gradcam.png"
|
| 876 |
-
if fallback_path.exists():
|
| 877 |
-
g_url = url_for('serve_gradcam', filename=fallback_path.name)
|
| 878 |
|
| 879 |
-
|
| 880 |
-
|
| 881 |
-
|
| 882 |
-
|
| 883 |
-
|
| 884 |
-
|
| 885 |
-
|
| 886 |
-
|
| 887 |
-
|
| 888 |
-
|
| 889 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 890 |
else:
|
| 891 |
-
|
| 892 |
-
row = CaseRow(
|
| 893 |
-
image_id=image_id, outcome="Unknown", raw_prob=None, cal_prob=None,
|
| 894 |
-
band="N/A", triage="N/A", urgency="N/A", generated_at=""
|
| 895 |
-
)
|
| 896 |
|
| 897 |
-
|
| 898 |
-
|
| 899 |
-
|
| 900 |
-
|
| 901 |
-
|
| 902 |
-
|
| 903 |
-
|
|
|
|
|
|
|
|
|
|
| 904 |
|
| 905 |
@app.route("/logs")
|
| 906 |
@login_required
|
| 907 |
def logs_page():
|
| 908 |
"""View user's inference logs"""
|
|
|
|
|
|
|
| 909 |
log_files = []
|
| 910 |
|
| 911 |
if LOGS_DIR.exists():
|
|
@@ -928,6 +1312,7 @@ def about():
|
|
| 928 |
def evaluation():
|
| 929 |
"""Model evaluation page"""
|
| 930 |
cases = _load_user_cases(current_user.id) if current_user.is_authenticated else []
|
|
|
|
| 931 |
cal_probs = [r.cal_prob for r in cases if r.cal_prob is not None]
|
| 932 |
|
| 933 |
bins = [0] * 10
|
|
@@ -952,6 +1337,7 @@ def evaluation():
|
|
| 952 |
bins=bins,
|
| 953 |
band_data=band_data,
|
| 954 |
total=len(cases),
|
|
|
|
| 955 |
)
|
| 956 |
|
| 957 |
|
|
@@ -960,69 +1346,25 @@ def evaluation():
|
|
| 960 |
def serve_gradcam(filename: str):
|
| 961 |
"""Serve a user's Grad-CAM image from their report directory."""
|
| 962 |
safe_name = Path(filename).name
|
| 963 |
-
reports_dir = UserDataManager(
|
| 964 |
return send_from_directory(reports_dir, safe_name)
|
| 965 |
|
| 966 |
-
@app.route("/report/<path:filename>")
|
| 967 |
@login_required
|
| 968 |
def serve_report_json(filename: str):
|
| 969 |
-
"""Serve a user's JSON report."""
|
| 970 |
safe_name = Path(filename).name
|
| 971 |
-
reports_dir = UserDataManager(
|
| 972 |
-
|
|
|
|
|
|
|
| 973 |
|
| 974 |
-
|
| 975 |
-
@login_required
|
| 976 |
-
def delete_report(image_id: str):
|
| 977 |
-
"""Delete a single screening report and its local files."""
|
| 978 |
report = ScreeningReport.query.filter_by(user_id=current_user.id, image_id=image_id).first()
|
| 979 |
-
if
|
| 980 |
-
|
| 981 |
-
return redirect(url_for("reports"))
|
| 982 |
-
|
| 983 |
-
# Delete local files
|
| 984 |
-
user_reports_dir = UserDataManager(UPLOAD_BASE_DIR).get_user_reports_dir(current_user.id)
|
| 985 |
-
for suffix in ("_report.json", "_gradcam.png", "_preview.png"):
|
| 986 |
-
fp = user_reports_dir / f"{image_id}{suffix}"
|
| 987 |
-
fp.unlink(missing_ok=True)
|
| 988 |
-
|
| 989 |
-
# Delete associated upload record
|
| 990 |
-
if report.upload_id:
|
| 991 |
-
upload = db.session.get(ScreeningUpload, report.upload_id)
|
| 992 |
-
if upload:
|
| 993 |
-
db.session.delete(upload)
|
| 994 |
-
else:
|
| 995 |
-
db.session.delete(report)
|
| 996 |
-
else:
|
| 997 |
-
db.session.delete(report)
|
| 998 |
|
| 999 |
-
|
| 1000 |
-
log_audit("report_deleted", user_id=current_user.id, resource_type="report", resource_id=image_id)
|
| 1001 |
-
flash(f"Report {image_id} deleted.", "success")
|
| 1002 |
-
return redirect(url_for("reports"))
|
| 1003 |
-
|
| 1004 |
-
|
| 1005 |
-
@app.route("/reports/delete-all", methods=["POST"])
|
| 1006 |
-
@login_required
|
| 1007 |
-
def delete_all_reports():
|
| 1008 |
-
"""Delete ALL reports for the current user quickly."""
|
| 1009 |
-
import shutil
|
| 1010 |
-
|
| 1011 |
-
# 1. Delete physical files securely by dropping the entire user reports folder
|
| 1012 |
-
user_reports_dir = UserDataManager(UPLOAD_BASE_DIR).get_user_reports_dir(current_user.id)
|
| 1013 |
-
shutil.rmtree(user_reports_dir, ignore_errors=True)
|
| 1014 |
-
user_reports_dir.mkdir(parents=True, exist_ok=True)
|
| 1015 |
-
|
| 1016 |
-
# 2. Bulk delete database records
|
| 1017 |
-
# Because of cascade rules, deleting uploads will automatically delete reports too.
|
| 1018 |
-
# But to be completely safe, we can delete reports directly as well.
|
| 1019 |
-
report_count = db.session.query(ScreeningReport).filter_by(user_id=current_user.id).delete()
|
| 1020 |
-
upload_count = db.session.query(ScreeningUpload).filter_by(user_id=current_user.id).delete()
|
| 1021 |
-
|
| 1022 |
-
db.session.commit()
|
| 1023 |
-
log_audit("all_reports_deleted", user_id=current_user.id, resource_type="report", resource_id="all")
|
| 1024 |
-
flash(f"All {report_count} reports and their files have been deleted.", "success")
|
| 1025 |
-
return redirect(url_for("reports"))
|
| 1026 |
|
| 1027 |
@app.errorhandler(401)
|
| 1028 |
def unauthorized(e):
|
|
@@ -1063,8 +1405,6 @@ def init_db_cmd():
|
|
| 1063 |
@app.cli.command()
|
| 1064 |
def create_admin():
|
| 1065 |
"""Create admin user (interactive)"""
|
| 1066 |
-
from getpass import getpass
|
| 1067 |
-
|
| 1068 |
username = input("Username: ").strip()
|
| 1069 |
email = input("Email: ").strip()
|
| 1070 |
password = getpass("Password: ")
|
|
|
|
| 25 |
import shutil
|
| 26 |
import sys
|
| 27 |
import tempfile
|
|
|
|
| 28 |
import time
|
| 29 |
import uuid
|
| 30 |
import zipfile
|
| 31 |
import math
|
| 32 |
from dataclasses import dataclass
|
| 33 |
+
from getpass import getpass
|
| 34 |
from pathlib import Path
|
| 35 |
from typing import Any
|
| 36 |
|
|
|
|
| 66 |
bbr = _NoopRecorder()
|
| 67 |
|
| 68 |
from flask import (
|
| 69 |
+
Flask, Response, abort, flash, g, jsonify, redirect, render_template, request,
|
| 70 |
send_from_directory, url_for
|
| 71 |
)
|
| 72 |
+
from types import SimpleNamespace
|
| 73 |
+
from celery.result import AsyncResult
|
| 74 |
+
from tasks import REDIS_URL, celery_app
|
| 75 |
from werkzeug.utils import secure_filename
|
| 76 |
from werkzeug.middleware.proxy_fix import ProxyFix
|
| 77 |
from flask_login import current_user, login_required
|
|
|
|
| 124 |
HF_MODEL_REPO = os.environ.get("ICH_HF_MODEL_REPO", "").strip()
|
| 125 |
HF_TOKEN = os.environ.get("ICH_HF_TOKEN", "").strip()
|
| 126 |
LOCAL_MODE = _env_bool("ICH_LOCAL_MODE", True)
|
| 127 |
+
SHOW_LOGS = _env_bool("ICH_SHOW_LOGS", False)
|
| 128 |
|
| 129 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 130 |
# FLASK APP SETUP
|
|
|
|
| 139 |
SECRET_KEY=SECRET_KEY or os.urandom(32).hex(),
|
| 140 |
DEBUG=APP_DEBUG and os.environ.get("FLASK_ENV") == "development",
|
| 141 |
SQLALCHEMY_DATABASE_URI=DATABASE_URL or "sqlite:///ich_app.db",
|
| 142 |
+
SQLALCHEMY_ENGINE_OPTIONS={
|
| 143 |
+
"pool_pre_ping": True,
|
| 144 |
+
"pool_recycle": 280,
|
| 145 |
+
},
|
| 146 |
SQLALCHEMY_TRACK_MODIFICATIONS=False,
|
| 147 |
SESSION_COOKIE_SECURE=not APP_DEBUG,
|
| 148 |
SESSION_COOKIE_HTTPONLY=True,
|
|
|
|
| 158 |
# Register blueprints
|
| 159 |
app.register_blueprint(auth_bp)
|
| 160 |
|
| 161 |
+
@app.context_processor
|
| 162 |
+
def inject_feature_flags():
|
| 163 |
+
log_count = 0
|
| 164 |
+
if SHOW_LOGS and LOGS_DIR.exists():
|
| 165 |
+
try:
|
| 166 |
+
log_count = sum(1 for path in LOGS_DIR.iterdir() if path.suffix == ".json")
|
| 167 |
+
except OSError:
|
| 168 |
+
log_count = 0
|
| 169 |
+
return {"show_logs": SHOW_LOGS, "log_count": log_count}
|
| 170 |
+
|
| 171 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 172 |
# LOGGING
|
| 173 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
| 212 |
"inference_mod": None,
|
| 213 |
}
|
| 214 |
|
|
|
|
|
|
|
|
|
|
| 215 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 216 |
# MODEL LOADING
|
| 217 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
| 320 |
# INFERENCE & BATCH PROCESSING
|
| 321 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 322 |
|
| 323 |
+
def _run_inference_on_dcm(
|
| 324 |
+
dcm_path: Path,
|
| 325 |
+
user_id: int,
|
| 326 |
+
upload_id: int,
|
| 327 |
+
) -> tuple[dict[str, Any] | None, dict[str, Any] | None]:
|
| 328 |
"""Run inference on a single DICOM file"""
|
| 329 |
if not _ensure_model_loaded():
|
| 330 |
return None, None
|
| 331 |
|
| 332 |
ri_mod = _MODEL["inference_mod"]
|
| 333 |
image_id = dcm_path.stem
|
| 334 |
+
user_reports_dir = UserDataManager().get_user_reports_dir(user_id)
|
| 335 |
|
| 336 |
bbr.start()
|
| 337 |
|
|
|
|
| 357 |
pred.setdefault("calibrated_probability", inference.get("cal_prob_any"))
|
| 358 |
pred.setdefault("decision_threshold", pred.get("decision_threshold_any"))
|
| 359 |
report["prediction"] = pred
|
| 360 |
+
|
| 361 |
+
explainability = report.get("explainability", {}) if isinstance(report, dict) else {}
|
| 362 |
+
gradcam_reference = (
|
| 363 |
+
report.get("cloudinary_heatmap_url")
|
| 364 |
+
or explainability.get("heatmap_path")
|
| 365 |
+
or explainability.get("image_path")
|
| 366 |
+
)
|
| 367 |
|
| 368 |
report_path = user_reports_dir / f"{image_id}_report.json"
|
| 369 |
with open(report_path, "w") as f:
|
| 370 |
json.dump(report, f, indent=2)
|
| 371 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 372 |
# Save to database
|
| 373 |
+
user_data_dir = UserDataManager().get_user_data_dir(user_id)
|
| 374 |
+
|
| 375 |
screening_report = ScreeningReport(
|
| 376 |
user_id=user_id,
|
| 377 |
+
upload_id=upload_id,
|
| 378 |
image_id=image_id,
|
| 379 |
screening_outcome=pred.get("screening_outcome"),
|
| 380 |
raw_probability=pred.get("raw_probability"),
|
|
|
|
| 383 |
decision_threshold=pred.get("decision_threshold"),
|
| 384 |
triage_action=report.get("triage", {}).get("action"),
|
| 385 |
urgency=report.get("triage", {}).get("urgency"),
|
| 386 |
+
report_json_path=str(report_path.relative_to(user_data_dir)),
|
| 387 |
+
gradcam_image_path=gradcam_reference,
|
| 388 |
llm_summary=report.get("llm_summary"),
|
| 389 |
+
report_payload=json.dumps(report, ensure_ascii=True),
|
|
|
|
| 390 |
generated_at=datetime.datetime.utcnow(),
|
| 391 |
)
|
| 392 |
db.session.add(screening_report)
|
|
|
|
| 396 |
resource_id=screening_report.id, status="success")
|
| 397 |
|
| 398 |
except Exception as e:
|
| 399 |
+
db.session.rollback()
|
| 400 |
bbr.stop()
|
| 401 |
logger.error(f"Inference failed: {e}", exc_info=True)
|
| 402 |
log_audit("inference_failed", user_id=user_id, status="failure", details=str(e))
|
|
|
|
| 415 |
|
| 416 |
return report, {"timestamp": ts, "image_id": image_id}
|
| 417 |
|
| 418 |
+
def _start_batch(dcm_paths: list[Path], user_id: int, temp_dir: str | None = None) -> str:
|
| 419 |
+
"""Trigger async batch processing via Celery."""
|
| 420 |
+
batch_id = f"u{user_id}_{uuid.uuid4().hex[:12]}"
|
| 421 |
+
dcm_paths_str = [str(p) for p in dcm_paths]
|
| 422 |
+
|
| 423 |
+
# Send task to Celery worker
|
| 424 |
+
try:
|
| 425 |
+
task = celery_app.send_task(
|
| 426 |
+
"tasks.process_dicom_batch",
|
| 427 |
+
kwargs={
|
| 428 |
+
"batch_id": batch_id,
|
| 429 |
+
"dcm_paths": dcm_paths_str,
|
| 430 |
+
"user_id": user_id,
|
| 431 |
+
"temp_dir": temp_dir,
|
| 432 |
+
},
|
| 433 |
+
task_id=batch_id,
|
| 434 |
+
)
|
| 435 |
+
except Exception as exc:
|
| 436 |
+
logger.error("Failed to enqueue Celery batch task", exc_info=True)
|
| 437 |
+
raise RuntimeError("Celery enqueue failed") from exc
|
| 438 |
+
|
| 439 |
+
logger.info(f"Started Celery batch task {batch_id} (task_id={task.id})")
|
| 440 |
return batch_id
|
| 441 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 442 |
|
| 443 |
+
def _run_batch_sync(dcm_paths: list[Path], user_id: int, temp_dir: str | None = None) -> dict[str, Any]:
|
| 444 |
+
"""Fallback synchronous batch processing when Celery is unavailable."""
|
| 445 |
+
total = len(dcm_paths)
|
| 446 |
+
succeeded_ids: list[str] = []
|
| 447 |
+
failed_ids: list[str] = []
|
| 448 |
+
started_at = datetime.datetime.now().isoformat()
|
| 449 |
+
sync_batch_id = f"sync_u{user_id}_{uuid.uuid4().hex[:12]}"
|
| 450 |
+
|
| 451 |
+
log_audit(
|
| 452 |
+
"batch_sync_started",
|
| 453 |
+
user_id=user_id,
|
| 454 |
+
details=f"batch_id={sync_batch_id}, files={total}",
|
| 455 |
+
status="success",
|
| 456 |
+
)
|
| 457 |
+
|
| 458 |
+
user_upload_dir = UserDataManager().get_user_upload_dir(user_id)
|
| 459 |
+
|
| 460 |
+
try:
|
| 461 |
+
for path in dcm_paths:
|
| 462 |
image_id = path.stem
|
| 463 |
+
|
| 464 |
+
upload_record = ScreeningUpload(
|
| 465 |
+
user_id=user_id,
|
| 466 |
+
file_name=path.name,
|
| 467 |
+
original_filename=path.name,
|
| 468 |
+
file_size=path.stat().st_size if path.exists() else None,
|
| 469 |
+
file_path=str(path.relative_to(user_upload_dir)) if path.parent == user_upload_dir else str(path),
|
| 470 |
+
processing_status="processing",
|
| 471 |
+
)
|
| 472 |
+
db.session.add(upload_record)
|
| 473 |
+
db.session.commit()
|
| 474 |
+
|
| 475 |
try:
|
| 476 |
+
report, _ = _run_inference_on_dcm(path, user_id, upload_record.id)
|
| 477 |
if report:
|
| 478 |
+
upload_record.processing_status = "completed"
|
| 479 |
+
db.session.commit()
|
| 480 |
succeeded_ids.append(image_id)
|
| 481 |
else:
|
| 482 |
+
upload_record.processing_status = "failed"
|
| 483 |
+
db.session.commit()
|
| 484 |
failed_ids.append(image_id)
|
| 485 |
+
except Exception as exc:
|
| 486 |
+
logger.error(f"Sync batch failed {image_id} β {exc}", exc_info=True)
|
| 487 |
+
db.session.rollback()
|
| 488 |
+
upload_record.processing_status = "failed"
|
| 489 |
+
try:
|
| 490 |
+
db.session.commit()
|
| 491 |
+
except Exception:
|
| 492 |
+
db.session.rollback()
|
| 493 |
failed_ids.append(image_id)
|
| 494 |
+
finally:
|
| 495 |
+
if temp_dir and Path(temp_dir).exists():
|
| 496 |
+
try:
|
| 497 |
+
shutil.rmtree(temp_dir, ignore_errors=True)
|
| 498 |
+
logger.info(f"Cleaned up temp_dir: {temp_dir}")
|
| 499 |
+
except Exception as exc:
|
| 500 |
+
logger.warning(f"Failed to clean temp_dir {temp_dir}: {exc}")
|
| 501 |
+
|
| 502 |
+
log_audit(
|
| 503 |
+
"batch_sync_completed",
|
| 504 |
+
user_id=user_id,
|
| 505 |
+
details=(
|
| 506 |
+
f"batch_id={sync_batch_id}, processed={total}, "
|
| 507 |
+
f"succeeded={len(succeeded_ids)}, failed={len(failed_ids)}"
|
| 508 |
+
),
|
| 509 |
+
status="success" if not failed_ids else "partial",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 510 |
)
|
|
|
|
|
|
|
| 511 |
|
| 512 |
+
return {
|
| 513 |
+
"batch_id": sync_batch_id,
|
| 514 |
+
"user_id": user_id,
|
| 515 |
+
"status": "completed",
|
| 516 |
+
"total": total,
|
| 517 |
+
"processed": total,
|
| 518 |
+
"succeeded": len(succeeded_ids),
|
| 519 |
+
"failed_ids": list(failed_ids),
|
| 520 |
+
"image_ids": list(succeeded_ids),
|
| 521 |
+
"current_file": "",
|
| 522 |
+
"started_at": started_at,
|
| 523 |
+
"finished_at": datetime.datetime.now().isoformat(),
|
| 524 |
+
"error": None,
|
| 525 |
+
"temp_dir": temp_dir,
|
| 526 |
+
}
|
| 527 |
+
|
| 528 |
+
|
| 529 |
+
def _extract_user_id_from_batch_id(batch_id: str) -> int | None:
|
| 530 |
+
"""Recover the user id embedded in a batch id."""
|
| 531 |
+
if not batch_id.startswith("u"):
|
| 532 |
+
return None
|
| 533 |
+
user_part = batch_id.split("_", 1)[0][1:]
|
| 534 |
+
try:
|
| 535 |
+
return int(user_part)
|
| 536 |
+
except ValueError:
|
| 537 |
+
return None
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
def _get_queue_depth() -> int | None:
|
| 541 |
+
"""Best-effort queue depth for the default Celery queue."""
|
| 542 |
+
if not REDIS_URL.startswith("redis"):
|
| 543 |
+
return None
|
| 544 |
+
|
| 545 |
+
try:
|
| 546 |
+
from redis import Redis
|
| 547 |
+
client = Redis.from_url(REDIS_URL, decode_responses=True)
|
| 548 |
+
return int(client.llen("celery"))
|
| 549 |
+
except Exception:
|
| 550 |
+
return None
|
| 551 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 552 |
# DATA MODEL & UTILITIES
|
| 553 |
# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
|
|
|
| 564 |
urgency: str = "N/A"
|
| 565 |
generated_at: str = ""
|
| 566 |
report_file: str | None = None
|
| 567 |
+
gradcam_file: str | None = None
|
| 568 |
+
|
| 569 |
+
@property
|
| 570 |
+
def gradcam_url(self) -> str | None:
|
| 571 |
+
if not self.gradcam_file:
|
| 572 |
+
return None
|
| 573 |
+
if self.gradcam_file.startswith("http"):
|
| 574 |
+
return self.gradcam_file
|
| 575 |
+
return self.gradcam_file
|
| 576 |
|
| 577 |
@property
|
| 578 |
def date_display(self) -> str:
|
|
|
|
| 596 |
|
| 597 |
cases = []
|
| 598 |
for r in reports:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 599 |
cases.append(CaseRow(
|
| 600 |
image_id=r.image_id,
|
| 601 |
outcome=r.screening_outcome or "Unknown",
|
|
|
|
| 606 |
urgency=r.urgency or "N/A",
|
| 607 |
generated_at=r.generated_at.isoformat() if r.generated_at else "",
|
| 608 |
report_file=Path(r.report_json_path).name if r.report_json_path else None,
|
| 609 |
+
gradcam_file=_resolve_gradcam_reference(r),
|
| 610 |
))
|
| 611 |
|
| 612 |
return cases
|
| 613 |
|
| 614 |
+
|
| 615 |
+
def _resolve_gradcam_reference(report: ScreeningReport) -> str | None:
|
| 616 |
+
"""Resolve the best available Grad-CAM reference for a report."""
|
| 617 |
+
if report.gradcam_image_path:
|
| 618 |
+
return str(report.gradcam_image_path)
|
| 619 |
+
|
| 620 |
+
if report.report_payload:
|
| 621 |
+
try:
|
| 622 |
+
payload = json.loads(report.report_payload)
|
| 623 |
+
explainability = payload.get("explainability", {}) if isinstance(payload, dict) else {}
|
| 624 |
+
return (
|
| 625 |
+
payload.get("cloudinary_heatmap_url")
|
| 626 |
+
or explainability.get("heatmap_path")
|
| 627 |
+
or explainability.get("image_path")
|
| 628 |
+
)
|
| 629 |
+
except json.JSONDecodeError:
|
| 630 |
+
pass
|
| 631 |
+
|
| 632 |
+
if not report.report_json_path:
|
| 633 |
+
return None
|
| 634 |
+
|
| 635 |
+
try:
|
| 636 |
+
user_data_dir = UserDataManager().get_user_data_dir(report.user_id)
|
| 637 |
+
report_path = user_data_dir / report.report_json_path
|
| 638 |
+
if not report_path.exists():
|
| 639 |
+
return None
|
| 640 |
+
|
| 641 |
+
with open(report_path, "r", encoding="utf-8") as f:
|
| 642 |
+
payload = json.load(f)
|
| 643 |
+
|
| 644 |
+
explainability = payload.get("explainability", {}) if isinstance(payload, dict) else {}
|
| 645 |
+
return (
|
| 646 |
+
payload.get("cloudinary_heatmap_url")
|
| 647 |
+
or explainability.get("heatmap_path")
|
| 648 |
+
or explainability.get("image_path")
|
| 649 |
+
)
|
| 650 |
+
except (OSError, json.JSONDecodeError, TypeError, AttributeError):
|
| 651 |
+
return None
|
| 652 |
+
|
| 653 |
def compute_stats(rows: list[CaseRow]) -> dict[str, Any]:
|
| 654 |
"""Compute statistics for dashboard"""
|
| 655 |
total = len(rows)
|
|
|
|
| 666 |
"urgent": urgent,
|
| 667 |
"avg_cal_prob": avg_cal,
|
| 668 |
"pos_rate": pos_rate,
|
| 669 |
+
"heatmaps": sum(1 for r in rows if r.gradcam_file),
|
| 670 |
+
}
|
| 671 |
+
|
| 672 |
+
|
| 673 |
+
def _compute_ground_truth_stats(user_id: int) -> dict[str, Any]:
|
| 674 |
+
"""Compute ground-truth agreement stats for a user."""
|
| 675 |
+
reports = ScreeningReport.query.filter_by(user_id=user_id).all()
|
| 676 |
+
labeled = [r for r in reports if (r.true_label or "").upper() in ("POSITIVE", "NEGATIVE")]
|
| 677 |
+
total = len(labeled)
|
| 678 |
+
if total == 0:
|
| 679 |
+
return {
|
| 680 |
+
"total": 0,
|
| 681 |
+
"tp": 0,
|
| 682 |
+
"tn": 0,
|
| 683 |
+
"fp": 0,
|
| 684 |
+
"fn": 0,
|
| 685 |
+
"accuracy": None,
|
| 686 |
+
"fp_rate": None,
|
| 687 |
+
}
|
| 688 |
+
|
| 689 |
+
def _ai_positive(report: ScreeningReport) -> bool:
|
| 690 |
+
return "no hemorrhage" not in (report.screening_outcome or "").lower()
|
| 691 |
+
|
| 692 |
+
tp = tn = fp = fn = 0
|
| 693 |
+
for r in labeled:
|
| 694 |
+
ai_pos = _ai_positive(r)
|
| 695 |
+
truth_pos = (r.true_label or "").upper() == "POSITIVE"
|
| 696 |
+
if ai_pos and truth_pos:
|
| 697 |
+
tp += 1
|
| 698 |
+
elif ai_pos and not truth_pos:
|
| 699 |
+
fp += 1
|
| 700 |
+
elif not ai_pos and truth_pos:
|
| 701 |
+
fn += 1
|
| 702 |
+
else:
|
| 703 |
+
tn += 1
|
| 704 |
+
|
| 705 |
+
accuracy = (tp + tn) / total if total else None
|
| 706 |
+
fp_rate = fp / (fp + tn) if (fp + tn) else None
|
| 707 |
+
|
| 708 |
+
return {
|
| 709 |
+
"total": total,
|
| 710 |
+
"tp": tp,
|
| 711 |
+
"tn": tn,
|
| 712 |
+
"fp": fp,
|
| 713 |
+
"fn": fn,
|
| 714 |
+
"accuracy": accuracy,
|
| 715 |
+
"fp_rate": fp_rate,
|
| 716 |
}
|
| 717 |
|
| 718 |
|
|
|
|
| 815 |
flash("No files were uploaded.", "error")
|
| 816 |
return redirect(url_for("upload"))
|
| 817 |
|
| 818 |
+
user_upload_dir = UserDataManager().get_user_upload_dir(current_user.id)
|
| 819 |
user_upload_dir.mkdir(parents=True, exist_ok=True)
|
| 820 |
|
| 821 |
dcm_paths: list[Path] = []
|
|
|
|
| 857 |
if len(dcm_paths) == 1 and temp_dir is None:
|
| 858 |
path = dcm_paths[0]
|
| 859 |
try:
|
| 860 |
+
user_upload_dir = UserDataManager().get_user_upload_dir(current_user.id)
|
| 861 |
+
upload_record = ScreeningUpload(
|
| 862 |
+
user_id=current_user.id,
|
| 863 |
+
file_name=path.name,
|
| 864 |
+
original_filename=path.name,
|
| 865 |
+
file_size=path.stat().st_size if path.exists() else None,
|
| 866 |
+
file_path=str(path.relative_to(user_upload_dir)) if path.parent == user_upload_dir else str(path),
|
| 867 |
+
processing_status="processing",
|
| 868 |
+
)
|
| 869 |
+
db.session.add(upload_record)
|
| 870 |
+
db.session.commit()
|
| 871 |
+
|
| 872 |
+
report, _ = _run_inference_on_dcm(path, current_user.id, upload_record.id)
|
| 873 |
if not report:
|
| 874 |
flash("Model failed to load. Check server logs.", "error")
|
| 875 |
return redirect(url_for("upload"))
|
| 876 |
+
|
| 877 |
+
upload_record.processing_status = "completed"
|
| 878 |
+
db.session.commit()
|
| 879 |
return redirect(url_for("case_detail", image_id=path.stem))
|
| 880 |
except Exception as e:
|
| 881 |
+
db.session.rollback()
|
| 882 |
logger.error(f"Analysis failed: {e}")
|
| 883 |
log_audit("analysis_failed", user_id=current_user.id, status="failure", details=str(e))
|
| 884 |
flash(f"Analysis failed: {e}", "error")
|
|
|
|
| 888 |
path.unlink()
|
| 889 |
|
| 890 |
# Multiple files - async batch
|
| 891 |
+
try:
|
| 892 |
+
batch_id = _start_batch(dcm_paths, current_user.id, temp_dir)
|
| 893 |
+
log_audit(
|
| 894 |
+
"batch_started",
|
| 895 |
+
user_id=current_user.id,
|
| 896 |
+
details=f"batch_id={batch_id}, files={len(dcm_paths)}",
|
| 897 |
+
)
|
| 898 |
+
return redirect(url_for("batch_progress", batch_id=batch_id))
|
| 899 |
+
except Exception:
|
| 900 |
+
logger.error("Celery unavailable; running synchronous fallback", exc_info=True)
|
| 901 |
+
flash("Celery worker unavailable. Running batch synchronously; this may take a while.", "warning")
|
| 902 |
+
result = _run_batch_sync(dcm_paths, current_user.id, temp_dir)
|
| 903 |
+
flash(
|
| 904 |
+
f"Batch complete: {result['succeeded']}/{result['total']} succeeded.",
|
| 905 |
+
"info",
|
| 906 |
+
)
|
| 907 |
+
return redirect(url_for("reports"))
|
| 908 |
|
| 909 |
|
| 910 |
@app.route("/analyze/directory", methods=["POST"])
|
|
|
|
| 914 |
if not LOCAL_MODE:
|
| 915 |
abort(403)
|
| 916 |
|
| 917 |
+
dir_path_str = request.form.get("dir_path", "").strip()
|
| 918 |
if not dir_path_str:
|
| 919 |
flash("Please enter a directory path.", "error")
|
| 920 |
return redirect(url_for("upload"))
|
| 921 |
|
| 922 |
+
scan_dir = Path(dir_path_str)
|
| 923 |
+
if not scan_dir.is_dir():
|
| 924 |
+
flash(f"Directory not found: {dir_path_str}", "error")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 925 |
return redirect(url_for("upload"))
|
| 926 |
|
| 927 |
dcm_paths = sorted(scan_dir.rglob("*.dcm"))
|
|
|
|
| 929 |
flash(f"No .dcm files found in: {dir_path_str}", "error")
|
| 930 |
return redirect(url_for("upload"))
|
| 931 |
|
| 932 |
+
try:
|
| 933 |
+
batch_id = _start_batch(dcm_paths, current_user.id)
|
| 934 |
+
log_audit(
|
| 935 |
+
"directory_batch_started",
|
| 936 |
+
user_id=current_user.id,
|
| 937 |
+
details=f"batch_id={batch_id}, files={len(dcm_paths)}",
|
| 938 |
+
)
|
| 939 |
+
return redirect(url_for("batch_progress", batch_id=batch_id))
|
| 940 |
+
except Exception:
|
| 941 |
+
logger.error("Celery unavailable; running synchronous directory scan", exc_info=True)
|
| 942 |
+
flash("Celery worker unavailable. Running directory scan synchronously.", "warning")
|
| 943 |
+
result = _run_batch_sync(dcm_paths, current_user.id)
|
| 944 |
+
flash(
|
| 945 |
+
f"Directory scan complete: {result['succeeded']}/{result['total']} succeeded.",
|
| 946 |
+
"info",
|
| 947 |
+
)
|
| 948 |
+
return redirect(url_for("reports"))
|
| 949 |
|
| 950 |
@app.route("/batch/<batch_id>")
|
| 951 |
@login_required
|
| 952 |
def batch_progress(batch_id):
|
| 953 |
"""Batch processing progress page"""
|
| 954 |
+
batch = _get_batch_from_celery(batch_id)
|
| 955 |
+
if not batch or batch.get("user_id") != current_user.id:
|
| 956 |
+
abort(404)
|
|
|
|
|
|
|
| 957 |
|
| 958 |
+
return render_template("batch_progress.html", batch=batch, batch_id=batch_id)
|
| 959 |
|
| 960 |
@app.route("/batch/<batch_id>/status")
|
| 961 |
@login_required
|
| 962 |
def batch_status(batch_id):
|
| 963 |
"""Get batch status (JSON API)"""
|
| 964 |
+
batch = _get_batch_from_celery(batch_id)
|
| 965 |
+
if not batch or batch.get("user_id") != current_user.id:
|
| 966 |
+
return jsonify({"error": "Not found"}), 404
|
| 967 |
+
return jsonify(batch)
|
|
|
|
| 968 |
|
| 969 |
+
def _get_batch_from_celery(batch_id: str) -> dict[str, Any] | None:
|
| 970 |
+
"""Retrieve batch status from Celery task result backend."""
|
| 971 |
+
# In a production system, we'd also validate user_id from the database
|
| 972 |
+
# For now, we rely on Celery returning task metadata with user_id in meta dict
|
| 973 |
+
queue_size = _get_queue_depth()
|
| 974 |
+
|
| 975 |
+
# Try to find the task associated with this batch_id
|
| 976 |
+
# Celery doesn't provide a direct "get by batch_id" so we query the backend
|
| 977 |
+
result = AsyncResult(batch_id, app=celery_app)
|
| 978 |
+
user_id = _extract_user_id_from_batch_id(batch_id)
|
| 979 |
+
|
| 980 |
+
if result.state == "PENDING" and not result.info:
|
| 981 |
+
# Task has been queued but has not written progress yet.
|
| 982 |
+
return {
|
| 983 |
+
"batch_id": batch_id,
|
| 984 |
+
"user_id": user_id,
|
| 985 |
+
"status": "pending",
|
| 986 |
+
"total": 0,
|
| 987 |
+
"processed": 0,
|
| 988 |
+
"succeeded": 0,
|
| 989 |
+
"failed_ids": [],
|
| 990 |
+
"image_ids": [],
|
| 991 |
+
"current_file": "",
|
| 992 |
+
"started_at": None,
|
| 993 |
+
"finished_at": None,
|
| 994 |
+
"error": None,
|
| 995 |
+
"queue_size": queue_size,
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
# Build response matching _BATCHES format for frontend compatibility
|
| 999 |
+
if result.state == "PROGRESS":
|
| 1000 |
+
meta = result.info or {}
|
| 1001 |
+
return {
|
| 1002 |
+
"batch_id": meta.get("batch_id", batch_id),
|
| 1003 |
+
"user_id": meta.get("user_id", user_id),
|
| 1004 |
+
"status": meta.get("status", "running"),
|
| 1005 |
+
"total": meta.get("total", 0),
|
| 1006 |
+
"processed": meta.get("processed", 0),
|
| 1007 |
+
"succeeded": meta.get("succeeded", 0),
|
| 1008 |
+
"failed_ids": meta.get("failed_ids", []),
|
| 1009 |
+
"image_ids": meta.get("image_ids", []),
|
| 1010 |
+
"current_file": meta.get("current_file", ""),
|
| 1011 |
+
"started_at": meta.get("started_at"),
|
| 1012 |
+
"finished_at": meta.get("finished_at"),
|
| 1013 |
+
"error": meta.get("error"),
|
| 1014 |
+
"queue_size": meta.get("queue_size", queue_size),
|
| 1015 |
+
}
|
| 1016 |
+
elif result.state == "SUCCESS":
|
| 1017 |
+
# Task completed
|
| 1018 |
+
return result.result if isinstance(result.result, dict) else {
|
| 1019 |
+
"batch_id": batch_id,
|
| 1020 |
+
"user_id": user_id,
|
| 1021 |
+
"status": "completed",
|
| 1022 |
+
"error": None,
|
| 1023 |
+
"queue_size": queue_size,
|
| 1024 |
+
}
|
| 1025 |
+
elif result.state == "FAILURE":
|
| 1026 |
+
# Task failed
|
| 1027 |
+
return {
|
| 1028 |
+
"batch_id": batch_id,
|
| 1029 |
+
"user_id": user_id,
|
| 1030 |
+
"status": "failed",
|
| 1031 |
+
"error": str(result.info) if result.info else "Unknown error",
|
| 1032 |
+
"queue_size": queue_size,
|
| 1033 |
+
}
|
| 1034 |
+
elif result.state == "REVOKED":
|
| 1035 |
+
return {
|
| 1036 |
+
"batch_id": batch_id,
|
| 1037 |
+
"user_id": user_id,
|
| 1038 |
+
"status": "revoked",
|
| 1039 |
+
"error": "Task was revoked",
|
| 1040 |
+
"queue_size": queue_size,
|
| 1041 |
+
}
|
| 1042 |
+
else:
|
| 1043 |
+
# PENDING or other states
|
| 1044 |
+
return {
|
| 1045 |
+
"batch_id": batch_id,
|
| 1046 |
+
"user_id": user_id,
|
| 1047 |
+
"status": "pending",
|
| 1048 |
+
"error": None,
|
| 1049 |
+
"queue_size": queue_size,
|
| 1050 |
+
}
|
| 1051 |
@app.route("/reports")
|
| 1052 |
@login_required
|
| 1053 |
def reports():
|
|
|
|
| 1125 |
data_cache_hit=False,
|
| 1126 |
)
|
| 1127 |
|
| 1128 |
+
|
| 1129 |
+
@app.route("/report/<image_id>/delete", methods=["POST"])
|
| 1130 |
+
@login_required
|
| 1131 |
+
def delete_report(image_id):
|
| 1132 |
+
"""Delete a single report and its associated files for the current user."""
|
| 1133 |
+
report = ScreeningReport.query.filter_by(user_id=current_user.id, image_id=image_id).first()
|
| 1134 |
+
if not report:
|
| 1135 |
+
flash("Report not found", "error")
|
| 1136 |
+
return redirect(url_for("reports"))
|
| 1137 |
+
|
| 1138 |
+
reports_dir = UserDataManager().get_user_reports_dir(current_user.id)
|
| 1139 |
+
try:
|
| 1140 |
+
for path in reports_dir.glob(f"{image_id}*"):
|
| 1141 |
+
try:
|
| 1142 |
+
path.unlink()
|
| 1143 |
+
except OSError:
|
| 1144 |
+
logger.warning(f"Failed to delete file: {path}")
|
| 1145 |
+
except Exception:
|
| 1146 |
+
logger.exception("Error while removing report files")
|
| 1147 |
+
|
| 1148 |
+
try:
|
| 1149 |
+
db.session.delete(report)
|
| 1150 |
+
db.session.commit()
|
| 1151 |
+
except Exception:
|
| 1152 |
+
db.session.rollback()
|
| 1153 |
+
logger.exception("Failed to delete report DB entry")
|
| 1154 |
+
flash("Failed to delete report", "error")
|
| 1155 |
+
return redirect(url_for("reports"))
|
| 1156 |
+
|
| 1157 |
+
log_audit("report_deleted", user_id=current_user.id, resource_type="report", resource_id=report.id)
|
| 1158 |
+
flash("Report deleted", "success")
|
| 1159 |
+
return redirect(url_for("reports"))
|
| 1160 |
+
|
| 1161 |
+
|
| 1162 |
+
@app.route("/reports/delete_all", methods=["POST"])
|
| 1163 |
+
@login_required
|
| 1164 |
+
def delete_all_reports():
|
| 1165 |
+
"""Delete all reports and local files for the current user."""
|
| 1166 |
+
reports = ScreeningReport.query.filter_by(user_id=current_user.id).all()
|
| 1167 |
+
reports_dir = UserDataManager().get_user_reports_dir(current_user.id)
|
| 1168 |
+
|
| 1169 |
+
# Remove files
|
| 1170 |
+
try:
|
| 1171 |
+
for path in reports_dir.iterdir():
|
| 1172 |
+
if path.is_file():
|
| 1173 |
+
try:
|
| 1174 |
+
path.unlink()
|
| 1175 |
+
except OSError:
|
| 1176 |
+
logger.warning(f"Failed to delete file: {path}")
|
| 1177 |
+
except Exception:
|
| 1178 |
+
logger.exception("Error while removing user report files")
|
| 1179 |
+
|
| 1180 |
+
# Remove DB entries
|
| 1181 |
+
try:
|
| 1182 |
+
for r in reports:
|
| 1183 |
+
db.session.delete(r)
|
| 1184 |
+
db.session.commit()
|
| 1185 |
+
except Exception:
|
| 1186 |
+
db.session.rollback()
|
| 1187 |
+
logger.exception("Failed to delete report DB entries")
|
| 1188 |
+
flash("Failed to delete all reports", "error")
|
| 1189 |
+
return redirect(url_for("reports"))
|
| 1190 |
+
|
| 1191 |
+
log_audit("reports_deleted_all", user_id=current_user.id, resource_type="report", resource_id=None)
|
| 1192 |
+
flash("All reports deleted", "success")
|
| 1193 |
+
return redirect(url_for("reports"))
|
| 1194 |
+
|
| 1195 |
@app.route("/case/<image_id>")
|
| 1196 |
@login_required
|
| 1197 |
def case_detail(image_id):
|
| 1198 |
"""View screening report details"""
|
| 1199 |
+
report = ScreeningReport.query.filter_by(user_id=current_user.id, image_id=image_id).first()
|
| 1200 |
+
if not report:
|
| 1201 |
+
abort(404)
|
| 1202 |
|
|
|
|
| 1203 |
report_data = None
|
| 1204 |
+
if report.report_payload:
|
| 1205 |
+
try:
|
| 1206 |
+
report_data = json.loads(report.report_payload)
|
| 1207 |
+
except json.JSONDecodeError:
|
| 1208 |
+
report_data = None
|
| 1209 |
+
|
| 1210 |
+
if report_data is None:
|
| 1211 |
+
user_reports_dir = UserDataManager().get_user_reports_dir(current_user.id)
|
| 1212 |
+
report_path = user_reports_dir / f"{image_id}_report.json"
|
| 1213 |
+
if not report_path.exists():
|
| 1214 |
+
abort(404)
|
| 1215 |
+
try:
|
| 1216 |
+
with open(report_path) as f:
|
| 1217 |
report_data = json.load(f)
|
| 1218 |
+
except (json.JSONDecodeError, OSError):
|
| 1219 |
+
abort(500)
|
| 1220 |
+
|
| 1221 |
+
log_audit("report_viewed", user_id=current_user.id, resource_type="report", resource_id=report.id)
|
| 1222 |
+
# Build a lightweight `row` object matching CaseRow used elsewhere so the
|
| 1223 |
+
# detail template can access properties like `row.image_id`, `row.cal_prob`.
|
| 1224 |
+
def _format_date(dt):
|
| 1225 |
+
try:
|
| 1226 |
+
return dt.isoformat()
|
| 1227 |
+
except Exception:
|
| 1228 |
+
return str(dt) if dt else ""
|
| 1229 |
+
|
| 1230 |
+
gradcam_ref = _resolve_gradcam_reference(report)
|
| 1231 |
+
gradcam_url = None
|
| 1232 |
+
if gradcam_ref:
|
| 1233 |
+
if gradcam_ref.startswith("http"):
|
| 1234 |
+
gradcam_url = gradcam_ref
|
| 1235 |
+
else:
|
| 1236 |
+
gradcam_url = url_for("serve_gradcam", filename=Path(gradcam_ref).name)
|
| 1237 |
+
|
| 1238 |
+
row = SimpleNamespace(
|
| 1239 |
+
image_id=report.image_id,
|
| 1240 |
+
outcome=report.screening_outcome or "Unknown",
|
| 1241 |
+
raw_prob=report.raw_probability,
|
| 1242 |
+
cal_prob=report.calibrated_probability,
|
| 1243 |
+
band=report.confidence_band or "N/A",
|
| 1244 |
+
triage=report.triage_action or "N/A",
|
| 1245 |
+
urgency=report.urgency or "N/A",
|
| 1246 |
+
generated_at=_format_date(report.generated_at),
|
| 1247 |
+
date_display=(report.generated_at.strftime("%Y-%m-%d %H:%M") if report.generated_at else "β"),
|
| 1248 |
+
report_file=Path(report.report_json_path).name if report.report_json_path else None,
|
| 1249 |
+
gradcam_url=gradcam_url,
|
| 1250 |
+
true_label=report.true_label,
|
| 1251 |
+
is_positive=("no hemorrhage" not in (report.screening_outcome or "").lower()),
|
| 1252 |
+
)
|
| 1253 |
|
| 1254 |
+
return render_template("detail.html", row=row, report_record=report, payload=report_data)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1255 |
|
| 1256 |
+
|
| 1257 |
+
@app.route("/case/<image_id>/ground-truth", methods=["POST"])
|
| 1258 |
+
@login_required
|
| 1259 |
+
def update_ground_truth(image_id):
|
| 1260 |
+
"""Update ground truth label for a report."""
|
| 1261 |
+
report = ScreeningReport.query.filter_by(user_id=current_user.id, image_id=image_id).first()
|
| 1262 |
+
if not report:
|
| 1263 |
+
abort(404)
|
| 1264 |
+
|
| 1265 |
+
raw_value = (request.form.get("true_label") or "").strip()
|
| 1266 |
+
normalized = raw_value.upper().replace(" ", "_").replace("/", "_")
|
| 1267 |
+
allowed = {"POSITIVE", "NEGATIVE", "UNKNOWN", "N_A"}
|
| 1268 |
+
if not normalized or normalized == "N_A":
|
| 1269 |
+
report.true_label = None
|
| 1270 |
+
elif normalized not in allowed:
|
| 1271 |
+
flash("Invalid ground truth value.", "error")
|
| 1272 |
+
return redirect(url_for("case_detail", image_id=image_id))
|
| 1273 |
else:
|
| 1274 |
+
report.true_label = "UNKNOWN" if normalized == "UNKNOWN" else normalized
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1275 |
|
| 1276 |
+
try:
|
| 1277 |
+
db.session.commit()
|
| 1278 |
+
log_audit("ground_truth_updated", user_id=current_user.id, resource_type="report", resource_id=report.id)
|
| 1279 |
+
flash("Ground truth updated.", "success")
|
| 1280 |
+
except Exception:
|
| 1281 |
+
db.session.rollback()
|
| 1282 |
+
logger.exception("Failed to update ground truth")
|
| 1283 |
+
flash("Failed to update ground truth.", "error")
|
| 1284 |
+
|
| 1285 |
+
return redirect(url_for("case_detail", image_id=image_id))
|
| 1286 |
|
| 1287 |
@app.route("/logs")
|
| 1288 |
@login_required
|
| 1289 |
def logs_page():
|
| 1290 |
"""View user's inference logs"""
|
| 1291 |
+
if not SHOW_LOGS:
|
| 1292 |
+
abort(404)
|
| 1293 |
log_files = []
|
| 1294 |
|
| 1295 |
if LOGS_DIR.exists():
|
|
|
|
| 1312 |
def evaluation():
|
| 1313 |
"""Model evaluation page"""
|
| 1314 |
cases = _load_user_cases(current_user.id) if current_user.is_authenticated else []
|
| 1315 |
+
gt_stats = _compute_ground_truth_stats(current_user.id) if current_user.is_authenticated else None
|
| 1316 |
cal_probs = [r.cal_prob for r in cases if r.cal_prob is not None]
|
| 1317 |
|
| 1318 |
bins = [0] * 10
|
|
|
|
| 1337 |
bins=bins,
|
| 1338 |
band_data=band_data,
|
| 1339 |
total=len(cases),
|
| 1340 |
+
gt_stats=gt_stats,
|
| 1341 |
)
|
| 1342 |
|
| 1343 |
|
|
|
|
| 1346 |
def serve_gradcam(filename: str):
|
| 1347 |
"""Serve a user's Grad-CAM image from their report directory."""
|
| 1348 |
safe_name = Path(filename).name
|
| 1349 |
+
reports_dir = UserDataManager().get_user_reports_dir(current_user.id)
|
| 1350 |
return send_from_directory(reports_dir, safe_name)
|
| 1351 |
|
| 1352 |
+
@app.route("/report-json/<path:filename>")
|
| 1353 |
@login_required
|
| 1354 |
def serve_report_json(filename: str):
|
| 1355 |
+
"""Serve a user's report JSON file from their report directory."""
|
| 1356 |
safe_name = Path(filename).name
|
| 1357 |
+
reports_dir = UserDataManager().get_user_reports_dir(current_user.id)
|
| 1358 |
+
report_path = reports_dir / safe_name
|
| 1359 |
+
if report_path.exists():
|
| 1360 |
+
return send_from_directory(reports_dir, safe_name, mimetype="application/json")
|
| 1361 |
|
| 1362 |
+
image_id = safe_name.replace("_report.json", "")
|
|
|
|
|
|
|
|
|
|
| 1363 |
report = ScreeningReport.query.filter_by(user_id=current_user.id, image_id=image_id).first()
|
| 1364 |
+
if report and report.report_payload:
|
| 1365 |
+
return Response(report.report_payload, mimetype="application/json")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1366 |
|
| 1367 |
+
abort(404)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1368 |
|
| 1369 |
@app.errorhandler(401)
|
| 1370 |
def unauthorized(e):
|
|
|
|
| 1405 |
@app.cli.command()
|
| 1406 |
def create_admin():
|
| 1407 |
"""Create admin user (interactive)"""
|
|
|
|
|
|
|
| 1408 |
username = input("Username: ").strip()
|
| 1409 |
email = input("Email: ").strip()
|
| 1410 |
password = getpass("Password: ")
|
|
@@ -116,83 +116,29 @@ def _validate_otp(submitted_code: str, expected_purpose: str) -> tuple[bool, str
|
|
| 116 |
return True, "", payload
|
| 117 |
|
| 118 |
|
| 119 |
-
def
|
| 120 |
-
"""Return (plain_text, html) for OTP emails."""
|
| 121 |
if purpose == "verify_email":
|
| 122 |
title = "Verify your ICH Screening account"
|
| 123 |
-
body_line = (
|
| 124 |
-
"Welcome to ICH Screening. You're one step away from accessing our platform.\n"
|
| 125 |
-
"Enter the verification code below to confirm your email address and activate your account."
|
| 126 |
-
)
|
| 127 |
else:
|
| 128 |
title = "Your ICH Screening verification code"
|
| 129 |
-
|
| 130 |
-
|
| 131 |
-
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
plain = (
|
| 135 |
-
f"{title}\n"
|
| 136 |
-
f"{'=' * len(title)}\n\n"
|
| 137 |
-
f"Hi there,\n\n"
|
| 138 |
-
f"{body_line}\n\n"
|
| 139 |
-
f" Verification Code: {code}\n"
|
| 140 |
-
f" Valid for: 10 minutes\n\n"
|
| 141 |
-
"Security reminder: ICH Screening will never ask you to share this code "
|
| 142 |
-
"over the phone, email, or chat. If anyone requests it, treat it as a phishing attempt.\n\n"
|
| 143 |
-
"Didn't sign up? Simply ignore this email β your account will remain inactive "
|
| 144 |
-
"unless this code is entered.\n"
|
| 145 |
-
)
|
| 146 |
-
|
| 147 |
-
try:
|
| 148 |
-
from flask import render_template
|
| 149 |
-
html = render_template(
|
| 150 |
-
"email/otp_email.html",
|
| 151 |
-
title=title,
|
| 152 |
-
otp_code=code,
|
| 153 |
-
purpose=purpose,
|
| 154 |
-
recipient_name=None,
|
| 155 |
-
current_year=datetime.utcnow().year,
|
| 156 |
-
)
|
| 157 |
-
except Exception as exc:
|
| 158 |
-
logger.warning("Could not render OTP HTML email template: %s", exc)
|
| 159 |
-
html = None
|
| 160 |
-
|
| 161 |
-
return plain, html
|
| 162 |
-
|
| 163 |
-
|
| 164 |
-
def _password_reset_email_content(reset_link: str) -> tuple[str, str]:
|
| 165 |
-
"""Return (plain_text, html) for password-reset emails."""
|
| 166 |
-
_reset_title = "ICH Screening β Password Reset"
|
| 167 |
-
plain = (
|
| 168 |
-
f"{_reset_title}\n"
|
| 169 |
-
f"{'β' * len(_reset_title)}\n\n"
|
| 170 |
-
"Hi there,\n\n"
|
| 171 |
-
"We received a request to reset the password for your ICH Screening account.\n"
|
| 172 |
-
"Use the link below to choose a new password β it only takes a moment.\n\n"
|
| 173 |
-
f" Reset link: {reset_link}\n\n"
|
| 174 |
-
"This link is single-use and expires in 30 minutes.\n\n"
|
| 175 |
-
"Didn't request this? You can ignore this email β your password has not been\n"
|
| 176 |
-
"changed and your account remains intact.\n"
|
| 177 |
)
|
| 178 |
|
| 179 |
-
try:
|
| 180 |
-
from flask import render_template
|
| 181 |
-
html = render_template(
|
| 182 |
-
"email/password_reset_email.html",
|
| 183 |
-
reset_link=reset_link,
|
| 184 |
-
recipient_name=None,
|
| 185 |
-
current_year=datetime.utcnow().year,
|
| 186 |
-
)
|
| 187 |
-
except Exception as exc:
|
| 188 |
-
logger.warning("Could not render password-reset HTML email template: %s", exc)
|
| 189 |
-
html = None
|
| 190 |
|
| 191 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 192 |
|
| 193 |
|
| 194 |
-
def _send_email(to_email: str, subject: str, body: str
|
| 195 |
-
"""Send a (optionally multipart HTML + plain-text) email via SMTP."""
|
| 196 |
smtp_host = os.environ.get("SMTP_HOST", os.environ.get("EMAIL_HOST", "")).strip()
|
| 197 |
smtp_user = os.environ.get("SMTP_USER", os.environ.get("EMAIL_HOST_USER", "")).strip()
|
| 198 |
smtp_pass = os.environ.get("SMTP_PASSWORD", os.environ.get("EMAIL_HOST_PASSWORD", "")).strip()
|
|
@@ -211,11 +157,7 @@ def _send_email(to_email: str, subject: str, body: str, html_body: str | None =
|
|
| 211 |
msg["Subject"] = subject
|
| 212 |
msg["From"] = smtp_from
|
| 213 |
msg["To"] = to_email
|
| 214 |
-
# Plain-text part (always present as fallback for non-HTML clients)
|
| 215 |
msg.set_content(body)
|
| 216 |
-
# HTML alternative part (preferred by modern email clients when present)
|
| 217 |
-
if html_body:
|
| 218 |
-
msg.add_alternative(html_body, subtype="html")
|
| 219 |
|
| 220 |
try:
|
| 221 |
with smtplib.SMTP(smtp_host, smtp_port, timeout=20) as server:
|
|
@@ -301,12 +243,10 @@ def register():
|
|
| 301 |
db.session.commit()
|
| 302 |
|
| 303 |
otp_code = _store_otp(email=user.email, purpose="verify_email", user_id=user.id)
|
| 304 |
-
_plain, _html = _otp_email_content(otp_code, "verify_email")
|
| 305 |
sent = _send_email(
|
| 306 |
user.email,
|
| 307 |
"Your ICH Screening verification code",
|
| 308 |
-
|
| 309 |
-
html_body=_html,
|
| 310 |
)
|
| 311 |
if _auth_email_debug_enabled():
|
| 312 |
logger.info("DEV OTP for %s: %s", user.email, otp_code)
|
|
@@ -356,12 +296,10 @@ def login():
|
|
| 356 |
|
| 357 |
if not user.is_active:
|
| 358 |
otp_code = _store_otp(email=user.email, purpose="verify_email", user_id=user.id)
|
| 359 |
-
_plain, _html = _otp_email_content(otp_code, "verify_email")
|
| 360 |
sent = _send_email(
|
| 361 |
user.email,
|
| 362 |
"Your ICH Screening verification code",
|
| 363 |
-
|
| 364 |
-
html_body=_html,
|
| 365 |
)
|
| 366 |
if _auth_email_debug_enabled():
|
| 367 |
logger.info("DEV OTP resend/login for %s: %s", user.email, otp_code)
|
|
@@ -418,12 +356,10 @@ def forgot_password():
|
|
| 418 |
if user:
|
| 419 |
token = _token_serializer().dumps({"email": user.email, "purpose": "reset_password"})
|
| 420 |
reset_link = _build_external_link('auth.reset_password', token=token)
|
| 421 |
-
_plain, _html = _password_reset_email_content(reset_link)
|
| 422 |
sent = _send_email(
|
| 423 |
user.email,
|
| 424 |
'Reset your ICH Screening password',
|
| 425 |
-
|
| 426 |
-
html_body=_html,
|
| 427 |
)
|
| 428 |
if _auth_email_debug_enabled():
|
| 429 |
logger.info("DEV reset link for %s: %s", user.email, reset_link)
|
|
@@ -490,8 +426,7 @@ def resend_otp():
|
|
| 490 |
purpose = payload.get("purpose", "verify_email")
|
| 491 |
user_id = payload.get("user_id")
|
| 492 |
new_code = _store_otp(email=email, purpose=purpose, user_id=user_id)
|
| 493 |
-
|
| 494 |
-
sent = _send_email(email, "Your ICH Screening verification code", _plain, html_body=_html)
|
| 495 |
if _auth_email_debug_enabled():
|
| 496 |
logger.info("DEV OTP resend for %s: %s", email, new_code)
|
| 497 |
|
|
|
|
| 116 |
return True, "", payload
|
| 117 |
|
| 118 |
|
| 119 |
+
def _otp_body(code: str, purpose: str) -> str:
|
|
|
|
| 120 |
if purpose == "verify_email":
|
| 121 |
title = "Verify your ICH Screening account"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
else:
|
| 123 |
title = "Your ICH Screening verification code"
|
| 124 |
+
return (
|
| 125 |
+
f"{title}\n\n"
|
| 126 |
+
f"Your one-time password (OTP) is: {code}\n"
|
| 127 |
+
"This code expires in 10 minutes.\n\n"
|
| 128 |
+
"If you did not request this, you can ignore this email."
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 129 |
)
|
| 130 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 131 |
|
| 132 |
+
def _password_reset_body(reset_link: str) -> str:
|
| 133 |
+
return (
|
| 134 |
+
"Reset your ICH Screening password\n\n"
|
| 135 |
+
f"Click the link below to set a new password:\n{reset_link}\n\n"
|
| 136 |
+
"This link expires in 30 minutes.\n"
|
| 137 |
+
"If you did not request this, you can ignore this email."
|
| 138 |
+
)
|
| 139 |
|
| 140 |
|
| 141 |
+
def _send_email(to_email: str, subject: str, body: str) -> bool:
|
|
|
|
| 142 |
smtp_host = os.environ.get("SMTP_HOST", os.environ.get("EMAIL_HOST", "")).strip()
|
| 143 |
smtp_user = os.environ.get("SMTP_USER", os.environ.get("EMAIL_HOST_USER", "")).strip()
|
| 144 |
smtp_pass = os.environ.get("SMTP_PASSWORD", os.environ.get("EMAIL_HOST_PASSWORD", "")).strip()
|
|
|
|
| 157 |
msg["Subject"] = subject
|
| 158 |
msg["From"] = smtp_from
|
| 159 |
msg["To"] = to_email
|
|
|
|
| 160 |
msg.set_content(body)
|
|
|
|
|
|
|
|
|
|
| 161 |
|
| 162 |
try:
|
| 163 |
with smtplib.SMTP(smtp_host, smtp_port, timeout=20) as server:
|
|
|
|
| 243 |
db.session.commit()
|
| 244 |
|
| 245 |
otp_code = _store_otp(email=user.email, purpose="verify_email", user_id=user.id)
|
|
|
|
| 246 |
sent = _send_email(
|
| 247 |
user.email,
|
| 248 |
"Your ICH Screening verification code",
|
| 249 |
+
_otp_body(otp_code, "verify_email"),
|
|
|
|
| 250 |
)
|
| 251 |
if _auth_email_debug_enabled():
|
| 252 |
logger.info("DEV OTP for %s: %s", user.email, otp_code)
|
|
|
|
| 296 |
|
| 297 |
if not user.is_active:
|
| 298 |
otp_code = _store_otp(email=user.email, purpose="verify_email", user_id=user.id)
|
|
|
|
| 299 |
sent = _send_email(
|
| 300 |
user.email,
|
| 301 |
"Your ICH Screening verification code",
|
| 302 |
+
_otp_body(otp_code, "verify_email"),
|
|
|
|
| 303 |
)
|
| 304 |
if _auth_email_debug_enabled():
|
| 305 |
logger.info("DEV OTP resend/login for %s: %s", user.email, otp_code)
|
|
|
|
| 356 |
if user:
|
| 357 |
token = _token_serializer().dumps({"email": user.email, "purpose": "reset_password"})
|
| 358 |
reset_link = _build_external_link('auth.reset_password', token=token)
|
|
|
|
| 359 |
sent = _send_email(
|
| 360 |
user.email,
|
| 361 |
'Reset your ICH Screening password',
|
| 362 |
+
_password_reset_body(reset_link),
|
|
|
|
| 363 |
)
|
| 364 |
if _auth_email_debug_enabled():
|
| 365 |
logger.info("DEV reset link for %s: %s", user.email, reset_link)
|
|
|
|
| 426 |
purpose = payload.get("purpose", "verify_email")
|
| 427 |
user_id = payload.get("user_id")
|
| 428 |
new_code = _store_otp(email=email, purpose=purpose, user_id=user_id)
|
| 429 |
+
sent = _send_email(email, "Your ICH Screening verification code", _otp_body(new_code, purpose))
|
|
|
|
| 430 |
if _auth_email_debug_enabled():
|
| 431 |
logger.info("DEV OTP resend for %s: %s", email, new_code)
|
| 432 |
|
|
@@ -8,6 +8,7 @@ from flask import session, redirect, url_for, request, g, abort, has_request_con
|
|
| 8 |
from flask_login import LoginManager, current_user
|
| 9 |
from models import db, User, AuditLog
|
| 10 |
from datetime import datetime
|
|
|
|
| 11 |
|
| 12 |
logger = logging.getLogger(__name__)
|
| 13 |
|
|
@@ -25,7 +26,22 @@ def init_auth(app):
|
|
| 25 |
@login_manager.user_loader
|
| 26 |
def load_user(user_id):
|
| 27 |
"""Load user from database by ID"""
|
| 28 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 29 |
|
| 30 |
|
| 31 |
def get_client_ip():
|
|
|
|
| 8 |
from flask_login import LoginManager, current_user
|
| 9 |
from models import db, User, AuditLog
|
| 10 |
from datetime import datetime
|
| 11 |
+
from sqlalchemy.exc import SQLAlchemyError
|
| 12 |
|
| 13 |
logger = logging.getLogger(__name__)
|
| 14 |
|
|
|
|
| 26 |
@login_manager.user_loader
|
| 27 |
def load_user(user_id):
|
| 28 |
"""Load user from database by ID"""
|
| 29 |
+
try:
|
| 30 |
+
return User.query.get(int(user_id))
|
| 31 |
+
except SQLAlchemyError as e:
|
| 32 |
+
logger.warning(f"User loader failed, clearing session context: {e}")
|
| 33 |
+
try:
|
| 34 |
+
db.session.rollback()
|
| 35 |
+
except Exception:
|
| 36 |
+
pass
|
| 37 |
+
return None
|
| 38 |
+
except Exception as e:
|
| 39 |
+
logger.warning(f"Unexpected user loader failure: {e}")
|
| 40 |
+
try:
|
| 41 |
+
db.session.rollback()
|
| 42 |
+
except Exception:
|
| 43 |
+
pass
|
| 44 |
+
return None
|
| 45 |
|
| 46 |
|
| 47 |
def get_client_ip():
|
|
@@ -88,6 +88,7 @@ class ScreeningReport(db.Model):
|
|
| 88 |
# File paths (relative to user's data dir)
|
| 89 |
report_json_path = db.Column(db.String(500))
|
| 90 |
gradcam_image_path = db.Column(db.String(500))
|
|
|
|
| 91 |
|
| 92 |
# Generated timestamp
|
| 93 |
generated_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, index=True)
|
|
|
|
| 88 |
# File paths (relative to user's data dir)
|
| 89 |
report_json_path = db.Column(db.String(500))
|
| 90 |
gradcam_image_path = db.Column(db.String(500))
|
| 91 |
+
report_payload = db.Column(db.Text)
|
| 92 |
|
| 93 |
# Generated timestamp
|
| 94 |
generated_at = db.Column(db.DateTime, default=datetime.utcnow, nullable=False, index=True)
|
|
@@ -32,3 +32,8 @@ huggingface_hub>=0.17.0
|
|
| 32 |
requests>=2.31.0
|
| 33 |
python-dateutil>=2.8.0
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
requests>=2.31.0
|
| 33 |
python-dateutil>=2.8.0
|
| 34 |
|
| 35 |
+
|
| 36 |
+
celery
|
| 37 |
+
redis
|
| 38 |
+
groq
|
| 39 |
+
cloudinary
|
|
@@ -241,6 +241,11 @@ def build_report(
|
|
| 241 |
|
| 242 |
report["llm_summary"] = generate_medical_summary(inference, calib_cfg, report)
|
| 243 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 244 |
# Cloudinary Integration
|
| 245 |
cloud_name = os.environ.get("CLOUDINARY_CLOUD_NAME")
|
| 246 |
api_key = os.environ.get("CLOUDINARY_API_KEY")
|
|
|
|
| 241 |
|
| 242 |
report["llm_summary"] = generate_medical_summary(inference, calib_cfg, report)
|
| 243 |
|
| 244 |
+
groq_api_key = os.environ.get("GROQ_API_KEY")
|
| 245 |
+
if Groq and groq_api_key:
|
| 246 |
+
report["llm_provider"] = "groq"
|
| 247 |
+
report["llm_model"] = os.environ.get("LLM_MODEL", "llama-3.1-8b-instant")
|
| 248 |
+
|
| 249 |
# Cloudinary Integration
|
| 250 |
cloud_name = os.environ.get("CLOUDINARY_CLOUD_NAME")
|
| 251 |
api_key = os.environ.get("CLOUDINARY_API_KEY")
|
|
@@ -10,6 +10,7 @@
|
|
| 10 |
|
| 11 |
var title = document.getElementById('batchTitle');
|
| 12 |
var subtitle = document.getElementById('batchSubtitle');
|
|
|
|
| 13 |
var fill = document.getElementById('progressFill');
|
| 14 |
var pctLabel = document.getElementById('progressPct');
|
| 15 |
var currentFile = document.getElementById('currentFile');
|
|
@@ -42,6 +43,16 @@
|
|
| 42 |
statOK.textContent = data.succeeded;
|
| 43 |
statFail.textContent = data.failed_ids ? data.failed_ids.length : 0;
|
| 44 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 45 |
fill.style.width = pct + '%';
|
| 46 |
pctLabel.textContent = pct + '%';
|
| 47 |
currentFile.textContent = data.current_file ? 'Processing: ' + data.current_file : '';
|
|
|
|
| 10 |
|
| 11 |
var title = document.getElementById('batchTitle');
|
| 12 |
var subtitle = document.getElementById('batchSubtitle');
|
| 13 |
+
var queueStatus = document.getElementById('queueStatus');
|
| 14 |
var fill = document.getElementById('progressFill');
|
| 15 |
var pctLabel = document.getElementById('progressPct');
|
| 16 |
var currentFile = document.getElementById('currentFile');
|
|
|
|
| 43 |
statOK.textContent = data.succeeded;
|
| 44 |
statFail.textContent = data.failed_ids ? data.failed_ids.length : 0;
|
| 45 |
|
| 46 |
+
if (queueStatus) {
|
| 47 |
+
if (typeof data.queue_size === 'number') {
|
| 48 |
+
queueStatus.textContent = 'Queue size: ' + data.queue_size;
|
| 49 |
+
} else if (data.status === 'pending') {
|
| 50 |
+
queueStatus.textContent = 'Queued for processing...';
|
| 51 |
+
} else {
|
| 52 |
+
queueStatus.textContent = '';
|
| 53 |
+
}
|
| 54 |
+
}
|
| 55 |
+
|
| 56 |
fill.style.width = pct + '%';
|
| 57 |
pctLabel.textContent = pct + '%';
|
| 58 |
currentFile.textContent = data.current_file ? 'Processing: ' + data.current_file : '';
|
|
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Celery task workers for async inference and batch processing.
|
| 3 |
+
Handles long-running DICOM processing jobs with progress tracking via Redis.
|
| 4 |
+
|
| 5 |
+
Run worker with: celery -A tasks worker --loglevel=info
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import logging
|
| 9 |
+
import os
|
| 10 |
+
import shutil
|
| 11 |
+
import datetime
|
| 12 |
+
import ssl
|
| 13 |
+
import sys
|
| 14 |
+
import traceback
|
| 15 |
+
from pathlib import Path
|
| 16 |
+
from typing import Any
|
| 17 |
+
|
| 18 |
+
# Ensure the app directory is in the Python path so imports work in worker processes
|
| 19 |
+
APP_DIR = Path(__file__).parent.absolute()
|
| 20 |
+
if str(APP_DIR) not in sys.path:
|
| 21 |
+
sys.path.insert(0, str(APP_DIR))
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
from dotenv import load_dotenv
|
| 25 |
+
load_dotenv()
|
| 26 |
+
except ImportError:
|
| 27 |
+
pass
|
| 28 |
+
|
| 29 |
+
from celery import Celery, current_task
|
| 30 |
+
|
| 31 |
+
logger = logging.getLogger(__name__)
|
| 32 |
+
|
| 33 |
+
# Extract Redis URL from environment
|
| 34 |
+
REDIS_URL = os.environ.get("REDIS_URL", "redis://localhost:6379/0")
|
| 35 |
+
|
| 36 |
+
# Initialize Celery app
|
| 37 |
+
celery_app = Celery(
|
| 38 |
+
"ich_tasks",
|
| 39 |
+
broker=REDIS_URL,
|
| 40 |
+
backend=REDIS_URL,
|
| 41 |
+
)
|
| 42 |
+
|
| 43 |
+
# Configure Celery with SSL support for Upstash Redis
|
| 44 |
+
ssl_config = None
|
| 45 |
+
redis_backend_ssl = None
|
| 46 |
+
if REDIS_URL.startswith("rediss://"):
|
| 47 |
+
ssl_config = {"ssl_cert_reqs": ssl.CERT_NONE}
|
| 48 |
+
redis_backend_ssl = {"ssl_cert_reqs": ssl.CERT_NONE}
|
| 49 |
+
|
| 50 |
+
celery_app.conf.update(
|
| 51 |
+
broker_use_ssl=ssl_config,
|
| 52 |
+
redis_backend_use_ssl=redis_backend_ssl,
|
| 53 |
+
task_serializer="json",
|
| 54 |
+
accept_content=["json"],
|
| 55 |
+
result_serializer="json",
|
| 56 |
+
timezone="UTC",
|
| 57 |
+
enable_utc=True,
|
| 58 |
+
task_track_started=True,
|
| 59 |
+
task_time_limit=3600, # 1 hour hard limit
|
| 60 |
+
task_soft_time_limit=3300, # 55 min soft limit
|
| 61 |
+
result_expires=86400, # 24 hours
|
| 62 |
+
)
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
@celery_app.task(bind=True, name="tasks.process_dicom_batch")
def process_dicom_batch(
    self,
    batch_id: str,
    dcm_paths: list[str],
    user_id: int,
    temp_dir: str | None = None,
) -> dict[str, Any]:
    """
    Process a batch of DICOM files asynchronously with progress tracking.

    Args:
        batch_id: Unique identifier for this batch job
        dcm_paths: List of DICOM file paths to process
        user_id: User ID for audit and data isolation
        temp_dir: Optional temporary directory to clean up after

    Returns:
        Dictionary with final batch status and results matching frontend expectations
    """
    # Import here to avoid circular imports. Add diagnostics to help debug
    # ModuleNotFoundError issues when Celery workers can't find `app_new`.
    try:
        # Ensure APP_DIR is present in sys.path for worker subprocesses
        if str(APP_DIR) not in sys.path:
            sys.path.insert(0, str(APP_DIR))
            logger.info(f"Inserted APP_DIR into sys.path: {APP_DIR}")
        else:
            logger.info(f"APP_DIR already in sys.path: {APP_DIR}")

        logger.info(f"tasks.py APP_DIR={APP_DIR}")
        logger.info(f"sys.path (first 10): {sys.path[:10]}")
        # List files in the app dir for visibility
        try:
            files = [p.name for p in Path(APP_DIR).iterdir() if p.exists()]
            logger.info(f"APP_DIR contents: {files[:50]}")
        except Exception as _e:
            logger.warning(f"Could not list APP_DIR contents: {_e}")

        from app_new import app, _run_inference_on_dcm
        from auth_utils import log_audit
        from models import ScreeningUpload, db
    except Exception:
        logger.error("Failed importing application modules inside Celery worker:\n" + traceback.format_exc())
        raise

    total = len(dcm_paths)
    succeeded_ids: list[str] = []
    failed_ids: list[str] = []
    started_at = datetime.datetime.now().isoformat()

    def _progress_meta(processed: int, current_file: str) -> dict[str, Any]:
        """Build the PROGRESS payload in the _BATCHES format the frontend polls.

        Shared by the pre-file and post-file update_state calls so the two
        payloads can never drift apart.
        """
        return {
            "batch_id": batch_id,
            "user_id": user_id,
            "status": "running",
            "total": total,
            "processed": processed,
            "succeeded": len(succeeded_ids),
            "failed_ids": list(failed_ids),
            "image_ids": list(succeeded_ids),
            "current_file": current_file,
            "started_at": started_at,
            "finished_at": None,
            "error": None,
            "temp_dir": temp_dir,
        }

    logger.info(f"Batch {batch_id} starting: {total} files for user {user_id}")

    try:
        with app.app_context():
            for i, path_str in enumerate(dcm_paths, 1):
                # Check if task was revoked (compat across Celery versions)
                request_ctx = current_task.request
                is_revoked = bool(getattr(request_ctx, "is_revoked", False)) or bool(
                    getattr(request_ctx, "revoked", False)
                )
                if is_revoked:
                    logger.info(f"Batch {batch_id} revoked, stopping")
                    break

                path = Path(path_str)
                image_id = path.stem

                # Record the upload before inference so a crash still leaves a row.
                upload_record = ScreeningUpload(
                    user_id=user_id,
                    file_name=path.name,
                    original_filename=path.name,
                    file_size=path.stat().st_size if path.exists() else None,
                    file_path=str(path),
                    processing_status="processing",
                )
                db.session.add(upload_record)
                db.session.commit()

                # Publish progress before starting this file
                self.update_state(state="PROGRESS", meta=_progress_meta(i - 1, image_id))

                try:
                    report, _ = _run_inference_on_dcm(path, user_id, upload_record.id)
                    if report:
                        upload_record.processing_status = "completed"
                        db.session.commit()
                        succeeded_ids.append(image_id)
                    else:
                        upload_record.processing_status = "failed"
                        db.session.commit()
                        failed_ids.append(image_id)
                except Exception as e:
                    logger.error(f"Batch {batch_id}: failed {image_id} -> {e}")
                    db.session.rollback()
                    upload_record.processing_status = "failed"
                    try:
                        db.session.commit()
                    except Exception:
                        db.session.rollback()
                    failed_ids.append(image_id)

                # Publish progress after processing each file
                self.update_state(state="PROGRESS", meta=_progress_meta(i, ""))

        # Cleanup temporary directory if provided
        if temp_dir and Path(temp_dir).exists():
            try:
                shutil.rmtree(temp_dir, ignore_errors=True)
                logger.info(f"Cleaned up temp_dir: {temp_dir}")
            except Exception as e:
                logger.warning(f"Failed to clean temp_dir {temp_dir}: {e}")

        # Log final audit result
        with app.app_context():
            audit_status = "success" if len(failed_ids) == 0 else "partial"
            log_audit(
                "batch_processing_completed",
                user_id=user_id,
                details=f"batch_id={batch_id}, processed={total}, succeeded={len(succeeded_ids)}, failed={len(failed_ids)}",
                status=audit_status,
            )

        # Return final result matching _BATCHES format for frontend compatibility
        result = {
            "batch_id": batch_id,
            "user_id": user_id,
            "status": "completed",
            "total": total,
            "processed": total,
            "succeeded": len(succeeded_ids),
            "failed_ids": list(failed_ids),
            "image_ids": list(succeeded_ids),
            "current_file": "",
            "started_at": started_at,
            "finished_at": datetime.datetime.now().isoformat(),
            "error": None,
            "temp_dir": temp_dir,
        }

        logger.info(
            f"Batch {batch_id} complete: {len(succeeded_ids)}/{total} succeeded, "
            f"{len(failed_ids)} failed"
        )
        return result

    except Exception as e:
        logger.error(f"Batch {batch_id} error: {e}", exc_info=True)
        with app.app_context():
            log_audit(
                "batch_processing_failed",
                user_id=user_id,
                details=f"batch_id={batch_id}, error={str(e)}",
                status="failure",
            )
        raise
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
@celery_app.task(name="tasks.health_check")
def health_check() -> str:
    """Trivial task used to confirm a Celery worker is alive and consuming."""
    status_message = "Celery worker is healthy"
    return status_message
|
|
@@ -3,7 +3,7 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
-
<title>Page Not Found β
|
| 7 |
<meta name="description" content="The page you requested could not be found."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
@@ -71,7 +71,7 @@
|
|
| 71 |
</div>
|
| 72 |
|
| 73 |
<p class="error-footer">
|
| 74 |
-
<a href="{{ url_for('home') }}">
|
| 75 |
</p>
|
| 76 |
</div>
|
| 77 |
</body>
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
+
<title>Page Not Found β AI Medical Intelligence Pipeline</title>
|
| 7 |
<meta name="description" content="The page you requested could not be found."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
|
|
| 71 |
</div>
|
| 72 |
|
| 73 |
<p class="error-footer">
|
| 74 |
+
<a href="{{ url_for('home') }}">AI Medical Intelligence Pipeline</a> β CT Scan Analysis
|
| 75 |
</p>
|
| 76 |
</div>
|
| 77 |
</body>
|
|
@@ -3,7 +3,7 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
-
<title>Server Error β
|
| 7 |
<meta name="description" content="An internal server error occurred. Our team has been notified."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
@@ -69,7 +69,7 @@
|
|
| 69 |
</div>
|
| 70 |
|
| 71 |
<p class="error-footer">
|
| 72 |
-
<a href="{{ url_for('home') }}">
|
| 73 |
</p>
|
| 74 |
</div>
|
| 75 |
</body>
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
+
<title>Server Error β AI Medical Intelligence Pipeline</title>
|
| 7 |
<meta name="description" content="An internal server error occurred. Our team has been notified."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
|
|
| 69 |
</div>
|
| 70 |
|
| 71 |
<p class="error-footer">
|
| 72 |
+
<a href="{{ url_for('home') }}">AI Medical Intelligence Pipeline</a> β CT Scan Analysis
|
| 73 |
</p>
|
| 74 |
</div>
|
| 75 |
</body>
|
|
@@ -1,13 +1,13 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}About β
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="hero">
|
| 7 |
<div class="hero-text">
|
| 8 |
<h1>About This System</h1>
|
| 9 |
<p>
|
| 10 |
-
AI
|
| 11 |
and Clinical Reporting
|
| 12 |
</p>
|
| 13 |
</div>
|
|
@@ -17,8 +17,8 @@
|
|
| 17 |
<section class="panel">
|
| 18 |
<h3>System Overview</h3>
|
| 19 |
<p>
|
| 20 |
-
This is an AI
|
| 21 |
-
hemorrhage (ICH)
|
| 22 |
explainability, confidence calibration, and structured clinical reporting to
|
| 23 |
support β not replace β medical decision-making.
|
| 24 |
</p>
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}About β AI Medical Intelligence Pipeline{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="hero">
|
| 7 |
<div class="hero-text">
|
| 8 |
<h1>About This System</h1>
|
| 9 |
<p>
|
| 10 |
+
AI Medical Intelligence Pipeline for CT Scan Analysis with Explainability
|
| 11 |
and Clinical Reporting
|
| 12 |
</p>
|
| 13 |
</div>
|
|
|
|
| 17 |
<section class="panel">
|
| 18 |
<h3>System Overview</h3>
|
| 19 |
<p>
|
| 20 |
+
This is an AI medical intelligence pipeline designed to analyze CT brain
|
| 21 |
+
scans for intracranial hemorrhage (ICH). It combines deep learning with visual
|
| 22 |
explainability, confidence calibration, and structured clinical reporting to
|
| 23 |
support β not replace β medical decision-making.
|
| 24 |
</p>
|
|
@@ -3,8 +3,8 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
-
<title>Login β
|
| 7 |
-
<meta name="description" content="Sign in to the
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
| 10 |
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet"/>
|
|
@@ -22,7 +22,7 @@
|
|
| 22 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 23 |
</svg>
|
| 24 |
</div>
|
| 25 |
-
<span class="auth-brand-name">
|
| 26 |
</div>
|
| 27 |
|
| 28 |
<div class="auth-headline">
|
|
@@ -35,7 +35,7 @@
|
|
| 35 |
<span class="feat-icon">
|
| 36 |
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><circle cx="12" cy="12" r="3"/><path d="M12 1v4M12 19v4M4.22 4.22l2.83 2.83M16.95 16.95l2.83 2.83M1 12h4M19 12h4M4.22 19.78l2.83-2.83M16.95 7.05l2.83-2.83"/></svg>
|
| 37 |
</span>
|
| 38 |
-
|
| 39 |
</li>
|
| 40 |
<li>
|
| 41 |
<span class="feat-icon">
|
|
@@ -185,7 +185,7 @@
|
|
| 185 |
</form>
|
| 186 |
|
| 187 |
<div class="auth-footer">
|
| 188 |
-
New to
|
| 189 |
</div>
|
| 190 |
</div>
|
| 191 |
</main>
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
+
<title>Login β AI Medical Intelligence Pipeline</title>
|
| 7 |
+
<meta name="description" content="Sign in to the AI Medical Intelligence Pipeline dashboard."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
| 10 |
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet"/>
|
|
|
|
| 22 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 23 |
</svg>
|
| 24 |
</div>
|
| 25 |
+
<span class="auth-brand-name">AI Medical Intelligence Pipeline</span>
|
| 26 |
</div>
|
| 27 |
|
| 28 |
<div class="auth-headline">
|
|
|
|
| 35 |
<span class="feat-icon">
|
| 36 |
<svg width="16" height="16" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="2"><circle cx="12" cy="12" r="3"/><path d="M12 1v4M12 19v4M4.22 4.22l2.83 2.83M16.95 16.95l2.83 2.83M1 12h4M19 12h4M4.22 19.78l2.83-2.83M16.95 7.05l2.83-2.83"/></svg>
|
| 37 |
</span>
|
| 38 |
+
AI medical intelligence for CT analysis
|
| 39 |
</li>
|
| 40 |
<li>
|
| 41 |
<span class="feat-icon">
|
|
|
|
| 185 |
</form>
|
| 186 |
|
| 187 |
<div class="auth-footer">
|
| 188 |
+
New to the pipeline? <a href="{{ url_for('auth.register') }}">Create a free account</a>
|
| 189 |
</div>
|
| 190 |
</div>
|
| 191 |
</main>
|
|
@@ -3,7 +3,7 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
-
<title>Reset Password β
|
| 7 |
<meta name="description" content="Set a new password for your account."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
@@ -20,7 +20,7 @@
|
|
| 20 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 21 |
</svg>
|
| 22 |
</div>
|
| 23 |
-
<span class="auth-brand-name">
|
| 24 |
</div>
|
| 25 |
|
| 26 |
<div class="auth-headline">
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
+
<title>Reset Password β AI Medical Intelligence Pipeline</title>
|
| 7 |
<meta name="description" content="Set a new password for your account."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
|
|
| 20 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 21 |
</svg>
|
| 22 |
</div>
|
| 23 |
+
<span class="auth-brand-name">AI Medical Intelligence Pipeline</span>
|
| 24 |
</div>
|
| 25 |
|
| 26 |
<div class="auth-headline">
|
|
@@ -3,7 +3,7 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
-
<title>Verify Email OTP β
|
| 7 |
<meta name="description" content="Verify your email with one-time password."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
@@ -20,7 +20,7 @@
|
|
| 20 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 21 |
</svg>
|
| 22 |
</div>
|
| 23 |
-
<span class="auth-brand-name">
|
| 24 |
</div>
|
| 25 |
|
| 26 |
<div class="auth-headline">
|
|
@@ -29,7 +29,7 @@
|
|
| 29 |
</div>
|
| 30 |
|
| 31 |
<ul class="auth-features">
|
| 32 |
-
<li><span class="feat-icon">1</span>Open the email from
|
| 33 |
<li><span class="feat-icon">2</span>Copy the 6-digit code</li>
|
| 34 |
<li><span class="feat-icon">3</span>Enter it here β valid for 10 minutes</li>
|
| 35 |
</ul>
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8"/>
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1"/>
|
| 6 |
+
<title>Verify Email OTP β AI Medical Intelligence Pipeline</title>
|
| 7 |
<meta name="description" content="Verify your email with one-time password."/>
|
| 8 |
<link rel="preconnect" href="https://fonts.googleapis.com"/>
|
| 9 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin/>
|
|
|
|
| 20 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2"/>
|
| 21 |
</svg>
|
| 22 |
</div>
|
| 23 |
+
<span class="auth-brand-name">AI Medical Intelligence Pipeline</span>
|
| 24 |
</div>
|
| 25 |
|
| 26 |
<div class="auth-headline">
|
|
|
|
| 29 |
</div>
|
| 30 |
|
| 31 |
<ul class="auth-features">
|
| 32 |
+
<li><span class="feat-icon">1</span>Open the email from the AI Medical Intelligence Pipeline</li>
|
| 33 |
<li><span class="feat-icon">2</span>Copy the 6-digit code</li>
|
| 34 |
<li><span class="feat-icon">3</span>Enter it here β valid for 10 minutes</li>
|
| 35 |
</ul>
|
|
@@ -3,7 +3,7 @@
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8" />
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
| 6 |
-
<title>{% block title %}
|
| 7 |
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
| 8 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
| 9 |
<link
|
|
@@ -36,7 +36,7 @@
|
|
| 36 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2" />
|
| 37 |
</svg>
|
| 38 |
</span>
|
| 39 |
-
<span>
|
| 40 |
</a>
|
| 41 |
|
| 42 |
<nav class="nav-links">
|
|
@@ -47,8 +47,10 @@
|
|
| 47 |
class="{% if request.endpoint == 'upload' %}active{% endif %}">New Scan</a>
|
| 48 |
<a href="{{ url_for('reports') }}"
|
| 49 |
class="{% if request.endpoint == 'reports' %}active{% endif %}">Past Reports</a>
|
|
|
|
| 50 |
<a href="{{ url_for('logs_page') }}"
|
| 51 |
-
|
|
|
|
| 52 |
<a href="{{ url_for('evaluation') }}"
|
| 53 |
class="{% if request.endpoint == 'evaluation' %}active{% endif %}">Evaluation</a>
|
| 54 |
<a href="{{ url_for('about') }}"
|
|
@@ -89,7 +91,7 @@
|
|
| 89 |
<footer class="footer">
|
| 90 |
<div class="container footer-inner">
|
| 91 |
<p>
|
| 92 |
-
AI
|
| 93 |
Screening Tool, Not a Diagnostic Device
|
| 94 |
</p>
|
| 95 |
<p class="muted small">
|
|
|
|
| 3 |
<head>
|
| 4 |
<meta charset="utf-8" />
|
| 5 |
<meta name="viewport" content="width=device-width, initial-scale=1" />
|
| 6 |
+
<title>{% block title %}AI Medical Intelligence Pipeline{% endblock %}</title>
|
| 7 |
<link rel="preconnect" href="https://fonts.googleapis.com" />
|
| 8 |
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin />
|
| 9 |
<link
|
|
|
|
| 36 |
<path d="M22 12h-4l-3 9L9 3l-3 9H2" />
|
| 37 |
</svg>
|
| 38 |
</span>
|
| 39 |
+
<span>AI Medical Intelligence Pipeline</span>
|
| 40 |
</a>
|
| 41 |
|
| 42 |
<nav class="nav-links">
|
|
|
|
| 47 |
class="{% if request.endpoint == 'upload' %}active{% endif %}">New Scan</a>
|
| 48 |
<a href="{{ url_for('reports') }}"
|
| 49 |
class="{% if request.endpoint == 'reports' %}active{% endif %}">Past Reports</a>
|
| 50 |
+
{% if show_logs %}
|
| 51 |
<a href="{{ url_for('logs_page') }}"
|
| 52 |
+
class="{% if request.endpoint == 'logs_page' %}active{% endif %}">Logs</a>
|
| 53 |
+
{% endif %}
|
| 54 |
<a href="{{ url_for('evaluation') }}"
|
| 55 |
class="{% if request.endpoint == 'evaluation' %}active{% endif %}">Evaluation</a>
|
| 56 |
<a href="{{ url_for('about') }}"
|
|
|
|
| 91 |
<footer class="footer">
|
| 92 |
<div class="container footer-inner">
|
| 93 |
<p>
|
| 94 |
+
AI Medical Intelligence Pipeline for CT Scan Analysis —
|
| 95 |
Screening Tool, Not a Diagnostic Device
|
| 96 |
</p>
|
| 97 |
<p class="muted small">
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}Batch Processing β
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<div class="batch-page" data-batch-id="{{ batch_id }}" data-status-url="{{ url_for('batch_status', batch_id=batch_id) }}" data-reports-url="{{ url_for('reports') }}">
|
|
@@ -17,6 +17,11 @@
|
|
| 17 |
<p class="muted" id="batchSubtitle">
|
| 18 |
Analyzing {{ batch.total }} DICOM file{{ 's' if batch.total != 1 }} β please keep this page open.
|
| 19 |
</p>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 20 |
</section>
|
| 21 |
|
| 22 |
<!-- ββ Progress bar ββββββββββββββββββββββββββββββββββββββββββββββββββ -->
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}Batch Processing β AI Medical Intelligence Pipeline{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<div class="batch-page" data-batch-id="{{ batch_id }}" data-status-url="{{ url_for('batch_status', batch_id=batch_id) }}" data-reports-url="{{ url_for('reports') }}">
|
|
|
|
| 17 |
<p class="muted" id="batchSubtitle">
|
| 18 |
Analyzing {{ batch.total }} DICOM file{{ 's' if batch.total != 1 }} β please keep this page open.
|
| 19 |
</p>
|
| 20 |
+
<p class="muted" id="queueStatus">
|
| 21 |
+
{% if batch.queue_size is not none %}
|
| 22 |
+
Queue size: {{ batch.queue_size }}
|
| 23 |
+
{% endif %}
|
| 24 |
+
</p>
|
| 25 |
</section>
|
| 26 |
|
| 27 |
<!-- ββ Progress bar ββββββββββββββββββββββββββββββββββββββββββββββββββ -->
|
|
@@ -89,6 +89,19 @@
|
|
| 89 |
</div>
|
| 90 |
</div>
|
| 91 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
<!-- Confidence bar -->
|
| 93 |
{% if row.cal_prob is not none %}
|
| 94 |
<div class="prob-bar-wrap">
|
|
@@ -143,6 +156,22 @@
|
|
| 143 |
</section>
|
| 144 |
{% endif %}
|
| 145 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 146 |
<!-- Model info (from payload) -->
|
| 147 |
{% if payload and payload.screening_module %}
|
| 148 |
<section class="panel" style="margin-top: 16px">
|
|
|
|
| 89 |
</div>
|
| 90 |
</div>
|
| 91 |
|
| 92 |
+
<form method="post" action="{{ url_for('update_ground_truth', image_id=row.image_id) }}" style="margin-top: 12px">
|
| 93 |
+
<label class="muted small" for="trueLabel">Update Ground Truth</label>
|
| 94 |
+
<div class="dir-input-row" style="margin-top: 6px">
|
| 95 |
+
<select name="true_label" id="trueLabel" class="input" style="max-width: 240px">
|
| 96 |
+
<option value="" {% if not row.true_label %}selected{% endif %}>Not set</option>
|
| 97 |
+
<option value="POSITIVE" {% if row.true_label == 'POSITIVE' %}selected{% endif %}>Positive</option>
|
| 98 |
+
<option value="NEGATIVE" {% if row.true_label == 'NEGATIVE' %}selected{% endif %}>Negative</option>
|
| 99 |
+
<option value="UNKNOWN" {% if row.true_label == 'UNKNOWN' %}selected{% endif %}>Unknown</option>
|
| 100 |
+
</select>
|
| 101 |
+
<button type="submit" class="btn btn-primary" style="margin-left: 8px">Save</button>
|
| 102 |
+
</div>
|
| 103 |
+
</form>
|
| 104 |
+
|
| 105 |
<!-- Confidence bar -->
|
| 106 |
{% if row.cal_prob is not none %}
|
| 107 |
<div class="prob-bar-wrap">
|
|
|
|
| 156 |
</section>
|
| 157 |
{% endif %}
|
| 158 |
|
| 159 |
+
{% if payload and (payload.llm_provider or payload.llm_model) %}
|
| 160 |
+
<section class="panel" style="margin-top: 16px">
|
| 161 |
+
<h3>LLM Information</h3>
|
| 162 |
+
<div class="kv-group" style="max-width: 500px">
|
| 163 |
+
<div class="kv">
|
| 164 |
+
<span>Provider</span>
|
| 165 |
+
<strong>{{ payload.llm_provider or 'N/A' }}</strong>
|
| 166 |
+
</div>
|
| 167 |
+
<div class="kv">
|
| 168 |
+
<span>Model</span>
|
| 169 |
+
<strong>{{ payload.llm_model or 'N/A' }}</strong>
|
| 170 |
+
</div>
|
| 171 |
+
</div>
|
| 172 |
+
</section>
|
| 173 |
+
{% endif %}
|
| 174 |
+
|
| 175 |
<!-- Model info (from payload) -->
|
| 176 |
{% if payload and payload.screening_module %}
|
| 177 |
<section class="panel" style="margin-top: 16px">
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}Evaluation β
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="hero">
|
|
@@ -182,4 +182,33 @@
|
|
| 182 |
</div>
|
| 183 |
</div>
|
| 184 |
</section>
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 185 |
{% endblock %}
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}Evaluation β AI Medical Intelligence Pipeline{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="hero">
|
|
|
|
| 182 |
</div>
|
| 183 |
</div>
|
| 184 |
</section>
|
| 185 |
+
|
| 186 |
+
{% if gt_stats %}
|
| 187 |
+
<section class="panel" style="margin-top: 16px">
|
| 188 |
+
<h3>Ground Truth Agreement</h3>
|
| 189 |
+
{% if gt_stats.total == 0 %}
|
| 190 |
+
<p class="muted small">No ground truth labels available yet.</p>
|
| 191 |
+
{% else %}
|
| 192 |
+
<div class="kv-group" style="max-width: 500px">
|
| 193 |
+
<div class="kv">
|
| 194 |
+
<span>Labeled Cases</span><strong>{{ gt_stats.total }}</strong>
|
| 195 |
+
</div>
|
| 196 |
+
<div class="kv">
|
| 197 |
+
<span>Accuracy</span>
|
| 198 |
+
<strong>{{ '%.1f'|format(gt_stats.accuracy * 100) }}%</strong>
|
| 199 |
+
</div>
|
| 200 |
+
<div class="kv">
|
| 201 |
+
<span>False Positive Rate</span>
|
| 202 |
+
<strong>{{ '%.1f'|format(gt_stats.fp_rate * 100) }}%</strong>
|
| 203 |
+
</div>
|
| 204 |
+
<div class="kv">
|
| 205 |
+
<span>TP / TN</span><strong>{{ gt_stats.tp }} / {{ gt_stats.tn }}</strong>
|
| 206 |
+
</div>
|
| 207 |
+
<div class="kv">
|
| 208 |
+
<span>FP / FN</span><strong>{{ gt_stats.fp }} / {{ gt_stats.fn }}</strong>
|
| 209 |
+
</div>
|
| 210 |
+
</div>
|
| 211 |
+
{% endif %}
|
| 212 |
+
</section>
|
| 213 |
+
{% endif %}
|
| 214 |
{% endblock %}
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}
|
| 4 |
|
| 5 |
{% block head %}
|
| 6 |
<link rel="stylesheet" href="{{ url_for('static', filename='css/home.css') }}"/>
|
|
@@ -14,7 +14,7 @@
|
|
| 14 |
<span class="badge-dot"></span>
|
| 15 |
AI-Powered Screening
|
| 16 |
</div>
|
| 17 |
-
<h1>
|
| 18 |
<p>
|
| 19 |
Clinical-grade CT scan analysis powered by deep learning β with Grad-CAM visualisation,
|
| 20 |
automated triage, and exportable PDF reports.
|
|
@@ -112,6 +112,7 @@
|
|
| 112 |
|
| 113 |
<!-- ββ Mini cards ββ -->
|
| 114 |
<section class="mini-cards">
|
|
|
|
| 115 |
<a href="{{ url_for('logs_page') }}" class="mini-card">
|
| 116 |
<div class="mini-card-icon">
|
| 117 |
<svg width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.8">
|
|
@@ -120,8 +121,9 @@
|
|
| 120 |
</svg>
|
| 121 |
</div>
|
| 122 |
<h3>Execution Logs</h3>
|
| 123 |
-
<p class="muted small">{{ log_count }} inference trace{{ 's' if log_count != 1 }} recorded</p>
|
| 124 |
</a>
|
|
|
|
| 125 |
|
| 126 |
<a href="{{ url_for('evaluation') }}" class="mini-card">
|
| 127 |
<div class="mini-card-icon">
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}AI Medical Intelligence Pipeline β Dashboard{% endblock %}
|
| 4 |
|
| 5 |
{% block head %}
|
| 6 |
<link rel="stylesheet" href="{{ url_for('static', filename='css/home.css') }}"/>
|
|
|
|
| 14 |
<span class="badge-dot"></span>
|
| 15 |
AI-Powered Screening
|
| 16 |
</div>
|
| 17 |
+
<h1>AI Medical Intelligence<br><span class="hero-grad">Pipeline for CT Analysis</span></h1>
|
| 18 |
<p>
|
| 19 |
Clinical-grade CT scan analysis powered by deep learning β with Grad-CAM visualisation,
|
| 20 |
automated triage, and exportable PDF reports.
|
|
|
|
| 112 |
|
| 113 |
<!-- ββ Mini cards ββ -->
|
| 114 |
<section class="mini-cards">
|
| 115 |
+
{% if show_logs %}
|
| 116 |
<a href="{{ url_for('logs_page') }}" class="mini-card">
|
| 117 |
<div class="mini-card-icon">
|
| 118 |
<svg width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.8">
|
|
|
|
| 121 |
</svg>
|
| 122 |
</div>
|
| 123 |
<h3>Execution Logs</h3>
|
| 124 |
+
<p class="muted small">{{ log_count | default(0) }} inference trace{{ 's' if (log_count | default(0)) != 1 }} recorded</p>
|
| 125 |
</a>
|
| 126 |
+
{% endif %}
|
| 127 |
|
| 128 |
<a href="{{ url_for('evaluation') }}" class="mini-card">
|
| 129 |
<div class="mini-card-icon">
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="page-header">
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}AI Medical Intelligence Pipeline β Execution Logs{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="page-header">
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}Past Reports β
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="breadcrumb">
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}Past Reports β AI Medical Intelligence Pipeline{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="breadcrumb">
|
|
@@ -1,6 +1,6 @@
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
-
{% block title %}Upload Scan β
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="breadcrumb">
|
|
|
|
| 1 |
{% extends "base.html" %}
|
| 2 |
|
| 3 |
+
{% block title %}Upload Scan β AI Medical Intelligence Pipeline{% endblock %}
|
| 4 |
|
| 5 |
{% block content %}
|
| 6 |
<section class="breadcrumb">
|