# door_orientation/inference.py
# (git metadata: Clement — "add dataset and utils", commit 4b6c5c7)
"""
Inference helpers for the ArcDetectorCNN door orientation model.
Supports single-image and batch inference on already-cropped images.
Classes (alphabetical ImageFolder order):
0 = double
1 = hinge_left
2 = hinge_right
Usage — single image:
from inference import load_model, predict
model = load_model("runs/blocks3_drop0.2/best_model.pt")
label, confidence = predict(img_bgr, model)
print(label, confidence) # e.g. "hinge_left", 0.94
Usage — batch:
labels, confidences = predict_batch([img1, img2, img3], model)
Usage — CLI:
python inference.py --model runs/blocks3_drop0.2/best_model.pt image1.png image2.png
"""
from __future__ import annotations
import argparse
import sys
from pathlib import Path
import cv2
from modal import Image
import numpy as np
import torch
# Make both this package directory and the repository root importable so the
# sibling training / dataset modules resolve whether this file is executed as
# a script or imported as part of the package.
_HERE = Path(__file__).parent
for _p in (_HERE, _HERE.parent.parent):
    if str(_p) not in sys.path:
        sys.path.insert(0, str(_p))
from cnn_dataset_builder import TRANSFORM, preprocess_real_crop  # noqa: E402
from cnn_door_orientation_detection import ArcDetectorCNN, CLASS_NAMES  # noqa: E402
# Select the fastest available device: CUDA GPU, then Apple-Silicon MPS, then CPU.
if torch.cuda.is_available():
    _DEVICE = torch.device("cuda")
elif torch.backends.mps.is_available():
    _DEVICE = torch.device("mps")
else:
    _DEVICE = torch.device("cpu")
# ── model loading ──────────────────────────────────────────────────────────────
def load_model(model_path: str | Path) -> ArcDetectorCNN:
    """
    Load a trained ArcDetectorCNN from a .pt checkpoint.

    A ``config.json`` sitting next to the checkpoint (if present) supplies
    the architecture hyper-parameters (``n_blocks`` / ``dropout``); when it
    is missing, the training defaults (3 blocks, 0.2 dropout) are used.

    Returns the model in eval mode, already moved to the selected device.
    """
    import json

    checkpoint = Path(model_path)
    cfg_file = checkpoint.parent / "config.json"
    cfg = json.loads(cfg_file.read_text()) if cfg_file.exists() else {}

    net = ArcDetectorCNN(
        n_blocks=cfg.get("n_blocks", 3),
        dropout=cfg.get("dropout", 0.2),
    )
    # weights_only=True restricts unpickling to plain tensors (safe loading).
    state = torch.load(checkpoint, map_location="cpu", weights_only=True)
    net.load_state_dict(state)
    net.eval()
    return net.to(_DEVICE)
# ── preprocessing ──────────────────────────────────────────────────────────────
def _to_tensor(img: np.ndarray) -> torch.Tensor:
    """
    Turn one crop (BGR or grayscale uint8) into a normalised (1, 1, H, W) tensor.

    Applies the identical CLAHE preprocessing used at training time so that
    inference inputs match the training distribution.
    """
    from PIL import Image

    if img.ndim == 3:
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    else:
        gray = img
    clahe_img = preprocess_real_crop(gray)
    return TRANSFORM(Image.fromarray(clahe_img)).unsqueeze(0)  # (1, 1, 128, 128)
def _to_batch_tensor(imgs: list[np.ndarray]) -> torch.Tensor:
    """Convert each crop individually, then concatenate into a (N, 1, H, W) batch."""
    tensors = [_to_tensor(crop) for crop in imgs]
    return torch.cat(tensors, dim=0)
# ── inference ──────────────────────────────────────────────────────────────────
def predict(
    img: np.ndarray,
    model: ArcDetectorCNN,
    confidence_threshold: float = 0.0,
) -> tuple[str, float]:
    """
    Classify the door orientation of a single crop.

    Args:
        img: H×W×3 BGR or H×W grayscale uint8 crop.
        model: Loaded ArcDetectorCNN (use load_model()).
        confidence_threshold: If the softmax confidence is below this, return "unknown".

    Returns:
        (class_name, confidence) where class_name is one of
        "double" / "hinge_left" / "hinge_right" / "unknown".
    """
    # Delegate to the batch path with a one-element batch and unpack.
    (label,), (conf,) = predict_batch([img], model, confidence_threshold)
    return label, conf
def predict_batch(
    imgs: list[np.ndarray],
    model: ArcDetectorCNN,
    confidence_threshold: float = 0.5,
) -> tuple[list[str], list[float]]:
    """
    Predict door orientation classes for a batch of crops.

    Args:
        imgs: List of H×W×3 BGR or H×W grayscale uint8 crops. May be empty.
        model: Loaded ArcDetectorCNN (use load_model()).
        confidence_threshold: Images whose top softmax score is below this get "unknown".

    Returns:
        (labels, confidences) — parallel lists, one entry per input image.
        Confidences are rounded to 4 decimals and reported even for
        "unknown" entries.
    """
    # torch.cat over an empty list raises RuntimeError — short-circuit instead.
    if not imgs:
        return [], []
    model.eval()  # defensive: load_model() already does this, but it is cheap
    batch = _to_batch_tensor(imgs).float().to(_DEVICE)  # (N, 1, 128, 128)
    with torch.no_grad():
        probs = model(batch).softmax(dim=1)  # (N, 3)
    confidences_t, indices_t = probs.max(dim=1)  # (N,), (N,)
    labels: list[str] = []
    confidences: list[float] = []
    for conf, idx in zip(confidences_t.cpu().tolist(), indices_t.cpu().tolist()):
        labels.append(CLASS_NAMES[idx] if conf >= confidence_threshold else "unknown")
        confidences.append(round(conf, 4))
    return labels, confidences
# ── CLI ────────────────────────────────────────────────────────────────────────
_DEFAULT_RUNS = _HERE / "runs"  # default training-output directory searched by --runs-dir
def _auto_detect_model(runs_dir: Path) -> Path:
import json
configs = list(runs_dir.glob("*/config.json"))
if not configs:
raise FileNotFoundError(f"No trained runs found in {runs_dir}. Run --train first.")
best = max(configs, key=lambda p: json.loads(p.read_text()).get("best_val_acc", 0))
return best.parent / "best_model.pt"
if __name__ == "__main__":
    # CLI: classify one or more already-cropped door images.
    parser = argparse.ArgumentParser(description="ArcDetectorCNN inference")
    parser.add_argument("images", nargs="+", type=Path, help="Image file(s) to classify")
    parser.add_argument(
        "--model",
        type=Path,
        default=None,
        help="Path to best_model.pt (auto-detected from --runs-dir if omitted)",
    )
    parser.add_argument(
        "--runs-dir",
        type=Path,
        default=_DEFAULT_RUNS,
        help=f"Runs directory for auto-detection (default: {_DEFAULT_RUNS})",
    )
    parser.add_argument(
        "--threshold",
        type=float,
        default=0.0,
        help="Confidence threshold below which output is 'unknown' (default: 0.0)",
    )
    args = parser.parse_args()

    model_path = args.model or _auto_detect_model(args.runs_dir)
    print(f"Model: {model_path}")
    model = load_model(model_path)

    # Load every readable image, remembering which paths survived.
    loaded: list[tuple[Path, np.ndarray]] = []
    for path in args.images:
        frame = cv2.imread(str(path))
        if frame is None:
            print(f" Warning: could not read {path} — skipping")
        else:
            loaded.append((path, frame))
    if not loaded:
        print("No valid images to process.")
        sys.exit(1)

    paths = [p for p, _ in loaded]
    labels, confidences = predict_batch(
        [img for _, img in loaded], model, confidence_threshold=args.threshold
    )

    print(f"\n{'Image':<40} {'Label':<12} Confidence")
    print("-" * 62)
    for path, label, conf in zip(paths, labels, confidences):
        print(f"{str(path):<40} {label:<12} {conf:.4f}")