| """ |
| CNN door orientation detector (3-class or 4-class). |
| |
| Architecture: ArcDetectorCNN — lightweight 3-block mini-VGG with GAP, |
| trained on a mixed synthetic + real crop dataset. |
| |
| Classes (ImageFolder alphabetical order): |
| 0 = double (two arcs, sill at bottom) |
| 1 = hinge_left (arc from bottom-left corner) |
| 2 = hinge_right (arc from bottom-right corner) |
| 3 = no_arc (no door arc — plain room interior / background) [optional 4th class] |
| |
| The number of classes is stored in config.json so load_model() restores it |
| automatically. Old 3-class checkpoints (config without "num_classes") still load. |
| |
| Usage: |
| # Generate dataset first (place no_arc/ crops in raw_crops/ for 4-class training): |
| python cnn_dataset_builder.py --n-per-class 2000 |
| |
| # Train (mini sweep over dropout x depth): |
| python cnn_door_orientation_detection.py --train |
| |
| # Evaluate on test split: |
| python cnn_door_orientation_detection.py --eval |
| """ |
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import sys |
|
|
| from pathlib import Path |
|
|
| import cv2 |
| import numpy as np |
| import torch |
|
|
| from loguru import logger |
| from PIL import Image |
| from sklearn.metrics import classification_report |
| from sklearn.metrics import confusion_matrix |
| from torch import nn |
| from torch.optim.lr_scheduler import CosineAnnealingLR |
| from tqdm import tqdm |
|
|
| from cnn_dataset_builder import TRANSFORM |
| from cnn_dataset_builder import preprocess_real_crop |
|
|
# Directory containing this source file; anchors the default dataset/runs paths below.
_HERE = Path(__file__).parent
|
|
| from cnn_dataset_builder import get_dataloaders |
|
|
| if torch.cuda.is_available(): |
| DEVICE = torch.device("cuda") |
| elif torch.backends.mps.is_available(): |
| DEVICE = torch.device("mps") |
| else: |
| DEVICE = torch.device("cpu") |
|
|
| _DEFAULT_WORKERS = 4 if DEVICE.type == "cuda" else 0 |
|
|
| |
| |
| CLASS_NAMES = ["double", "hinge_left", "hinge_right", "no_arc"] |
| NO_ARC_IDX = 3 |
|
|
|
|
| |
|
|
|
|
| def _conv_block(in_ch: int, out_ch: int) -> nn.Sequential: |
| return nn.Sequential( |
| nn.Conv2d(in_ch, out_ch, kernel_size=3, padding=1, bias=False), |
| nn.BatchNorm2d(out_ch), |
| nn.ReLU(inplace=True), |
| nn.MaxPool2d(2), |
| ) |
|
|
|
|
class ArcDetectorCNN(nn.Module):
    """
    Mini-VGG classifier for door orientation on 128x128 single-channel crops.

    Every block halves the spatial size (128→64→32→16, or →8 with 4 blocks);
    global average pooling then yields a 64-d (3 blocks) or 128-d (4 blocks)
    feature vector feeding a dropout + linear head.

    Args:
        n_blocks: 3 or 4 convolutional blocks.
        dropout: dropout probability before the final linear layer.
        num_classes: 3 (double/hinge_left/hinge_right) or 4 (+ no_arc).
            Defaults to 3 so existing checkpoints keep loading.
    """

    def __init__(self, n_blocks: int = 3, dropout: float = 0.2, num_classes: int = 3) -> None:
        super().__init__()
        if n_blocks not in (3, 4):
            raise ValueError("n_blocks must be 3 or 4")
        widths = [1, 16, 32, 64, 128]
        stages = []
        for c_in, c_out in zip(widths[:n_blocks], widths[1 : n_blocks + 1]):
            # Inlined conv block: conv3x3 → BN → ReLU → 2x2 max-pool.
            stages.append(
                nn.Sequential(
                    nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=False),
                    nn.BatchNorm2d(c_out),
                    nn.ReLU(inplace=True),
                    nn.MaxPool2d(2),
                )
            )
        self.features = nn.Sequential(*stages)
        self.gap = nn.AdaptiveAvgPool2d(1)
        self.head = nn.Sequential(
            nn.Flatten(),
            nn.Dropout(p=dropout),
            nn.Linear(widths[n_blocks], num_classes),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        feats = self.features(x)
        pooled = self.gap(feats)
        return self.head(pooled)
|
|
|
|
| |
|
|
|
|
def _run_epoch(
    model: nn.Module,
    loader: torch.utils.data.DataLoader,
    criterion: nn.Module,
    optimizer: torch.optim.Optimizer | None,
) -> tuple[float, float]:
    """Run one pass over *loader*; trains when an optimizer is supplied.

    Returns:
        (mean_loss, accuracy) aggregated over every sample in the loader.
    """
    is_train = optimizer is not None
    if is_train:
        model.train()
    else:
        model.eval()

    loss_sum = 0.0
    n_correct = 0
    n_seen = 0

    # Gradients only when training; validation runs under no_grad.
    grad_ctx = torch.enable_grad() if is_train else torch.no_grad()
    progress = tqdm(loader, desc="train" if is_train else "val ", leave=False)
    with grad_ctx:
        for batch_imgs, batch_labels in progress:
            x = batch_imgs.to(DEVICE)
            y = batch_labels.to(DEVICE)
            logits = model(x)
            loss = criterion(logits, y)

            if is_train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

            batch = len(x)
            # Weight the (mean) batch loss by batch size so uneven final
            # batches don't skew the epoch average.
            loss_sum += loss.item() * batch
            n_correct += (logits.argmax(dim=1) == y).sum().item()
            n_seen += batch

    return loss_sum / n_seen, n_correct / n_seen
|
|
|
|
def train(
    dataset_dir: str | Path,
    output_dir: str | Path,
    *,
    n_blocks: int = 3,
    dropout: float = 0.2,
    epochs: int = 60,
    lr: float = 1e-3,
    weight_decay: float = 1e-4,
    batch_size: int = 64,
    patience: int = 10,
    num_workers: int = _DEFAULT_WORKERS,
    num_classes: int = 4,
) -> dict:
    """
    Train one ArcDetectorCNN configuration with AdamW + cosine LR decay.

    The best-so-far weights (by validation accuracy) are checkpointed to
    output_dir/best_model.pt, and the run configuration (including the final
    best_val_acc and num_classes) is written to output_dir/config.json so
    load_model() can restore the architecture later.

    Args:
        dataset_dir: Root of the dataset passed to get_dataloaders().
        output_dir: Directory for best_model.pt / config.json (created if missing).
        n_blocks: Number of conv blocks in ArcDetectorCNN (3 or 4).
        dropout: Dropout probability before the classifier head.
        epochs: Maximum epoch count; the cosine schedule spans all of them.
        lr: Initial AdamW learning rate.
        weight_decay: AdamW weight decay.
        batch_size: Mini-batch size for both loaders.
        patience: Stop after this many epochs without val-accuracy improvement.
        num_workers: DataLoader worker processes.
        num_classes: 3 for the legacy 3-class model, 4 to include the no_arc class.

    Returns:
        dict with "history" (per-epoch loss/accuracy records) plus the config
        keys (n_blocks, dropout, best_val_acc, num_classes).
    """
    output_dir = Path(output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)


    train_loader, val_loader = get_dataloaders(
        dataset_dir, batch_size=batch_size, num_workers=num_workers
    )


    model = ArcDetectorCNN(n_blocks=n_blocks, dropout=dropout, num_classes=num_classes).to(DEVICE)
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    # Cosine decay from lr to ~0 over the full (maximum) epoch budget.
    scheduler = CosineAnnealingLR(optimizer, T_max=epochs)


    best_val_acc = 0.0
    epochs_no_improve = 0
    history: list[dict] = []


    epoch_bar = tqdm(range(1, epochs + 1), desc=f"blocks={n_blocks} drop={dropout}", unit="ep")
    for epoch in epoch_bar:
        # Passing the optimizer puts _run_epoch into training mode; None = eval.
        tr_loss, tr_acc = _run_epoch(model, train_loader, criterion, optimizer)
        val_loss, val_acc = _run_epoch(model, val_loader, criterion, None)
        scheduler.step()


        history.append(
            {
                "epoch": epoch,
                "tr_loss": tr_loss,
                "tr_acc": tr_acc,
                "val_loss": val_loss,
                "val_acc": val_acc,
            }
        )
        epoch_bar.set_postfix(
            tr_loss=f"{tr_loss:.3f}",
            tr_acc=f"{tr_acc:.3f}",
            val_loss=f"{val_loss:.3f}",
            val_acc=f"{val_acc:.3f}",
            best=f"{best_val_acc:.3f}",
        )


        # Checkpoint only on improvement; the saved file always holds the
        # best-so-far weights, not the final-epoch weights.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            epochs_no_improve = 0
            torch.save(model.state_dict(), output_dir / "best_model.pt")
        else:
            epochs_no_improve += 1
            if epochs_no_improve >= patience:
                tqdm.write(f"Early stop at epoch {epoch} (no improvement for {patience} epochs)")
                break


    # Persist the architecture hyperparameters so load_model() can rebuild
    # the network before loading the state dict.
    config = {
        "n_blocks": n_blocks,
        "dropout": dropout,
        "best_val_acc": best_val_acc,
        "num_classes": num_classes,
    }
    (output_dir / "config.json").write_text(json.dumps(config, indent=2))
    logger.info(f"Best val accuracy: {best_val_acc:.4f} -> {output_dir}/best_model.pt")
    return {"history": history, **config}
|
|
|
|
def sweep(dataset_dir: str | Path, output_root: str | Path, num_classes: int = 4) -> None:
    """Train every combination of n_blocks ∈ {3, 4} and dropout ∈ {0.2, 0.3},
    then log all runs ranked by validation accuracy."""
    output_root = Path(output_root)
    summary: list[dict] = []
    for n_blocks in (3, 4):
        for dropout in (0.2, 0.3):
            run_name = f"blocks{n_blocks}_drop{dropout}"
            logger.info(f"\n{'=' * 60}\nRun: {run_name}\n{'=' * 60}")
            outcome = train(
                dataset_dir,
                output_root / run_name,
                n_blocks=n_blocks,
                dropout=dropout,
                num_classes=num_classes,
            )
            summary.append({"run": run_name, "val_acc": outcome["best_val_acc"]})


    ranked = sorted(summary, key=lambda r: r["val_acc"], reverse=True)
    logger.info("\n── Sweep results (sorted by val accuracy) ──")
    for r in ranked:
        logger.info(f" {r['run']:30s} val_acc={r['val_acc']:.4f}")
    logger.info(f"\nBest run: {ranked[0]['run']}")
|
|
|
|
| |
|
|
|
|
def load_model(model_path: str | Path, config_path: str | Path | None = None) -> ArcDetectorCNN:
    """Load a trained ArcDetectorCNN from a .pt state-dict file.

    Args:
        model_path: Path to the checkpoint (e.g. runs/<run>/best_model.pt).
        config_path: Optional explicit path to config.json. Defaults to the
            config.json next to model_path.

    Returns:
        The model in eval() mode on CPU; the caller moves it to a device.

    The config restores n_blocks, dropout, and num_classes. Old checkpoints
    whose config lacks "num_classes" load as 3-class models (backward
    compatible); a missing config falls back to all architecture defaults.
    """
    model_path = Path(model_path)
    # Normalize once instead of re-wrapping config_path in Path() per use.
    config_path = Path(config_path) if config_path is not None else model_path.parent / "config.json"
    config: dict = json.loads(config_path.read_text()) if config_path.exists() else {}
    model = ArcDetectorCNN(
        n_blocks=config.get("n_blocks", 3),
        dropout=config.get("dropout", 0.2),
        num_classes=config.get("num_classes", 3),
    )
    # weights_only=True restricts unpickling to tensors/primitives, avoiding
    # arbitrary-code execution from untrusted checkpoint files.
    model.load_state_dict(torch.load(model_path, map_location="cpu", weights_only=True))
    model.eval()
    return model
|
|
|
|
def evaluate(model_path: str | Path, dataset_dir: str | Path) -> None:
    """
    Evaluate a trained model on the held-out split and log the results.

    Logs per-class precision/recall/F1 (classification_report) and the
    confusion matrix (rows = true class, cols = predicted class).

    Args:
        model_path: Path to best_model.pt; its config.json is read by
            load_model() from the same directory.
        dataset_dir: Dataset root passed to get_dataloaders().
    """
    model = load_model(model_path).to(DEVICE)
    # Derive the class count from the restored head instead of re-parsing
    # config.json here: a stale or missing config can no longer disagree
    # with the checkpoint that was actually loaded.
    num_classes = model.head[-1].out_features

    # NOTE(review): train() unpacks get_dataloaders() as (train, val); here the
    # second loader is treated as the test split — confirm get_dataloaders()
    # really returns the held-out split in eval context.
    _, test_loader = get_dataloaders(dataset_dir, batch_size=64, num_workers=_DEFAULT_WORKERS)


    all_preds: list[int] = []
    all_labels: list[int] = []
    with torch.no_grad():
        for imgs, labels in test_loader:
            preds = model(imgs.to(DEVICE)).argmax(dim=1).cpu().tolist()
            all_preds.extend(preds)
            all_labels.extend(labels.tolist())


    logger.info("\n── Test set results ────────────────────────────────────────────────")
    logger.info(
        classification_report(all_labels, all_preds, target_names=CLASS_NAMES[:num_classes])
    )
    logger.info("Confusion matrix (rows=true, cols=pred):")
    logger.info(confusion_matrix(all_labels, all_preds))
|
|
|
|
| |
|
|
|
|
def predict_orientation(
    side1: np.ndarray,
    side2: np.ndarray,
    model: ArcDetectorCNN,
    confidence_threshold: float = 0.5,
) -> tuple[str, str]:
    """
    Classify which side of a sill contains the door arc, and its orientation.

    Args:
        side1: HxWx3 or HxW uint8 crop on one side of the sill.
        side2: Crop on the other side.
        model: Trained ArcDetectorCNN (use load_model() to get one).
        confidence_threshold: If the best softmax score is below this, return "unknown".

    Returns:
        (arc_side, orientation_class) where arc_side is "side1", "side2", or "unknown"
        and orientation_class is one of CLASS_NAMES or "unknown".
    """

    def _prep(crop: np.ndarray) -> torch.Tensor:
        # Collapse to grayscale, apply the same cleanup + transform pipeline
        # used when building the training dataset, add a batch dim.
        if crop.ndim == 2:
            gray = crop
        else:
            gray = cv2.cvtColor(crop, cv2.COLOR_RGB2GRAY)
        cleaned = preprocess_real_crop(gray)
        return TRANSFORM(Image.fromarray(cleaned)).unsqueeze(0)

    model.eval()
    with torch.no_grad():
        probs_a = model(_prep(side1).to(DEVICE)).softmax(dim=1)[0]
        probs_b = model(_prep(side2).to(DEVICE)).softmax(dim=1)[0]

    if probs_a.shape[0] == 4:
        # 4-class model: "arc confidence" = 1 - P(no_arc) for each side.
        arc_conf_a = 1.0 - probs_a[NO_ARC_IDX].item()
        arc_conf_b = 1.0 - probs_b[NO_ARC_IDX].item()
        if max(arc_conf_a, arc_conf_b) < confidence_threshold:
            return "unknown", "unknown"
        if arc_conf_a >= arc_conf_b:
            # Orientation decided among the three arc classes only.
            return "side1", CLASS_NAMES[probs_a[:3].argmax().item()]
        return "side2", CLASS_NAMES[probs_b[:3].argmax().item()]

    # Legacy 3-class model: compare the top softmax score per side.
    conf_a, cls_a = probs_a.max(dim=0)
    conf_b, cls_b = probs_b.max(dim=0)
    if max(conf_a.item(), conf_b.item()) < confidence_threshold:
        return "unknown", "unknown"
    if conf_a.item() >= conf_b.item():
        return "side1", CLASS_NAMES[cls_a.item()]
    return "side2", CLASS_NAMES[cls_b.item()]
|
|
|
|
| |
|
|
# Default locations, resolved relative to this source file.
_DATASET_DIR = _HERE / "mixed_dataset"
_RUNS_DIR = _HERE / "runs"


if __name__ == "__main__":
    # The default sweep trains 4-class models (sweep(num_classes=4)), so the
    # CLI description no longer claims "3-class".
    parser = argparse.ArgumentParser(description="ArcDetectorCNN — train or evaluate")
    parser.add_argument("--train", action="store_true", help="Run mini hyperparameter sweep")
    parser.add_argument("--eval", action="store_true", help="Evaluate best model on test split")
    parser.add_argument(
        "--dataset",
        type=Path,
        default=_DATASET_DIR,
        help=f"Dataset directory (default: {_DATASET_DIR})",
    )
    parser.add_argument(
        "--runs-dir",
        type=Path,
        default=_RUNS_DIR,
        help=f"Runs directory (default: {_RUNS_DIR})",
    )
    parser.add_argument(
        "--model",
        type=Path,
        default=None,
        help="Path to .pt weights for --eval (auto-detected from --runs-dir if omitted)",
    )
    args = parser.parse_args()

    if args.train:
        sweep(args.dataset, args.runs_dir)

    if args.eval:
        model_path = args.model
        if model_path is None:
            # Auto-select the run with the highest recorded validation accuracy.
            run_configs = list(args.runs_dir.glob("*/config.json"))
            if not run_configs:
                logger.info(f"No trained runs found in {args.runs_dir}. Run --train first.")
                sys.exit(1)
            best = max(
                run_configs,
                key=lambda p: json.loads(p.read_text()).get("best_val_acc", 0),
            )
            model_path = best.parent / "best_model.pt"
            logger.info(f"Auto-selected model: {model_path}")
        # Guard against a config.json whose checkpoint was deleted or moved.
        if not model_path.exists():
            logger.info(f"Model weights not found: {model_path}")
            sys.exit(1)
        evaluate(model_path, args.dataset)

    if not args.train and not args.eval:
        parser.print_help()
|
|