# door_orientation/cnn_dataset_builder.py
# Author: Clement — "add dataset and utils" (commit 4b6c5c7)
"""
Dataset builder for door orientation detection.
All images — real and "synthetic" — are derived from the hand-crafted real crops.
"Synthetic" images are generated by applying heavy augmentations to the real crops
so that the entire dataset shares the same visual style (grayscale floorplan look).
Four classes (sill always at the bottom of the crop):
double — two arcs, one from each bottom corner
hinge_left — single arc from the bottom-left corner
hinge_right — single arc from the bottom-right corner
no_arc — no door arc visible (plain room interior / background)
Output layout (PyTorch ImageFolder + explicit train/test split):
<output_dir>/
train/ double/ hinge_left/ hinge_right/ no_arc/
test/ double/ hinge_left/ hinge_right/ no_arc/
Note: no_arc/ is optional — if raw_crops/no_arc/ is absent a warning is logged
and the builder skips it, keeping backward compatibility with 3-class datasets.
Usage:
python cnn_dataset_builder.py --real-crops real_crops_dataset/ \\
--output-dir mixed_dataset/ \\
--n-per-class 2000
"""
# ruff: noqa: N806
from __future__ import annotations
import argparse
import re
from collections import defaultdict
from collections.abc import Callable
from pathlib import Path
import cv2
import numpy as np
import torch
from loguru import logger
from PIL import Image
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision import transforms
from tqdm import tqdm
# ── constants ──────────────────────────────────────────────────────────────────
IMG_SIZE = 128 # px, square
TRAIN_SPLIT = 0.85 # fraction of samples used for training
# Class names — must match the subfolder names under the raw-crops dir and the
# per-split output folders (alphabetical order also fixes ImageFolder class ids).
LABELS = ("double", "hinge_left", "hinge_right", "no_arc")
# Horizontal flip label mapping. no_arc stays no_arc — a plain room interior
# looks the same mirrored. Aim for no_arc sample count ≈ each door class count.
# double is symmetric, so it also maps to itself; only the hinge classes swap.
_FLIP_MAP = {
"hinge_left": "hinge_right",
"hinge_right": "hinge_left",
"double": "double",
"no_arc": "no_arc",
}
# ── helpers ────────────────────────────────────────────────────────────────────
def _plan_id_from_path(p: Path) -> str:
"""Return the plan ID prefix (e.g. 'plan-32') from a crop filename."""
m = re.match(r"^(plan-\d+)", p.stem)
return m.group(1) if m else p.stem
def _split_paths_by_plan(
    paths: list[Path],
    rng: np.random.Generator,
    test_fraction: float,
) -> tuple[list[Path], list[Path]]:
    """
    Split paths into (train_paths, test_paths) ensuring all crops from a given
    floorplan land in the same split (no data leakage across the sill boundary).
    """
    grouped: dict[str, list[Path]] = defaultdict(list)
    for path in paths:
        grouped[_plan_id_from_path(path)].append(path)
    ordered_ids = sorted(grouped)
    rng.shuffle(ordered_ids)  # type: ignore[arg-type]
    n_test = max(1, int(len(ordered_ids) * test_fraction))
    test_ids = set(ordered_ids[:n_test])
    train_ids = set(ordered_ids[n_test:])
    train_paths = [p for pid in sorted(train_ids) for p in grouped[pid]]
    test_paths = [p for pid in sorted(test_ids) for p in grouped[pid]]
    return train_paths, test_paths
def _to_grayscale(img_bgr: np.ndarray, size: int) -> np.ndarray:
    """Convert a BGR (or already 2-D) image to grayscale and resize to size x size."""
    if img_bgr.ndim == 3:
        gray = cv2.cvtColor(img_bgr, cv2.COLOR_BGR2GRAY)
    else:
        gray = img_bgr
    return cv2.resize(gray, (size, size), interpolation=cv2.INTER_LINEAR)
def _add_clutter(img: np.ndarray, rng: np.random.Generator, size: int) -> None:
    """
    Overlay sparse dark hollow rectangles simulating furniture / room labels.
    Rectangles are kept in the upper 65% of the image so they don't cover the sill.
    Mutates ``img`` in place.
    """
    limit = size - 1
    thickness = int(rng.choice([1, 1, 2]))  # weighted toward 1-px lines
    n_boxes = int(rng.integers(1, 4))
    for _ in range(n_boxes):
        box_w = int(rng.uniform(0.05, 0.20) * size)
        box_h = int(rng.uniform(0.05, 0.15) * size)
        x0 = int(rng.uniform(0.02, 0.85) * size)
        y0 = int(rng.uniform(0.02, 0.60) * size)  # stay away from sill area
        x1 = min(x0 + box_w, limit)
        y1 = min(y0 + box_h, limit)
        cv2.rectangle(img, (x0, y0), (x1, y1), 0, thickness)
# ── real-crop preprocessing & augmentation ─────────────────────────────────────
def preprocess_real_crop(gray: np.ndarray, size: int = IMG_SIZE) -> np.ndarray:
    """
    Apply CLAHE then resize to size x size.

    CLAHE boosts local contrast so the arc geometry is more visible on real
    crops that have low-contrast lines on a light background.
    """
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = clahe.apply(gray)
    return cv2.resize(enhanced, (size, size), interpolation=cv2.INTER_LINEAR)
def augment_real_crop(
    img_bgr: np.ndarray, rng: np.random.Generator, label: str, size: int = IMG_SIZE
) -> list[tuple[np.ndarray, str, str]]:
    """
    Return four (grayscale uint8, label, tag) triples for one real crop:
      - the CLAHE-enhanced original              → tag "orig"
      - a random rotation within ±10°            → tag "rot{angle}"
      - a scale jitter (0.85-1.0, centre crop)   → tag "sc{pct}"
      - a horizontal flip with label swap        → tag "hf"
    """
    base = preprocess_real_crop(_to_grayscale(img_bgr, size), size)
    out: list[tuple[np.ndarray, str, str]] = [(base.copy(), label, "orig")]

    # Rotation ±10° with replicated border so no black corners appear.
    angle = float(rng.uniform(-10.0, 10.0))
    rot_mat = cv2.getRotationMatrix2D((size / 2.0, size / 2.0), angle, 1.0)
    rotated = cv2.warpAffine(
        base, rot_mat, (size, size), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE
    )
    out.append((rotated, label, f"rot{abs(int(angle))}"))

    # Scale jitter: centre-crop 85-100% of the image then resize back.
    scale = float(rng.uniform(0.85, 1.0))
    crop_sz = max(int(size * scale), 32)
    off = (size - crop_sz) // 2
    zoomed = cv2.resize(
        base[off : off + crop_sz, off : off + crop_sz],
        (size, size),
        interpolation=cv2.INTER_LINEAR,
    )
    out.append((zoomed, label, f"sc{int(scale * 100)}"))

    # Horizontal flip swaps hinge_left <-> hinge_right via _FLIP_MAP.
    out.append((cv2.flip(base, 1), _FLIP_MAP[label], "hf"))
    return out
def synthesize_from_real_crop(
    img_bgr: np.ndarray, rng: np.random.Generator, label: str, size: int = IMG_SIZE
) -> tuple[np.ndarray, str, str]:
    """
    Generate one heavily-augmented image from a real crop for the synthetic batch.

    Randomised augmentations, applied in order:
      1. Horizontal flip (50%) with label swap (hinge_left ↔ hinge_right)
      2. Rotation ±10° (BORDER_REPLICATE — no black fill)
      3. Scale jitter 0.70-1.0 (centre crop then resize)
      4. Sparse dark clutter rectangles (50%)
      5. Gaussian blur — sigma drawn from [0, 0.6], applied only when > 0.3
      6. Morphological erode (40%) or dilate (20%) — thins or thickens arc lines

    Returns (grayscale uint8, label, tag) where tag encodes the applied transforms,
    e.g. "hf_r7_s85_cl_er".
    """
    img = preprocess_real_crop(_to_grayscale(img_bgr, size), size)
    tags: list[str] = []

    # 1. horizontal flip with label swap
    if rng.random() < 0.5:
        img = cv2.flip(img, 1)
        label = _FLIP_MAP[label]
        tags.append("hf")

    # 2. rotation ±10°
    angle = float(rng.uniform(-10.0, 10.0))
    rot_mat = cv2.getRotationMatrix2D((size / 2.0, size / 2.0), angle, 1.0)
    img = cv2.warpAffine(
        img, rot_mat, (size, size), flags=cv2.INTER_LINEAR, borderMode=cv2.BORDER_REPLICATE
    )
    tags.append(f"r{abs(int(angle))}")

    # 3. scale jitter (centre crop 70-100%)
    scale = float(rng.uniform(0.70, 1.0))
    crop_sz = max(int(size * scale), 32)
    off = (size - crop_sz) // 2
    img = cv2.resize(
        img[off : off + crop_sz, off : off + crop_sz], (size, size), interpolation=cv2.INTER_LINEAR
    )
    tags.append(f"s{int(scale * 100)}")

    # 4. sparse clutter
    if rng.random() < 0.5:
        _add_clutter(img, rng, size)
        tags.append("cl")

    # 5. blur (sigma always drawn; only applied when it exceeds 0.3)
    sigma = float(rng.uniform(0.0, 0.6))
    if sigma > 0.3:
        img = cv2.GaussianBlur(img, (0, 0), sigma)
        tags.append("bl")

    # 6. morph — bias toward erode (thinner lines) to match real crop arc widths
    morph = rng.choice(["none", "erode", "dilate"], p=[0.40, 0.40, 0.20])
    if morph != "none":
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        op = cv2.MORPH_ERODE if morph == "erode" else cv2.MORPH_DILATE
        img = cv2.morphologyEx(img, op, kernel, iterations=1)
        tags.append(morph[:2])  # "er" or "di"

    return img, label, "_".join(tags)
# ── validation ─────────────────────────────────────────────────────────────────
def _plan_ids_in_split(split_dir: Path) -> set[str]:
    """Return plan IDs of real (non-synthetic) crops found under split_dir/{label}/."""
    found: set[str] = set()
    for label in LABELS:
        real_crops = (
            crop
            for crop in (split_dir / label).glob("*.png")
            if not crop.name.startswith("synth_")
        )
        for crop in real_crops:
            plan_id = _plan_id_from_path(crop)
            if plan_id.startswith("plan-"):
                found.add(plan_id)
    return found
def _source_plan_ids(real_crops_dir: Path) -> set[str]:
    """Return all plan IDs present in the labeled source crops directory."""
    ids: set[str] = set()
    for class_dir in (real_crops_dir / label for label in LABELS):
        if not class_dir.exists():
            continue
        ids.update(
            pid
            for pid in map(_plan_id_from_path, class_dir.glob("*.png"))
            if pid.startswith("plan-")
        )
    return ids
def check_no_plan_leakage(output_dir: Path, real_crops_dir: Path) -> None:
    """
    Verify that:
      - No floorplan ID appears in both train and test splits.
      - All floorplan IDs from the source crops are represented in the dataset.

    Synthetic images (prefixed with "synth_") are skipped — they are generated from
    train crops only and intentionally appear in both splits.

    Raises:
        AssertionError: if either check fails. Raised explicitly (not via the
            `assert` statement) so the validation still runs under `python -O`,
            which strips asserts.
    """
    train_plans = _plan_ids_in_split(output_dir / "train")
    test_plans = _plan_ids_in_split(output_dir / "test")
    overlap = train_plans & test_plans
    if overlap:
        raise AssertionError(f"Data leakage: plans in both splits: {sorted(overlap)}")
    missing = _source_plan_ids(real_crops_dir) - (train_plans | test_plans)
    if missing:
        raise AssertionError(f"Plans not represented in dataset: {sorted(missing)}")
    logger.info(
        f"Leakage check passed: {len(train_plans)} train plans, "
        f"{len(test_plans)} test plans, 0 overlap."
    )
# ── dataset generation ─────────────────────────────────────────────────────────
# Signature shared by the image-writer callbacks (_save_real / _save_synth):
# (image, split, label, stem, tag) -> None
_SaveFn = Callable[[np.ndarray, str, str, str, str], None]
def _process_real_label(
    label: str,
    real_crops_dir: Path,
    rng: np.random.Generator,
    real_test_fraction: float,
    size: int,
    save_real: _SaveFn,
) -> list[Path]:
    """
    Split one label's crops by floorplan and write real train/test images.

    Test crops are written as-is (CLAHE only, tag "orig"); train crops are
    expanded x4 via augment_real_crop. Returns the train crop paths so the
    caller can reuse them for synthetic generation.

    Note: skip conditions are logged at WARNING level (the module docstring
    promises a warning for missing class dirs); the old code logged them as
    INFO with a hand-written "Warning:" prefix.
    """
    class_dir = real_crops_dir / label
    if not class_dir.exists():
        logger.warning(f"{class_dir} not found — skipping real crops for {label}")
        return []
    paths = sorted(class_dir.glob("*.png"))
    train_paths, test_paths = _split_paths_by_plan(paths, rng, real_test_fraction)
    for p in tqdm(test_paths, desc=f"real test/{label}"):
        img_bgr = cv2.imread(str(p))
        if img_bgr is None:
            logger.warning(f"could not read {p} — skipping")
            continue
        save_real(preprocess_real_crop(_to_grayscale(img_bgr, size), size), "test", label, p.stem, "orig")
    for p in tqdm(train_paths, desc=f"real train/{label}"):
        img_bgr = cv2.imread(str(p))
        if img_bgr is None:
            logger.warning(f"could not read {p} — skipping")
            continue
        for aug_img, aug_label, tag in augment_real_crop(img_bgr, rng, label, size):
            save_real(aug_img, "train", aug_label, p.stem, tag)
    return train_paths
def _process_synth_label(
    label: str,
    crops: list[Path],
    rng: np.random.Generator,
    n_train: int,
    n_test: int,
    size: int,
    save_synth: _SaveFn,
) -> None:
    """
    Generate synthetic images for one label from the provided train crop paths.

    Each synthetic sample starts from a randomly-picked train crop and goes
    through synthesize_from_real_crop. Unreadable source images are silently
    skipped (the requested count is then an upper bound).
    """
    if not crops:
        # WARNING level: silently producing zero synthetics would skew the class balance.
        logger.warning(f"no real train crops for {label} — skipping synthetic generation")
        return
    for split, n in [("train", n_train), ("test", n_test)]:
        for _ in tqdm(range(n), desc=f"synth {split}/{label}"):
            p = crops[int(rng.integers(0, len(crops)))]
            img_bgr = cv2.imread(str(p))
            if img_bgr is None:
                continue
            aug_img, aug_label, tag = synthesize_from_real_crop(img_bgr, rng, label, size)
            save_synth(aug_img, split, aug_label, p.stem, tag)
def generate_dataset(
    n_synthetic_per_class: int,
    output_dir: str | Path,
    real_crops_dir: str | Path,
    seed: int = 42,
    size: int = IMG_SIZE,
    train_split: float = TRAIN_SPLIT,
    real_test_fraction: float = 0.20,
) -> None:
    """
    Build a mixed dataset.
    Real crops are split at the floorplan level (no individual-file shuffling)
    to prevent data leakage between train and test:
      - ~real_test_fraction of floorplans held out as-is (CLAHE only) → test split
      - the remaining floorplans augmented x4 (augment_real_crop) → train split
    Synthetic (augmented from real train crops via synthesize_from_real_crop):
      - n_synthetic_per_class x train_split → train split
      - n_synthetic_per_class x (1 - train_split) → test split
    Args:
        n_synthetic_per_class: number of synthetic images per class.
        output_dir: root output directory.
        real_crops_dir: directory with subdirs double/, hinge_left/,
            hinge_right/ and optionally no_arc/ (missing class dirs are
            logged and skipped).
        seed: RNG seed — fixes both the plan split and all augmentations.
        size: output image side length in pixels.
        train_split: fraction of synthetic images sent to the train split.
        real_test_fraction: fraction of floorplans held out for the test split.
    Raises:
        AssertionError: from check_no_plan_leakage if a plan leaks across splits.
    """
    output_dir = Path(output_dir)
    real_crops_dir = Path(real_crops_dir)
    # Pre-create every split/label directory so the save closures can write blindly.
    for split in ("train", "test"):
        for label in LABELS:
            (output_dir / split / label).mkdir(parents=True, exist_ok=True)
    # Single Generator shared by split + all augmentations → fully reproducible runs.
    rng = np.random.default_rng(seed)
    # Per-label counter used as collision-breaker suffix for synthetic filenames
    synth_counters: dict[str, int] = dict.fromkeys(LABELS, 0)
    # Writer closures capture output_dir (and synth_counters) so the per-label
    # helpers only deal with image/split/label/stem/tag.
    def _save_real(img: np.ndarray, split: str, label: str, stem: str, tag: str) -> None:
        Image.fromarray(img).save(output_dir / split / label / f"{stem}_{tag}.png")
    def _save_synth(img: np.ndarray, split: str, label: str, stem: str, tag: str) -> None:
        idx = synth_counters[label]
        synth_counters[label] += 1
        Image.fromarray(img).save(output_dir / split / label / f"synth_{stem}_{tag}_{idx:04d}.png")
    # ── real crops: split by floorplan, process ────────────────────────────────
    # NOTE: iterating LABELS in order keeps the rng consumption deterministic.
    train_real_paths: dict[str, list[Path]] = {
        label: _process_real_label(label, real_crops_dir, rng, real_test_fraction, size, _save_real)
        for label in LABELS
    }
    # ── synthetic crops (augmented from real train crops) ─────────────────────
    n_train = int(n_synthetic_per_class * train_split)
    n_test_synth = n_synthetic_per_class - n_train
    for label in LABELS:
        _process_synth_label(
            label, train_real_paths[label], rng, n_train, n_test_synth, size, _save_synth
        )
    # Fail loudly if any floorplan ended up in both splits.
    check_no_plan_leakage(output_dir, real_crops_dir)
    logger.info(f"\nDataset written to {output_dir}")
    # Per-class counts for a quick sanity read of the final dataset.
    for split in ("train", "test"):
        for label in LABELS:
            n = len(list((output_dir / split / label).glob("*.png")))
            logger.info(f" {split}/{label}: {n}")
# ── dataloader ─────────────────────────────────────────────────────────────────
# Shared train/test transform: grayscale → resize → tensor → normalize.
TRANSFORM = transforms.Compose(
    [
        transforms.Grayscale(num_output_channels=1),
        transforms.Resize((IMG_SIZE, IMG_SIZE)),
        transforms.ToTensor(),
        # mean/std of 0.5 maps ToTensor's [0, 1] range onto [-1, 1]
        transforms.Normalize(mean=[0.5], std=[0.5]),
    ]
)
def get_dataloaders(
    dataset_dir: str | Path,
    batch_size: int = 64,
    num_workers: int = 4,
) -> tuple[DataLoader, DataLoader]:
    """
    Build (train_loader, test_loader) over the ImageFolder dataset.
    Classes sorted alphabetically: 0=double, 1=hinge_left, 2=hinge_right, 3=no_arc.
    """
    root = Path(dataset_dir)
    common = {
        "batch_size": batch_size,
        "num_workers": num_workers,
        "pin_memory": torch.cuda.is_available(),
    }
    train_loader = DataLoader(
        datasets.ImageFolder(root / "train", transform=TRANSFORM), shuffle=True, **common
    )
    test_loader = DataLoader(
        datasets.ImageFolder(root / "test", transform=TRANSFORM), shuffle=False, **common
    )
    return train_loader, test_loader
# ── entry point ────────────────────────────────────────────────────────────────
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Door orientation dataset builder")
    parser.add_argument(
        "--real-crops",
        type=Path,
        default=Path(__file__).parent / "labeled_crops",
        # Fixed: old help text said "no_door/", which matches no entry in LABELS;
        # the optional fourth class directory is no_arc/.
        help="Directory containing double/, hinge_left/, hinge_right/ and optionally no_arc/ subfolders",
    )
    parser.add_argument(
        "--output-dir",
        type=Path,
        default=Path(__file__).parent / "mixed_dataset",
        help="Output directory (default: mixed_dataset/)",
    )
    parser.add_argument(
        "--n-per-class",
        type=int,
        default=2_000,
        help="Number of synthetic images per class (default: 2000)",
    )
    args = parser.parse_args()
    generate_dataset(
        n_synthetic_per_class=args.n_per_class,
        output_dir=args.output_dir,
        real_crops_dir=args.real_crops,
    )