from __future__ import annotations

import argparse
import colorsys
import heapq
import json
import math
import random
from collections import deque
from pathlib import Path
from typing import Dict, List, Sequence, Tuple

import cv2
import numpy as np
from PIL import Image
from scipy.ndimage import zoom
from tqdm import tqdm

Cell = Tuple[int, int]
Point = Tuple[int, int]
Grid = List[List[int]]

# ── Palette + colour helpers ───────────────────────────────────────────

PALETTE = [
    (236, 152, 99),   # peach
    (111, 163, 220),  # clear blue
    (140, 197, 113),  # grass green
    (223, 127, 186),  # pink
    (194, 160, 85),   # ochre
    (157, 134, 212),  # purple
    (237, 211, 81),   # yellow
    (75, 184, 173),   # teal
    (224, 122, 112),  # coral red
    (122, 160, 106),  # sage
]

PALETTE_HUE = [
    colorsys.rgb_to_hsv(r / 255, g / 255, b / 255)[0] * 360
    for r, g, b in PALETTE
]


def hue_gap(h1: float, h2: float) -> float:
    d = abs(h1 - h2) % 360
    return min(d, 360 - d)


def pair_hue_gap(pair_a: Tuple[int, int], pair_b: Tuple[int, int]) -> float:
    """Maximum, over each colour in pair_a, of the nearest hue distance to any
    colour in pair_b. Measures how visually distinct the two pairs are at
    their closest point."""
    best = 0.0
    for ca in pair_a:
        nearest = min(hue_gap(PALETTE_HUE[ca], PALETTE_HUE[cb]) for cb in pair_b)
        best = max(best, nearest)
    return best


def jitter_color(
    rng: random.Random,
    rgb: Tuple[int, int, int],
    hue_jitter: float = 18.0,
    sat_jitter: float = 0.12,
    val_jitter: float = 0.10,
) -> Tuple[float, float, float]:
    """Apply a small HSV perturbation to a palette anchor so each region's
    rendered endpoints are unique (defeats colour-histogram attacks)."""
    r, g, b = [c / 255.0 for c in rgb]
    h, s, v = colorsys.rgb_to_hsv(r, g, b)
    h = (h + rng.uniform(-hue_jitter, hue_jitter) / 360.0) % 1.0
    s = max(0.0, min(1.0, s + rng.uniform(-sat_jitter, sat_jitter)))
    v = max(0.0, min(1.0, v + rng.uniform(-val_jitter, val_jitter)))
    rr, gg, bb = colorsys.hsv_to_rgb(h, s, v)
    return (rr * 255.0, gg * 255.0, bb * 255.0)


# ── Partition (connected-component growing on a small grid) ────────────

def sample_spaced_cells(
    rng: random.Random,
    rows: int,
    cols: int,
    count: int,
    min_dist: float,
    max_attempts: int = 800,
) -> List[Cell]:
    cells: List[Cell] = []
    for _ in range(max_attempts):
        if len(cells) == count:
            break
        candidate = (rng.randrange(rows), rng.randrange(cols))
        if all(math.hypot(candidate[0] - r, candidate[1] - c) >= min_dist for r, c in cells):
            cells.append(candidate)
    while len(cells) < count:
        candidate = (rng.randrange(rows), rng.randrange(cols))
        if candidate not in cells:
            cells.append(candidate)
    return cells


def _make_noise_field(rng: random.Random, rows: int, cols: int, scale: int = 15) -> np.ndarray:
    lo_r = max(2, rows // scale)
    lo_c = max(2, cols // scale)
    lo = np.array([[rng.gauss(0, 1) for _ in range(lo_c)] for _ in range(lo_r)])
    field = zoom(lo, (rows / lo_r, cols / lo_c), order=1)
    return field[:rows, :cols]


def make_connected_partition(
    rng: random.Random,
    rows: int,
    cols: int,
    num_regions: int,
) -> Tuple[np.ndarray, List[Cell]]:
    """Two-phase partition.

    Phase 1 (sequential round-robin): each region in turn claims one cell from
    its current frontier, repeated until every region has at least
    ``min_cells_per_region`` cells. This guarantees a minimum size for every
    region and prevents tiny / vanishing regions.

    Phase 2 (priority-queue, Voronoi-like): the remaining unclaimed cells are
    distributed using the original distance-to-centroid + noise-field cost so
    the final boundaries look organic.
""" labels = np.full((rows, cols), -1, dtype=np.int32) seeds = sample_spaced_cells( rng=rng, rows=rows, cols=cols, count=num_regions, min_dist=max(2.2, min(rows, cols) / max(3.4, math.sqrt(num_regions) + 0.5)), ) dirs = [(-1, 0), (1, 0), (0, -1), (0, 1)] cr = [float(r) for r, c in seeds] cc = [float(c) for r, c in seeds] size = [1] * num_regions # ── Phase 1: sequential round-robin grow ── fair_share = (rows * cols) // num_regions min_cells_per_region = max(8, (fair_share * 7) // 10) # 70% of fair share floor frontiers: List[List[Cell]] = [[] for _ in range(num_regions)] for rid, (r, c) in enumerate(seeds): labels[r, c] = rid for dr, dc in dirs: nr, nc = r + dr, c + dc if 0 <= nr < rows and 0 <= nc < cols and labels[nr, nc] == -1: frontiers[rid].append((nr, nc)) while True: # Stop when every region reached the minimum, OR no region can grow. if all(s >= min_cells_per_region for s in size): break progress = False for rid in range(num_regions): if size[rid] >= min_cells_per_region: continue # Pull a random claimable cell from this region's frontier. f = frontiers[rid] while f: idx = rng.randrange(len(f)) nr, nc = f.pop(idx) if labels[nr, nc] != -1: continue labels[nr, nc] = rid n = size[rid] cr[rid] = (cr[rid] * n + nr) / (n + 1) cc[rid] = (cc[rid] * n + nc) / (n + 1) size[rid] += 1 for dr, dc in dirs: rr, ccc = nr + dr, nc + dc if 0 <= rr < rows and 0 <= ccc < cols and labels[rr, ccc] == -1: f.append((rr, ccc)) progress = True break if not progress: break # all frontiers exhausted (only possible at very high n) # ── Phase 2: priority-queue grow on remaining cells ── fields = [_make_noise_field(rng, rows, cols, scale=15) for _ in range(num_regions)] noise_strength = rows * cols * 0.03 heap: list = [] for rid in range(num_regions): for nr, nc in frontiers[rid]: if labels[nr, nc] == -1: d = (nr - cr[rid]) ** 2 + (nc - cc[rid]) ** 2 d += fields[rid][nr, nc] * noise_strength d += rng.random() * 5.0 heapq.heappush(heap, (d, rid, nr, nc)) while heap: _, rid, nr, nc = heapq.heappop(heap) if labels[nr, nc] != -1: continue labels[nr, nc] = rid n = size[rid] cr[rid] = (cr[rid] * n + nr) / (n + 1) cc[rid] = (cc[rid] * n + nc) / (n + 1) size[rid] += 1 for dr, dc in dirs: rr, ccc = nr + dr, nc + dc if 0 <= rr < rows and 0 <= ccc < cols and labels[rr, ccc] == -1: d = (rr - cr[rid]) ** 2 + (ccc - cc[rid]) ** 2 d += fields[rid][rr, ccc] * noise_strength d += rng.random() * 5.0 heapq.heappush(heap, (d, rid, rr, ccc)) return labels, seeds def upsample_labels( grid_labels: np.ndarray, canvas_w: int, canvas_h: int, smooth_sigma: float = 4.5, ) -> np.ndarray: """Upsample a low-resolution label map to canvas size with smooth boundaries by per-region soft-mask interpolation + argmax.""" num_regions = int(grid_labels.max()) + 1 soft = np.empty((num_regions, canvas_h, canvas_w), dtype=np.float32) for rid in range(num_regions): mask = (grid_labels == rid).astype(np.float32) big = cv2.resize(mask, (canvas_w, canvas_h), interpolation=cv2.INTER_LINEAR) if smooth_sigma > 0: big = cv2.GaussianBlur(big, (0, 0), sigmaX=smooth_sigma, sigmaY=smooth_sigma) soft[rid] = big return np.argmax(soft, axis=0).astype(np.int32) def clean_tiny_components(labels: np.ndarray, min_frac: float = 0.002) -> np.ndarray: """Absorb tiny connected-component specks into the dominant neighbour label. 
    Small ``min_frac`` keeps the cleanup conservative."""
    h, w = labels.shape
    threshold = int(h * w * min_frac)
    out = labels.copy()
    num_regions = int(labels.max()) + 1
    for rid in range(num_regions):
        mask = (out == rid).astype(np.uint8)
        num, comp = cv2.connectedComponents(mask)
        sizes = [int((comp == i).sum()) for i in range(num)]
        if num <= 2:
            continue
        keep = max(range(1, num), key=lambda i: sizes[i])
        for i in range(1, num):
            if i == keep:
                continue
            if sizes[i] < threshold:
                small = (comp == i)
                ys, xs = np.where(small)
                nbrs = []
                for dy, dx in [(-1, 0), (1, 0), (0, -1), (0, 1)]:
                    ny, nx = np.clip(ys + dy, 0, h - 1), np.clip(xs + dx, 0, w - 1)
                    nbrs.append(out[ny, nx])
                nbr_labels = np.concatenate(nbrs)
                nbr_labels = nbr_labels[nbr_labels != rid]
                if len(nbr_labels) == 0:
                    continue
                replacement = int(np.bincount(nbr_labels).argmax())
                out[small] = replacement
    return out


def relabel_contiguous(labels: np.ndarray) -> Tuple[np.ndarray, int]:
    unique = np.unique(labels)
    remap = -np.ones(int(unique.max()) + 1, dtype=np.int32)
    for new_id, old_id in enumerate(unique):
        remap[old_id] = new_id
    return remap[labels], len(unique)


def adjacency_from_labels(labels: np.ndarray) -> Dict[int, set]:
    n = int(labels.max()) + 1
    adj: Dict[int, set] = {rid: set() for rid in range(n)}
    diff_h = labels[:, :-1] != labels[:, 1:]
    a_h = labels[:, :-1][diff_h]
    b_h = labels[:, 1:][diff_h]
    diff_v = labels[:-1, :] != labels[1:, :]
    a_v = labels[:-1, :][diff_v]
    b_v = labels[1:, :][diff_v]
    for a, b in zip(a_h.tolist(), b_h.tolist()):
        adj[a].add(b)
        adj[b].add(a)
    for a, b in zip(a_v.tolist(), b_v.tolist()):
        adj[a].add(b)
        adj[b].add(a)
    return adj


def adjacency_pairs(labels: np.ndarray) -> List[Tuple[int, int]]:
    adj = adjacency_from_labels(labels)
    edges: set[Tuple[int, int]] = set()
    for a, neighbours in adj.items():
        for b in neighbours:
            if a < b:
                edges.add((a, b))
    return sorted(edges)


# ── Gradient assignment with adjacency constraints ────────────────────

def assign_gradients(
    rng: random.Random,
    num_regions: int,
    adj: Dict[int, set],
    hue_gap_min: float = 55.0,
    max_attempts: int = 400,
) -> List[Tuple[Tuple[float, float, float], Tuple[float, float, float], float]] | None:
    """Greedy assignment with restart. Adjacent regions must differ in their
    unordered colour pair AND have at least ``hue_gap_min`` degrees of hue
    separation at their closest pairing."""
    palette_size = len(PALETTE)
    order = sorted(range(num_regions), key=lambda r: -len(adj[r]))
    # Since pure-flat fill uses only the FIRST colour of each pair, enforce
    # that the FIRST hues of adjacent regions are well separated, in addition
    # to the original pair-hue-gap constraint.
    PRIMARY_HUE_GAP_MIN = 60.0
    for _ in range(max_attempts):
        pairs: List[Tuple[int, int] | None] = [None] * num_regions
        ok = True
        for rid in order:
            cand = []
            for a in range(palette_size):
                for b in range(a + 1, palette_size):
                    cand.append((a, b))
            rng.shuffle(cand)
            chosen = None
            for a, b in cand:
                valid = True
                for n in adj[rid]:
                    if pairs[n] is None:
                        continue
                    na, nb = pairs[n][0], pairs[n][1]
                    if {a, b} == {na, nb}:
                        valid = False
                        break
                    if pair_hue_gap((a, b), (na, nb)) < hue_gap_min:
                        valid = False
                        break
                    if hue_gap(PALETTE_HUE[a], PALETTE_HUE[na]) < PRIMARY_HUE_GAP_MIN:
                        valid = False
                        break
                if valid:
                    chosen = (a, b)
                    break
            if chosen is None:
                ok = False
                break
            pairs[rid] = chosen
        if ok:
            angles = [rng.uniform(0, 2 * math.pi) for _ in range(num_regions)]
            jittered = []
            for p, ang in zip(pairs, angles):
                ca = jitter_color(rng, PALETTE[p[0]])
                cb = jitter_color(rng, PALETTE[p[1]])
                jittered.append((ca, cb, ang))
            return jittered
    return None


# ── Per-region textures (defeat canny + colour-cluster attacks) ───────

TEXTURE_STYLES = ["speckle", "stripes_h", "stripes_v", "stripes_d1", "stripes_d2", "dots", "perlin"]


def assign_textures(
    rng: random.Random,
    num_regions: int,
    adj: Dict[int, set],
) -> List[Tuple[str, Dict[str, float]]]:
    """Pick a (style, params) per region. Adjacent regions get different styles
    where possible so texture acts as a region cue for humans."""
    out: List[Tuple[str, Dict[str, float]]] = []
    for rid in range(num_regions):
        used_by_neighbours = {out[n][0] for n in adj[rid] if n < rid}
        candidates = [s for s in TEXTURE_STYLES if s not in used_by_neighbours]
        if not candidates:
            candidates = TEXTURE_STYLES
        style = rng.choice(candidates)
        params = {
            "freq": rng.uniform(0.08, 0.22),
            "amp": rng.uniform(0.08, 0.16),
            "orient": rng.uniform(0, math.pi),
            "speckle_seed": rng.randint(0, 2**31 - 1),
        }
        out.append((style, params))
    return out


def make_texture(style: str, params: Dict[str, float], h: int, w: int, rng_seed: int) -> np.ndarray:
    rng = np.random.default_rng(rng_seed)
    ys, xs = np.mgrid[0:h, 0:w].astype(np.float32)
    freq = params["freq"]
    orient = params["orient"]
    if style == "speckle":
        n = rng.standard_normal((h, w)).astype(np.float32)
        return cv2.GaussianBlur(n, (0, 0), sigmaX=0.7) * 1.4
    if style.startswith("stripes"):
        if style == "stripes_h":
            theta = 0.0
        elif style == "stripes_v":
            theta = math.pi / 2
        elif style == "stripes_d1":
            theta = math.pi / 4
        else:
            theta = -math.pi / 4
        u = xs * math.cos(theta) + ys * math.sin(theta)
        return np.sin(u * freq * 2 * math.pi).astype(np.float32)
    if style == "dots":
        u = xs * freq * 2 * math.pi
        v = ys * freq * 2 * math.pi
        return (np.sin(u) * np.sin(v)).astype(np.float32) * 1.2
    if style == "perlin":
        a = np.sin((xs * math.cos(orient) + ys * math.sin(orient)) * freq * 2 * math.pi)
        b = np.cos((xs * math.cos(orient + 1.1) + ys * math.sin(orient + 1.1)) * freq * 1.7 * 2 * math.pi)
        return ((a + b) * 0.5).astype(np.float32)
    return np.zeros((h, w), dtype=np.float32)


# ── Renderer ──────────────────────────────────────────────────────────

def render_region_canvas(
    labels: np.ndarray,
    assignments: List[Tuple],
    textures: List[Tuple[str, Dict[str, float]]],
    global_speckle: float = 0.05,
) -> np.ndarray:
    """Paint the canvas with a single textured base. The region labels,
    gradient assignments and textures are accepted for API compatibility but
    intentionally ignored (see the comment below): the only visible cue for
    the partition is the boundary line drawn on top by render_instance."""
    h, w = labels.shape
    # SINGLE global base colour (warm light grey) across the whole canvas.
    # Region labels and per-region palette assignments are intentionally
    # ignored: the goal is one uniform texture that flows continuously
    # across the entire image, so the only visible cue for the partition
    # is the boundary line drawn on top by render_instance.
    base = np.array([220.0, 215.0, 205.0], dtype=np.float32)
    img = np.broadcast_to(base, (h, w, 3)).copy()

    # Two-octave smooth noise field across the whole canvas, plus fine grain.
    rng_np = np.random.default_rng(0xC0FFEE)
    big = rng_np.standard_normal((h, w)).astype(np.float32)
    big = cv2.GaussianBlur(big, (0, 0), sigmaX=h / 6.0, sigmaY=h / 6.0)
    med = rng_np.standard_normal((h, w)).astype(np.float32)
    med = cv2.GaussianBlur(med, (0, 0), sigmaX=h / 18.0, sigmaY=h / 18.0)
    noise = big / (big.std() + 1e-6) + 0.7 * med / (med.std() + 1e-6)
    noise /= (noise.std() + 1e-6)

    # Multiplicative brightness modulation across the whole canvas: ±25% at one sigma.
    img = img * (1.0 + 0.25 * noise[..., None])

    # Per-channel fine grain (standard deviation of about 8 brightness units).
    grain = rng_np.standard_normal((h, w, 3)).astype(np.float32) * 8.0
    img = img + grain
    return np.clip(img, 0, 255).astype(np.uint8)


BG_COLOR = (248, 246, 240)
BORDER_COLOR = (45, 39, 32)


def composite_full_image(painted: np.ndarray, width: int, height: int) -> np.ndarray:
    """Paste the painted region square onto the full canvas with a thin dark
    border around it."""
    canvas_size = painted.shape[0]
    margin = (min(width, height) - canvas_size) // 2
    full = np.full((height, width, 3), BG_COLOR, dtype=np.uint8)
    border = max(2, canvas_size // 256)
    y0 = margin
    x0 = (width - canvas_size) // 2
    full[y0:y0 + canvas_size, x0:x0 + canvas_size] = painted
    full[y0 - border:y0, x0 - border:x0 + canvas_size + border] = BORDER_COLOR
    full[y0 + canvas_size:y0 + canvas_size + border, x0 - border:x0 + canvas_size + border] = BORDER_COLOR
    full[y0 - border:y0 + canvas_size + border, x0 - border:x0] = BORDER_COLOR
    full[y0 - border:y0 + canvas_size + border, x0 + canvas_size:x0 + canvas_size + border] = BORDER_COLOR
    return full


# ── Sampling: build one valid instance ────────────────────────────────

def difficulty_for_regions(num_regions: int) -> str:
    if num_regions <= 6:
        return "easy"
    if num_regions <= 10:
        return "medium"
    return "hard"


def count_region_cells(labels: np.ndarray, num_regions: int) -> List[int]:
    return [int((labels == rid).sum()) for rid in range(num_regions)]


def sample_instance(
    rng: random.Random,
    width: int,
    height: int,
    min_regions: int,
    max_regions: int,
    grid_rows: int,
    grid_cols: int,
    canvas_size: int,
    min_region_frac: float,
    hue_gap_min: float,
    forced_target: int | None = None,
) -> Dict[str, object] | None:
    """Build one valid instance or return None if rejected. If forced_target
    is given, only accept samples whose actual region count equals it."""
    max_feasible_regions = min(max_regions, max(2, (grid_rows * grid_cols) // 6))
    min_feasible_regions = min(min_regions, max_feasible_regions)
    if forced_target is not None:
        target_n = max(2, min(max_feasible_regions, forced_target))
    else:
        target_n = rng.randint(max(2, min_feasible_regions), max_feasible_regions)
    grid_labels, seeds = make_connected_partition(rng, grid_rows, grid_cols, target_n)
    if forced_target is not None:
        # 200x200 grid + 70%-fair-share phase-1 floor makes every region
        # big enough to survive heavy smoothing. Use the smoothed argmax
        # path so boundaries are smooth curves instead of NEAREST staircase.
        canvas_labels = upsample_labels(grid_labels, canvas_size, canvas_size, smooth_sigma=14.0)
    else:
        canvas_labels = upsample_labels(grid_labels, canvas_size, canvas_size, smooth_sigma=9.0)

    if forced_target is None:
        # Normal path: clean tiny specks and apply min-area floor.
        canvas_labels = clean_tiny_components(canvas_labels, min_frac=0.002)
        canvas_labels, actual_n = relabel_contiguous(canvas_labels)
        if actual_n < 2:
            return None
        region_pixels = count_region_cells(canvas_labels, actual_n)
        min_pixels = int(canvas_size * canvas_size * min_region_frac)
        if min(region_pixels) < min_pixels:
            return None
    else:
        # Forced path: trust the seeded grow — the partition produced exactly
        # forced_target connected regions. Skip clean_tiny_components and the
        # min-area floor so the count is honoured exactly.
        canvas_labels, actual_n = relabel_contiguous(canvas_labels)
        if actual_n != forced_target:
            return None
        region_pixels = count_region_cells(canvas_labels, actual_n)

    adj = adjacency_from_labels(canvas_labels)
    assignments = assign_gradients(rng, actual_n, adj, hue_gap_min=hue_gap_min)
    if assignments is None:
        return None
    textures = assign_textures(rng, actual_n, adj)

    margin = int(min(width, height) * 0.12)
    square_size = canvas_size
    square_left = (width - square_size) // 2
    square_top = (height - square_size) // 2

    return {
        "width": width,
        "height": height,
        "grid_rows": grid_rows,
        "grid_cols": grid_cols,
        "square_left": float(square_left),
        "square_top": float(square_top),
        "square_size": float(square_size),
        "num_regions": actual_n,
        "question": (
            "How many separated regions are inside the square? "
            "A region is a maximal area inside the square that is filled with "
            "one continuous colour pattern. Two locations belong to the same "
            "region if you can travel between them without crossing into a "
            "differently-coloured area. Count every distinct region inside the "
            "square and report the total as a positive integer. "
            "Provide your final answer enclosed in ... tags."
        ),
        "answer": actual_n,
        "difficulty": difficulty_for_regions(actual_n),
        "region_seed_cells": [[int(r), int(c)] for r, c in seeds],
        "region_cell_counts": region_pixels,
        "region_adjacency": [[int(a), int(b)] for a, b in adjacency_pairs(canvas_labels)],
        "_canvas_labels": canvas_labels,
        "_assignments": assignments,
        "_textures": textures,
    }


def render_instance(out_path: Path, record: Dict[str, object]) -> None:
    """Render one instance: the canvas is painted with a single textured base
    (see render_region_canvas), then a thin dark boundary line is drawn along
    the label-difference set so triple junctions never have hollow gaps."""
    canvas_labels: np.ndarray = record.pop("_canvas_labels")  # type: ignore[assignment]
    assignments = record.pop("_assignments")
    textures = record.pop("_textures")
    width = int(record["width"])
    height = int(record["height"])
    h, w = canvas_labels.shape
    painted = render_region_canvas(canvas_labels, assignments, textures)

    line_color = (32, 32, 32)
    line_thickness = max(1, min(h, w) // 700)

    # Smooth labels first via per-region one-hot → blur → argmax. This
    # removes the staircase from the original integer upsample but, crucially,
    # gives a SINGLE consistent label map (no triple-junction gaps).
    num_regions = int(canvas_labels.max()) + 1
    smooth_sigma = max(2.5, min(h, w) / 140.0)
    soft = np.empty((num_regions, h, w), dtype=np.float32)
    for rid in range(num_regions):
        mask = (canvas_labels == rid).astype(np.float32)
        soft[rid] = cv2.GaussianBlur(mask, (0, 0), sigmaX=smooth_sigma, sigmaY=smooth_sigma)
    smoothed_labels = np.argmax(soft, axis=0).astype(np.int32)

    # Boundary mask: a pixel is on a boundary iff any of its 4-neighbours has
    # a different label. This produces a single closed curve per junction
    # with no hollow gap at triple points.
    boundary = np.zeros((h, w), dtype=np.uint8)
    diff_h = smoothed_labels[:, :-1] != smoothed_labels[:, 1:]
    diff_v = smoothed_labels[:-1, :] != smoothed_labels[1:, :]
    boundary[:, :-1][diff_h] = 1
    boundary[:, 1:][diff_h] = 1
    boundary[:-1, :][diff_v] = 1
    boundary[1:, :][diff_v] = 1
    if line_thickness > 1:
        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (line_thickness, line_thickness))
        boundary = cv2.dilate(boundary, kernel)
    painted[boundary > 0] = line_color

    full = composite_full_image(painted, width, height)
    Image.fromarray(full).save(out_path)


# ── Dataset generation ──────────────────────────────────────────────────

def ensure_output_dir(root: Path) -> Tuple[Path, Path]:
    root.mkdir(parents=True, exist_ok=True)
    images_dir = root / "images"
    images_dir.mkdir(exist_ok=True)
    return root, images_dir


def generate_dataset(
    rng: random.Random,
    count: int,
    output_dir: Path,
    images_dir: Path,
    width: int,
    height: int,
    min_regions: int,
    max_regions: int,
    grid_rows: int,
    grid_cols: int,
    canvas_size: int,
    min_region_frac: float,
    hue_gap_min: float,
    forced_targets: List[int] | None = None,
) -> None:
    records: List[Dict[str, object]] = []
    data_records: List[Dict[str, object]] = []
    pbar = tqdm(total=count, desc="counting_regions")
    idx = 0
    rejects = 0
    while idx < count:
        target = (forced_targets[idx] if forced_targets and idx < len(forced_targets) else None)
        record = sample_instance(
            rng=rng,
            width=width,
            height=height,
            min_regions=min_regions,
            max_regions=max_regions,
            grid_rows=grid_rows,
            grid_cols=grid_cols,
            canvas_size=canvas_size,
            min_region_frac=min_region_frac,
            hue_gap_min=hue_gap_min,
            forced_target=target,
        )
        if record is None:
            rejects += 1
            continue
        image_name = f"counting_regions_{idx:05d}.png"
        render_instance(images_dir / image_name, record)
        record["image"] = f"images/{image_name}"
        records.append(record)
        data_records.append({
            "image": record["image"],
            "question": record["question"],
            "answer": record["answer"],
        })
        idx += 1
        pbar.update(1)
        pbar.set_postfix(answer=record["answer"], rejects=rejects)
    pbar.close()

    with (output_dir / "annotations.jsonl").open("w", encoding="utf-8") as fh:
        for record in records:
            fh.write(json.dumps(record) + "\n")

    data_json = {
        "task": "counting_regions",
        "category": "distributed_scanning",
        "count": len(data_records),
        "items": data_records,
    }
    with (output_dir / "data.json").open("w", encoding="utf-8") as fh:
        json.dump(data_json, fh, indent=2)


def parse_args() -> argparse.Namespace:
    parser = argparse.ArgumentParser(description="Generate a counting-regions dataset.")
    parser.add_argument("--output-root", type=Path, required=True)
    parser.add_argument("--count", type=int, default=36)
    parser.add_argument("--width", type=int, default=1024)
    parser.add_argument("--height", type=int, default=1024)
    parser.add_argument("--min-regions", type=int, default=6)
    parser.add_argument("--max-regions", type=int, default=12)
    parser.add_argument("--grid-rows", type=int, default=200)
    parser.add_argument("--grid-cols", type=int, default=200)
parser.add_argument("--canvas-size", type=int, default=820, help="Pixel side of the painted square inside the image.") parser.add_argument("--min-region-frac", type=float, default=0.025, help="Reject samples whose smallest region covers " "less than this fraction of the canvas.") parser.add_argument("--hue-gap-min", type=float, default=55.0) parser.add_argument("--seed", type=int, default=23) parser.add_argument("--difficulty", type=int, default=5, help="Integer difficulty >=0; scales region count.") return parser.parse_args() def main() -> None: args = parse_args() d = max(0, int(args.difficulty)) # Canvas scaling: N_d = 10 + d, N_0 = 10. N_d = 10 + d N_0 = 10 s = math.sqrt(max(1.0, N_d / N_0)) args.width = int(round(args.width * s)) args.height = int(round(args.height * s)) args.canvas_size = int(round(args.canvas_size * s)) rng = random.Random(args.seed) output_dir, images_dir = ensure_output_dir(args.output_root) # num_regions ∈ [5, 10 + d] min_regions = 10 max_regions = 10 + 2 * d # Auto-scale: min_region_frac = min(0.04, 1 / (2.5 * max_regions)) # Use max_regions (upper bound) as a conservative frac floor so any # sampled count satisfies the constraint. min_region_frac = min(0.04, 1.0 / (2.5 * max_regions)) # Force per-instance region counts evenly spaced across [min_regions, # max_regions]. With count=5 and range=[10, 20] this yields [10, 13, 15, 18, 20]. if args.count > 1: forced_targets = [ int(round(min_regions + i * (max_regions - min_regions) / (args.count - 1))) for i in range(args.count) ] else: forced_targets = [min_regions] generate_dataset( rng=rng, count=args.count, output_dir=output_dir, images_dir=images_dir, width=args.width, height=args.height, min_regions=min_regions, max_regions=max_regions, grid_rows=args.grid_rows, grid_cols=args.grid_cols, canvas_size=args.canvas_size, min_region_frac=min_region_frac, hue_gap_min=args.hue_gap_min, forced_targets=forced_targets, ) print(f"forced region counts: {forced_targets}") print(f"Saved dataset to {args.output_root} (canvas {args.width}x{args.height})") if __name__ == "__main__": main()