# Provenance (captured from the hosting page, kept as comments so the file
# remains valid Python):
#   Initial release v0.4.0 — ActiveVision benchmark (85 instances, 17 tasks)
#   commit f69e256 (verified)
"""Generate contour_silhouette_count samples.
Each sample produces a SINGLE side-by-side image: Template on the left,
Field on the right, separated by a thin divider.
The Template panel shows a single smooth closed blob outline (white on dark),
and the Field shows a difficulty-dependent number of blob outlines scattered
across a dark canvas.
Some blobs in the field are exact translated copies of the template shape;
the rest are distractors (different Fourier-descriptor blobs). The task is
to count how many field contours match the template exactly (translation only).
Blob shapes are generated via Fourier descriptors:
r(theta) = r0 + sum_{k=1}^{K} a_k * cos(k*theta + phi_k)
"""
from __future__ import annotations
import argparse
import io
import json
import math
import random
from pathlib import Path
from typing import List, Tuple
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from matplotlib.patches import Rectangle
from matplotlib.path import Path as MplPath
import numpy as np
from PIL import Image
from scipy.spatial.distance import directed_hausdorff
from tqdm import tqdm
# Prompt text paired with every generated image. The final sentence enforces
# the <answer>...</answer> wrapper that downstream scoring extracts.
QUESTION = (
    "This image has two panels separated by a thin vertical divider. "
    "The left panel shows the Template: a single closed contour. "
    "The right panel shows the Field: many closed contours scattered across the "
    "canvas, all at the SAME pixel scale as the Template. Count the number of "
    "Field contours that are exact copies of the Template shape (translation "
    "only — same size, same orientation, no rotation or mirroring) and report "
    "the integer count. "
    "Provide your final answer enclosed in <answer>...</answer> tags."
)
STROKE_WIDTH = 2.5    # outline line width (pt) used for every contour
BG_COLOR = "#0a1020"  # dark navy canvas colour shared by the panels
N_CONTOUR_PTS = 200  # number of points to sample on each blob boundary
# ---------------------------------------------------------------------------
# Fourier-descriptor blob generation
# ---------------------------------------------------------------------------
def fourier_blob(rng: random.Random, r0: float = 45.0,
                 K_range: Tuple[int, int] = (4, 6),
                 amp_scale: float = 0.18) -> np.ndarray:
    """Generate a closed blob shape via Fourier descriptors.

    The boundary radius follows r(theta) = r0 + sum_k a_k*cos(k*theta+phi_k)
    with K harmonics drawn from ``K_range`` and amplitudes bounded by
    ``amp_scale * r0``. Returns an (N_CONTOUR_PTS, 2) array of (x, y)
    points re-centred on the shape's centroid.
    """
    n_harmonics = rng.randint(*K_range)
    # Consume the rng in the fixed order: all amplitudes, then all phases,
    # so seeded generation stays reproducible.
    amps = [rng.uniform(0.05, amp_scale) * r0 for _ in range(n_harmonics)]
    phis = [rng.uniform(0, 2 * np.pi) for _ in range(n_harmonics)]
    theta = np.linspace(0, 2 * np.pi, N_CONTOUR_PTS, endpoint=False)
    radius = np.full_like(theta, r0)
    for k, (amp, phi) in enumerate(zip(amps, phis), start=1):
        radius = radius + amp * np.cos(k * theta + phi)
    # Clamp so the boundary never collapses through the origin.
    radius = np.clip(radius, 5.0, None)
    pts = np.column_stack([radius * np.cos(theta), radius * np.sin(theta)])
    # Centre on the centroid so callers can translate the shape freely.
    return pts - pts.mean(axis=0)
def blob_hausdorff(a: np.ndarray, b: np.ndarray) -> float:
    """Symmetric Hausdorff distance between two blob point sets.

    scipy's directed_hausdorff is one-sided; the symmetric distance is the
    max of the two directions.
    """
    forward = directed_hausdorff(a, b)[0]
    backward = directed_hausdorff(b, a)[0]
    return max(forward, backward)
def blob_bounding_radius(pts: np.ndarray) -> float:
    """Max distance from centroid (assumed ~origin) to any point."""
    radii = np.linalg.norm(pts, axis=1)
    return float(radii.max())
# ---------------------------------------------------------------------------
# Sample building
# ---------------------------------------------------------------------------
# Difficulty-tunable module globals; main() overwrites both (via `global`)
# before any sample is built.
DISTRACTOR_HAUSDORFF_MIN = 8.0  # min Hausdorff distance a distractor must keep from the template
TEMPLATE_AMP_SCALE = 0.18  # base Fourier amplitude scale for the template blob
def build_sample(rng: random.Random, width: int, height: int,
                 target_matches: int, total_blobs: int
                 ) -> Tuple[np.ndarray, List[np.ndarray], List[np.ndarray],
                            List[Tuple[np.ndarray, float]],
                            List[Tuple[np.ndarray, float]], int]:
    """Build a sample. Returns (template_pts, match_placements, distractor_placements,
    match_centres, distractor_centres, realised_matches).

    Each placement is the blob points translated to their canvas position.
    match_placements: list of (N,2) arrays (copies of template at various offsets).
    distractor_placements: list of (N,2) arrays (different blob shapes).
    match_centres / distractor_centres: list of (centre_xy, bounding_radius).

    Layout is rejection-sampled: up to 60 whole-sample attempts, each with up
    to 800 placement tries per blob. Raises RuntimeError if no attempt yields
    a full non-overlapping layout.
    """
    hausdorff_thresh = DISTRACTOR_HAUSDORFF_MIN  # distractors must differ by at least this much
    outline_gap = 10.0  # min visible gap between any two blob outlines
    for _attempt in range(60):
        # Generate template blob
        r0 = rng.uniform(40.0, 50.0)
        template = fourier_blob(rng, r0=r0, K_range=(4, 6), amp_scale=TEMPLATE_AMP_SCALE)
        t_radius = blob_bounding_radius(template)
        # Margin that keeps a template copy fully inside the canvas.
        pad = t_radius + 20.0
        # Track (centre, bounding_radius) for non-overlap checks.
        placed_items: List[Tuple[np.ndarray, float]] = []
        def fits(cand: np.ndarray, cand_radius: float) -> bool:
            # Bounding-circle test: candidate must clear every placed blob
            # by at least outline_gap.
            for c, r in placed_items:
                if np.linalg.norm(cand - c) < (cand_radius + r + outline_gap):
                    return False
            return True
        # 1. Place match copies (same template, same radius).
        match_placements: List[np.ndarray] = []
        match_centres: List[Tuple[np.ndarray, float]] = []
        ok = True
        for _ in range(target_matches):
            placed = False
            for _try in range(800):
                cx = rng.uniform(pad, width - pad)
                cy = rng.uniform(pad, height - pad)
                cand = np.array([cx, cy])
                if not fits(cand, t_radius):
                    continue
                placed_items.append((cand, t_radius))
                match_placements.append(template + cand)
                match_centres.append((cand, t_radius))
                placed = True
                break
            if not placed:
                ok = False
                break
        if not ok:
            # Could not fit all matches — restart with a fresh template.
            continue
        # 2. Place distractors with per-blob bounding radii.
        n_distractors = total_blobs - target_matches
        distractor_placements: List[np.ndarray] = []
        distractor_centres: List[Tuple[np.ndarray, float]] = []
        fail = False
        for _ in range(n_distractors):
            placed = False
            for _try in range(800):
                d_r0 = rng.uniform(38.0, 52.0)
                distractor = fourier_blob(rng, r0=d_r0, K_range=(4, 6),
                                          amp_scale=0.22)
                # Reject shapes too similar to the template, so the count
                # answer stays unambiguous.
                if blob_hausdorff(distractor, template) < hausdorff_thresh:
                    continue
                d_radius = blob_bounding_radius(distractor)
                d_pad = d_radius + 20.0
                if d_pad >= width / 2 or d_pad >= height / 2:
                    # Blob too large for this canvas; redraw a new one.
                    continue
                cx = rng.uniform(d_pad, width - d_pad)
                cy = rng.uniform(d_pad, height - d_pad)
                cand = np.array([cx, cy])
                if not fits(cand, d_radius):
                    continue
                placed_items.append((cand, d_radius))
                distractor_placements.append(distractor + cand)
                distractor_centres.append((cand, d_radius))
                placed = True
                break
            if not placed:
                fail = True
                break
        if fail:
            continue
        realised = target_matches  # by construction, matches are exact copies
        return (template, match_placements, distractor_placements,
                match_centres, distractor_centres, realised)
    raise RuntimeError("Failed to build sample after many attempts")
# ---------------------------------------------------------------------------
# Rendering
# ---------------------------------------------------------------------------
def _draw_blob_outline(ax, pts: np.ndarray, stroke_width: float,
                       color: str = "white", alpha: float = 1.0,
                       zorder: int = 3) -> None:
    """Draw a closed smooth polygon outline (anti-aliased)."""
    # Repeat the first vertex at the end so the polyline closes on itself.
    xs = np.append(pts[:, 0], pts[0, 0])
    ys = np.append(pts[:, 1], pts[0, 1])
    ax.plot(xs, ys, color=color, linewidth=stroke_width, alpha=alpha,
            solid_capstyle="round", solid_joinstyle="round",
            antialiased=True, zorder=zorder)
# Styling for per-contour letter badges.
# NOTE(review): render_field no longer draws labels, so these appear unused —
# confirm before removing.
LABEL_BADGE_COLOR = "#f5d76e"  # warm yellow filled circle behind label letter
LABEL_TEXT_COLOR = "#0a1020"  # dark text on the badge (matches BG)
LABEL_BADGE_EDGE = "#ffffff"  # thin white outline so badge stands out
def _pick_label_position(centre: np.ndarray, bbox_r: float,
                         own_idx: int,
                         all_contours: List[np.ndarray],
                         placed_label_centres: List[np.ndarray],
                         width: int, height: int,
                         badge_radius: float,
                         other_clearance: float = 10.0,
                         label_clearance: float = 24.0) -> np.ndarray:
    """Multi-candidate / best-score label placement.

    Samples ~16 angular anchors around the contour's bounding circle, with
    several radial offsets each. Filters by hard gates (panel margin, distance
    to other contours, distance to placed labels), scores remaining candidates,
    and returns the argmax. Falls back by relaxing other_clearance.

    NOTE(review): no call sites remain in this file after per-contour labels
    were removed from render_field — appears unused; confirm before deleting.
    """
    margin = badge_radius + 4.0
    n_angles = 16
    angles = [2 * math.pi * k / n_angles for k in range(n_angles)]
    radial_offsets = [8.0, 14.0, 22.0, 30.0]
    # Build candidate list (pos, offset)
    candidates: List[Tuple[np.ndarray, float]] = []
    for ang in angles:
        ux, uy = math.cos(ang), math.sin(ang)
        for off in radial_offsets:
            r = bbox_r + off
            bx = centre[0] + ux * r
            by = centre[1] + uy * r
            candidates.append((np.array([bx, by]), off))
    # Other-contour points: concat all contours except own
    other_pts_list = [c for k, c in enumerate(all_contours) if k != own_idx]
    if other_pts_list:
        other_pts = np.vstack(other_pts_list)
    else:
        other_pts = None
    def gate_and_score(min_other: float) -> Tuple[np.ndarray, float] | None:
        # Return (best_pos, best_score) among candidates that pass every
        # hard gate, or None when all candidates are rejected.
        best = None
        best_score = -1e18
        for pos, off in candidates:
            bx, by = pos[0], pos[1]
            # Panel margin
            if not (margin <= bx <= width - margin and
                    margin <= by <= height - margin):
                continue
            # Distance to other contour points
            if other_pts is not None and len(other_pts) > 0:
                other_d = float(np.min(np.linalg.norm(other_pts - pos, axis=1)))
            else:
                other_d = 1e9
            if other_d < min_other:
                continue
            # Distance to placed label centres
            if placed_label_centres:
                lbl_arr = np.array(placed_label_centres)
                label_d = float(np.min(np.linalg.norm(lbl_arr - pos, axis=1)))
            else:
                label_d = 1e9
            if label_d < label_clearance:
                continue
            # Favour clearance from contours and other labels; penalise
            # drifting far from the owning contour.
            score = other_d + 0.5 * label_d - 0.4 * off
            if score > best_score:
                best_score = score
                best = pos
        return (best, best_score) if best is not None else None
    # Progressively relax the other-contour clearance gate.
    for relax in (1.0, 0.5, 0.3):
        res = gate_and_score(other_clearance * relax)
        if res is not None:
            return res[0]
    # Last-resort: pick any in-panel candidate maximising other_d
    best = None
    best_d = -1.0
    for pos, off in candidates:
        bx, by = pos[0], pos[1]
        if not (margin <= bx <= width - margin and
                margin <= by <= height - margin):
            continue
        if other_pts is not None and len(other_pts) > 0:
            other_d = float(np.min(np.linalg.norm(other_pts - pos, axis=1)))
        else:
            other_d = 1e9
        if other_d > best_d:
            best_d = other_d
            best = pos
    if best is not None:
        return best
    # Absolute fallback: upper-right diagonal
    ux, uy = 1.0 / math.sqrt(2), -1.0 / math.sqrt(2)
    return np.array([centre[0] + ux * (bbox_r + badge_radius + 6.0),
                     centre[1] + uy * (bbox_r + badge_radius + 6.0)])
def _index_to_letters(idx: int) -> str:
"""0->A, 25->Z, 26->AA, 27->AB, ..."""
letters = ""
n = idx
while True:
letters = chr(ord("A") + (n % 26)) + letters
n = n // 26 - 1
if n < 0:
break
return letters
def render_field(width: int, height: int,
                 match_placements: List[np.ndarray],
                 distractor_placements: List[np.ndarray],
                 labels: List[str],
                 centres: List[Tuple[np.ndarray, float]]) -> Image.Image:
    """Render the field image: dark canvas with many blob outlines.

    `labels` and `centres` are aligned by index with the concatenation
    `match_placements + distractor_placements`; both are currently ignored
    (per-contour letter badges were removed when the task became a count)
    but kept in the signature for caller compatibility.
    Returns PIL Image.
    """
    fig = plt.figure(figsize=(width / 100, height / 100), dpi=100,
                     facecolor=BG_COLOR)
    # Axes fills the whole figure; y-limits inverted so (0, 0) is top-left.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(0, width)
    ax.set_ylim(height, 0)
    ax.axis("off")
    ax.set_facecolor(BG_COLOR)
    all_blobs = match_placements + distractor_placements
    for blob in all_blobs:
        _draw_blob_outline(ax, blob, STROKE_WIDTH)
    # Letter labels removed — task is now a count, no per-contour labels needed.
    buf = io.BytesIO()
    # NOTE(review): uses bbox_inches="tight" while render_template uses
    # bbox_inches=None — presumably equivalent since the axes fills the
    # figure, but confirm the two panels really share one pixel scale.
    fig.savefig(buf, format="png", dpi=100, bbox_inches="tight", pad_inches=0,
                facecolor=fig.get_facecolor())
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf).convert("RGB")
def render_template(template: np.ndarray) -> Image.Image:
    """Render the template panel at the SAME pixel scale as the field.

    Tight bounding box + margin, with a TEMPLATE header strip.
    Returns a PIL Image.
    """
    tpl_min = template.min(axis=0)
    tpl_max = template.max(axis=0)
    span_x = float(tpl_max[0] - tpl_min[0])
    span_y = float(tpl_max[1] - tpl_min[1])
    margin = 40.0      # padding around the blob inside the panel
    header_h = 34.0    # height of the TEMPLATE header strip
    min_width = 200.0  # panel never narrower than this
    content_w = span_x + 2 * margin
    content_h = span_y + 2 * margin
    canvas_w = max(content_w, min_width)
    canvas_h = header_h + content_h
    # Offset to centre blob in canvas
    margin_x = (canvas_w - span_x) / 2.0
    ox = margin_x - tpl_min[0]
    oy = (header_h + margin) - tpl_min[1]
    disp = template + np.array([ox, oy])
    fig = plt.figure(figsize=(canvas_w / 100, canvas_h / 100), dpi=100,
                     facecolor="#070b14")
    # Axes fills the figure; y-limits inverted so (0, 0) is top-left.
    ax = fig.add_axes([0, 0, 1, 1])
    ax.set_xlim(0, canvas_w)
    ax.set_ylim(canvas_h, 0)
    ax.axis("off")
    ax.set_facecolor("#070b14")
    # Header strip
    ax.add_patch(Rectangle((0, 0), canvas_w, header_h,
                           facecolor="#11182a", edgecolor="none", zorder=4))
    ax.plot([0, canvas_w], [header_h, header_h],
            color="#2d3a5a", linewidth=1.0, zorder=5)
    ax.text(canvas_w / 2, header_h / 2, "TEMPLATE",
            color="#cfe0ff", fontsize=14, fontweight="bold",
            ha="center", va="center", zorder=6,
            family="DejaVu Sans")
    # Border
    ax.add_patch(Rectangle((1.5, 1.5), canvas_w - 3, canvas_h - 3,
                           facecolor="none", edgecolor="#7aa6ff",
                           linewidth=2.0, zorder=6))
    _draw_blob_outline(ax, disp, STROKE_WIDTH, zorder=7)
    buf = io.BytesIO()
    # bbox_inches=None keeps the figure size exact at dpi=100, so one data
    # unit maps to one pixel (matching the field's scale).
    fig.savefig(buf, format="png", dpi=100, bbox_inches=None, pad_inches=0,
                facecolor=fig.get_facecolor())
    plt.close(fig)
    buf.seek(0)
    return Image.open(buf).convert("RGB")
DIVIDER_WIDTH = 3  # thickness (px) of the vertical strip between panels
DIVIDER_COLOR = (51, 51, 85)  # #333355
def render_combined(out_path: Path, template_img: Image.Image,
                    field_img: Image.Image) -> None:
    """Concatenate template (left) and field (right) with a thin divider,
    then pad to a square canvas (BG colour) so downstream image-edit
    models receive a 1:1 input.

    Args:
        out_path: destination file; format inferred from the suffix.
        template_img / field_img: pre-rendered panels (pasted unscaled).
    """
    # Parse "#rrggbb" into an (r, g, b) tuple for PIL.
    bg = tuple(int(BG_COLOR.lstrip("#")[i:i+2], 16) for i in (0, 2, 4))
    tw, th = template_img.size
    fw, fh = field_img.size
    inner_h = max(th, fh)
    inner_w = tw + DIVIDER_WIDTH + fw
    side = max(inner_w, inner_h)  # square output edge length
    combined = Image.new("RGB", (side, side), bg)
    x0 = (side - inner_w) // 2
    y0 = (side - inner_h) // 2
    # Each panel is vertically centred within the inner strip.
    combined.paste(template_img, (x0, y0 + (inner_h - th) // 2))
    # Fill the divider strip in one call; the previous per-pixel putpixel
    # double loop made O(width*height) Python-level calls.
    combined.paste(DIVIDER_COLOR,
                   (x0 + tw, y0, x0 + tw + DIVIDER_WIDTH, y0 + inner_h))
    combined.paste(field_img, (x0 + tw + DIVIDER_WIDTH, y0 + (inner_h - fh) // 2))
    combined.save(out_path)
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main() -> None:
    """CLI entry point: generate samples, write images + JSONL + data.json."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--output-root", type=Path, required=True)
    parser.add_argument("--count", type=int, default=30)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--width", type=int, default=900)
    parser.add_argument("--height", type=int, default=900)
    parser.add_argument("--difficulty", type=int, default=5,
                        help="Integer difficulty >=0; scales match count, total contours, distractor similarity.")
    args = parser.parse_args()
    d = max(0, int(args.difficulty))
    # Canvas scaling: N_d = 10 + 2*d, N_0 = 10
    # Area grows linearly with contour count, so side length grows by sqrt.
    N_d = 10 + 2 * d
    N_0 = 10
    s = math.sqrt(max(1.0, N_d / N_0))
    args.width = int(round(args.width * s))
    args.height = int(round(args.height * s))
    # Overwrite the module-level generation knobs before any sampling.
    global DISTRACTOR_HAUSDORFF_MIN, TEMPLATE_AMP_SCALE
    if d > 0:
        _min_matches = 5
        _max_matches = 5 + 2 * d
        _total_contours = 10 + 2 * d
        # Higher difficulty lets distractors sit closer to the template
        # (floor of 6) and makes template shapes lumpier.
        DISTRACTOR_HAUSDORFF_MIN = float(max(6, 30 - 3 * d))
        TEMPLATE_AMP_SCALE = 0.18 * (1 + 0.1 * d)
    else:
        _min_matches = 3
        _max_matches = 5
        _total_contours = 10
        DISTRACTOR_HAUSDORFF_MIN = 30.0
        TEMPLATE_AMP_SCALE = 0.18
    out_root: Path = args.output_root
    img_dir = out_root / "images"
    img_dir.mkdir(parents=True, exist_ok=True)
    ann_path = out_root / "annotations.jsonl"
    master_rng = random.Random(args.seed)
    # Force evenly-spaced answers across [_min_matches, _max_matches].
    if args.count > 1:
        plan = [int(round(_min_matches + i * (_max_matches - _min_matches) / (args.count - 1))) for i in range(args.count)]
    else:
        plan = [_min_matches]
    print(f"forced contour silhouette match counts: {plan}")
    records = []
    with ann_path.open("w") as f:
        for i in tqdm(range(args.count), desc="contour_silhouette_count"):
            target = plan[i]
            # NOTE(review): _total_contours is always an int above, so the
            # else branch here is unreachable; kept as-is for safety.
            if _total_contours is not None:
                total_blobs = max(_total_contours, target + 12)
            else:
                total_blobs = master_rng.randint(18, 26)
                total_blobs = max(total_blobs, target + 12)
                total_blobs = min(total_blobs, 26)
            built = False
            # Retry layout with fresh sub-seeds; build_sample itself already
            # retries internally before raising.
            for retry in range(20):
                sub_seed = master_rng.randint(0, 2**31 - 1)
                sub_rng = random.Random(sub_seed)
                try:
                    (template, match_pl, distractor_pl,
                     match_centres, distractor_centres,
                     realised) = build_sample(
                        sub_rng, args.width, args.height,
                        target_matches=target,
                        total_blobs=total_blobs,
                    )
                except RuntimeError:
                    continue
                built = True
                break
            if not built:
                raise RuntimeError(f"sample {i} could not be generated")
            # Assign letter labels to all field contours in a randomised order so
            # match labels are not always the first letters. We label every
            # contour (matches + distractors) and record which labels are matches.
            n_total = len(match_pl) + len(distractor_pl)
            order = list(range(n_total))
            sub_rng.shuffle(order)
            # `order[k]` = original index that should receive the k-th letter.
            label_for_orig = [""] * n_total
            for k, orig in enumerate(order):
                label_for_orig[orig] = _index_to_letters(k)
            match_labels = label_for_orig[:len(match_pl)]
            distractor_labels = label_for_orig[len(match_pl):]
            all_labels = match_labels + distractor_labels
            all_centres = match_centres + distractor_centres
            answer = str(realised)
            img_name = f"contour_silhouette_count_{i:05d}.png"
            tpl_img = render_template(template)
            fld_img = render_field(args.width, args.height,
                                   match_pl, distractor_pl,
                                   all_labels, all_centres)
            render_combined(img_dir / img_name, tpl_img, fld_img)
            rec = {
                "image": f"images/{img_name}",
                "question": QUESTION,
                "answer": answer,
                "num_matches": realised,
                "match_labels": sorted(match_labels),
                "total_blobs": n_total,
                "metadata": {
                    "seed": sub_seed,
                    "template_points": template.tolist(),
                },
            }
            # Stream each record so partial runs still leave usable JSONL.
            f.write(json.dumps(rec) + "\n")
            f.flush()
            records.append(rec)
    data_json = {
        "task": "contour_silhouette_count",
        "category": "visual_attribute_transfer",
        "count": len(records),
        "items": records,
    }
    (out_root / "data.json").write_text(json.dumps(data_json, indent=2))
    print(f"Saved {len(records)} samples to {out_root}")
    # Print answer distribution
    from collections import Counter
    dist = Counter(r["num_matches"] for r in records)
    print("Match-count distribution:", dict(sorted(dist.items())))
if __name__ == "__main__":
    main()