# KHLR / Scripts/synthetic_line_generator.py
# Uploaded by Karez via huggingface_hub (commit 92b19da, verified)
"""
Constrained Recipe-Based Synthetic Handwritten Line Generation
Generates synthetic text lines by concatenating real handwritten word images
with guaranteed text uniqueness, single-writer consistency, and leakage-free
data partitioning.
Usage:
python synthetic_line_generator.py \
--unique_words_dir ./data/Unique-Words \
--person_names_dir ./data/Person-Names \
--output_dir ./data/Synthetic-Lines \
--training_writers ./writers/Training.txt \
--validation_writers ./writers/Validation.txt \
--testing_writers ./writers/Testing.txt
"""
import os
import glob
import random
import argparse
import re
import numpy as np
from collections import defaultdict
from datetime import datetime
from PIL import Image, TiffImagePlugin
# =============================================================================
# ARGUMENT PARSER
# =============================================================================
def parse_args():
    """Build and parse the command-line interface for the line generator."""
    ap = argparse.ArgumentParser(
        description="Constrained Recipe-Based Synthetic Handwritten Line Generation")
    # --- data locations ---
    ap.add_argument("--unique_words_dir", type=str, required=True,
                    help="Root directory of unique word samples (with Training/Validation/Testing subfolders)")
    ap.add_argument("--person_names_dir", type=str, required=True,
                    help="Root directory of person name samples (with Training/Validation/Testing subfolders)")
    ap.add_argument("--output_dir", type=str, required=True,
                    help="Output directory for generated synthetic lines")
    # --- writer-list files, one per output subset ---
    ap.add_argument("--training_writers", type=str, default=None,
                    help="Text file listing training writer IDs (one per line)")
    ap.add_argument("--validation_writers", type=str, default=None,
                    help="Text file listing validation writer IDs (one per line)")
    ap.add_argument("--testing_writers", type=str, default=None,
                    help="Text file listing testing writer IDs (one per line)")
    # --- canvas geometry and composition (plain integer knobs) ---
    for flag, default in (
        ("--img_height", 155),
        ("--img_width", 2470),
        ("--spacing_min", 10),
        ("--spacing_max", 30),
        ("--baseline_jitter", 1),
        ("--left_margin", 8),
        ("--right_margin", 8),
    ):
        ap.add_argument(flag, type=int, default=default)
    ap.add_argument("--baseline_ratio", type=float, default=0.75)
    ap.add_argument("--text_height_ratio", type=float, default=0.88)
    # --- writer grouping ---
    ap.add_argument("--unique_group_size", type=int, default=20,
                    help="Number of writers sharing the same unique words")
    ap.add_argument("--person_group_size", type=int, default=5,
                    help="Number of writers sharing the same person names")
    # --- generation control ---
    ap.add_argument("--max_groups", type=int, default=None,
                    help="Process only first N groups (for testing)")
    ap.add_argument("--seed", type=int, default=42)
    return ap.parse_args()
# =============================================================================
# CONFIGURATION (set from args)
# =============================================================================
class Config:
    """Container for all generation parameters, populated from parsed CLI args."""

    # Attribute names copied verbatim from the parsed args namespace.
    _COPIED = (
        "img_height", "img_width", "baseline_ratio", "text_height_ratio",
        "baseline_jitter", "left_margin", "right_margin",
        "unique_group_size", "person_group_size",
    )

    def __init__(self, args):
        for name in self._COPIED:
            setattr(self, name, getattr(args, name))
        # (min, max) pixel gap between adjacent words, pre-scaling.
        self.spacing_range = (args.spacing_min, args.spacing_max)
        # Lines of this many words get an exact-fit scale (see compose_line).
        self.max_words_for_scaling = 8
        self.source_subsets = ["Training", "Validation", "Testing"]
        # Side effect: route TIFF writing through libtiff (needed for LZW output).
        TiffImagePlugin.WRITE_LIBTIFF = True
# =============================================================================
# LOGGER
# =============================================================================
class Logger:
    """Writes generation progress to a log file, optionally echoing to stdout."""

    def __init__(self, log_path):
        # Ensure the log's parent directory exists (use "." for bare filenames).
        os.makedirs(os.path.dirname(log_path) if os.path.dirname(log_path) else ".", exist_ok=True)
        self.f = open(log_path, "w", encoding="utf-8")
        self.f.write(f"{'=' * 80}\nSYNTHETIC LINE GENERATION LOG\n"
                     f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n{'=' * 80}\n\n")
        self.f.flush()

    def log(self, message, console=True):
        """Append *message* to the log file; echo to stdout unless console=False."""
        self.f.write(message + "\n")
        self.f.flush()
        if console:
            print(message)

    def section(self, title):
        """Log a '=' banner section header."""
        self.log(f"\n{'=' * 60}\n{title}\n{'=' * 60}")

    def subsection(self, title):
        """Log a '-' banner subsection header."""
        self.log(f"\n{'-' * 40}\n{title}\n{'-' * 40}")

    def log_line_detail(self, filename, writer_id, recipe_type, subgroup, words_info, text):
        """Log one generated line's provenance (file only, never console).

        Fix: the original logged a literal "(unknown).tif" placeholder and
        never used the *filename* parameter; log the actual base name instead.
        """
        self.log(f" {filename}.tif", console=False)
        self.log(f" Writer: DNDK{writer_id:05d}", console=False)
        self.log(f" Type: {recipe_type}"
                 + (f" (sub-group {subgroup[0]}-{subgroup[1]})" if subgroup else ""), console=False)
        self.log(f" Words: {words_info}", console=False)
        self.log(f" Text: {text}", console=False)

    def close(self):
        """Close the underlying log file handle."""
        self.f.close()
# =============================================================================
# TEXT NORMALIZATION
# =============================================================================
def normalize_label(text):
    """Collapse whitespace in a transcription label.

    Non-breaking spaces and CR/LF become plain spaces, then runs of
    whitespace collapse to single spaces with the ends trimmed.
    None maps to the empty string.
    """
    if text is None:
        return ""
    cleaned = text.replace("\u00A0", " ").replace("\r", " ").replace("\n", " ")
    return " ".join(cleaned.strip().split())
# =============================================================================
# OTSU THRESHOLD
# =============================================================================
def otsu_threshold(gray_uint8):
    """Compute Otsu's global binarization threshold for a uint8 grayscale array.

    Scans all 256 candidate levels and returns the one that maximizes the
    between-class variance (first maximum wins). Returns the default 127
    when no split improves on zero variance (e.g. a constant image).
    """
    hist = np.bincount(gray_uint8.ravel(), minlength=256).astype(np.float64)
    n_pixels = gray_uint8.size
    weighted_total = np.dot(np.arange(256), hist)
    best_thr = 127
    best_var = 0.0
    weight_bg = 0.0
    weighted_bg = 0.0
    for level in range(256):
        weight_bg += hist[level]
        if weight_bg == 0:
            continue  # no background pixels accumulated yet
        weight_fg = n_pixels - weight_bg
        if weight_fg == 0:
            break  # every pixel is background; no further splits possible
        weighted_bg += level * hist[level]
        mean_bg = weighted_bg / weight_bg
        mean_fg = (weighted_total - weighted_bg) / weight_fg
        between_var = weight_bg * weight_fg * (mean_bg - mean_fg) ** 2
        if between_var > best_var:
            best_var = between_var
            best_thr = level
    return best_thr
# =============================================================================
# INK EXTRACTION WITH DIACRITICAL PRESERVATION
# =============================================================================
def build_word_cutout_with_baseline(img_pil):
    """Extract ink region with adaptive Otsu threshold (+20 for diacritical preservation)

    Returns ``(cutout_rgba, baseline, bbox)``: an RGBA crop of the ink
    bounding box whose alpha channel is the ink mask, the baseline row
    index within the crop, and the bounding box in source coordinates.
    """
    img_rgb = img_pil.convert("RGB")
    gray = np.array(img_rgb.convert("L"))
    thr = otsu_threshold(gray)
    # Loosen the Otsu split by +20 gray levels (capped at 250) so faint
    # strokes and diacritical marks above the strict threshold stay in the mask.
    thr_adjusted = min(thr + 20, 250)
    ink = gray < thr_adjusted
    # Sanity fallbacks: a near-empty or near-full mask means thresholding
    # failed; retry with a tighter threshold, then with "anything non-white".
    if ink.mean() < 0.001 or ink.mean() > 0.8:
        ink = gray > max(thr - 20, 5)
    if ink.mean() < 0.001:
        ink = gray < 250
    rows = np.where(ink.any(axis=1))[0]
    cols = np.where(ink.any(axis=0))[0]
    if len(rows) == 0 or len(cols) == 0:
        # No ink at all: return a fully transparent cutout of the whole
        # image with a nominal baseline at 80% of its height.
        h, w = gray.shape
        alpha = Image.new("L", (w, h), 0)
        crop = img_rgb.crop((0, 0, w, h)).convert("RGBA")
        crop.putalpha(alpha)
        return crop, int(h * 0.8), (0, 0, w, h)
    top, bottom = rows[0], rows[-1]
    left, right = cols[0], cols[-1]
    bbox = (left, top, right + 1, bottom + 1)
    ink_crop = ink[top:bottom + 1, left:right + 1]
    h, w = ink_crop.shape
    # Baseline estimate: median of the lowest ink pixel per column
    # (robust to descenders); falls back to 80% height if undefined.
    bottoms = np.full(w, np.nan)
    for x in range(w):
        ys = np.where(ink_crop[:, x])[0]
        if ys.size > 0:
            bottoms[x] = ys[-1]
    baseline = int(np.nanmedian(bottoms)) if np.isfinite(bottoms).any() else int(h * 0.8)
    # Use the binary ink mask as the alpha channel so only strokes get pasted.
    alpha = Image.fromarray((ink_crop.astype(np.uint8)) * 255, mode="L")
    crop = img_rgb.crop(bbox).convert("RGBA")
    crop.putalpha(alpha)
    return crop, baseline, bbox
# =============================================================================
# SCALING HELPERS
# =============================================================================
def scale_word_to_text_height(word_rgba, baseline, target_h):
    """Resize a word cutout so its height equals *target_h*, scaling the baseline too.

    Returns ``(scaled_rgba, scaled_baseline)``; dimensions are clamped to >= 1.
    A non-positive height returns the input unchanged.
    """
    width, height = word_rgba.size
    if height <= 0:
        return word_rgba, baseline
    factor = target_h / float(height)
    new_size = (max(1, int(round(width * factor))),
                max(1, int(round(height * factor))))
    return word_rgba.resize(new_size, Image.LANCZOS), int(round(baseline * factor))
def apply_uniform_scale(word_rgba, baseline, factor):
    """Uniformly scale a word cutout and its baseline by *factor* (min size 1x1)."""
    width, height = word_rgba.size
    scaled = word_rgba.resize(
        (max(1, int(round(width * factor))), max(1, int(round(height * factor)))),
        Image.LANCZOS)
    return scaled, int(round(baseline * factor))
def calculate_scale_for_exact_words(words, cfg):
    """Compute the uniform scale that makes *words* exactly fill the usable width.

    Measures each word at text height, adds average inter-word gaps, and
    returns usable_width / measured_width. Any image that fails to load
    aborts the measurement and yields the neutral scale 1.0.
    """
    target_h = int(round(cfg.img_height * cfg.text_height_ratio))
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin
    width_sum = 0
    for record in words:
        try:
            rgba, bl, _ = build_word_cutout_with_baseline(Image.open(record["path"]))
            scaled, _ = scale_word_to_text_height(rgba, bl, target_h)
            width_sum += scaled.size[0]
        except Exception:
            return 1.0
    # Account for the average gap between each pair of consecutive words.
    width_sum += np.mean(cfg.spacing_range) * (len(words) - 1)
    return usable / width_sum if width_sum > 0 else 1.0
def calculate_standard_scale_factor(word_samples, cfg):
    """Estimate a global down-scale factor from a random sample of word widths.

    Sizes the scale so that ``cfg.max_words_for_scaling`` average-width words
    (plus average gaps) fit inside the usable canvas width, then applies a
    5% safety margin. Returns 0.95 when no scaling is needed, 1.0-equivalent
    neutral behavior when nothing can be measured.
    """
    target_h = int(round(cfg.img_height * cfg.text_height_ratio))
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin
    sample_size = min(50, len(word_samples))
    if sample_size == 0:
        return 1.0
    measured = []
    for record in random.sample(word_samples, sample_size):
        try:
            rgba, bl, _ = build_word_cutout_with_baseline(Image.open(record["path"]))
            scaled, _ = scale_word_to_text_height(rgba, bl, target_h)
            measured.append(scaled.size[0])
        except Exception:
            continue  # unreadable sample — skip it
    if not measured:
        return 1.0
    estimate = (np.mean(measured) * cfg.max_words_for_scaling
                + np.mean(cfg.spacing_range) * (cfg.max_words_for_scaling - 1))
    factor = (usable / estimate) if estimate > usable else 1.0
    return factor * 0.95
# =============================================================================
# FILENAME PARSING AND GROUPING
# =============================================================================
def parse_writer_id(filename):
    """Extract the numeric writer ID from a ``DNDK<id>_...`` filename, or None."""
    match = re.search(r"DNDK(\d+)_", filename)
    if match is None:
        return None
    return int(match.group(1))
def parse_word_number(filename):
    """Extract the word number (second numeric field before the extension), or None."""
    match = re.search(r"_(\d+)_(\d+)\.", filename)
    return None if match is None else int(match.group(2))
def get_unique_word_group(writer_id, group_size):
    """Return the inclusive (first, last) writer-ID range of the writer's group.

    Writers are partitioned into consecutive 1-based blocks of *group_size*.
    """
    first = ((writer_id - 1) // group_size) * group_size + 1
    return (first, first + group_size - 1)
def get_person_name_subgroup(writer_id, group_size):
    """Return the inclusive (first, last) ID range of the writer's person-name sub-group."""
    block_index = (writer_id - 1) // group_size
    lo = block_index * group_size + 1
    hi = lo + group_size - 1
    return (lo, hi)
# =============================================================================
# LINE LENGTH SAMPLING (P(k) distribution)
# =============================================================================
def sample_line_length():
    """Sample a target word count for one synthetic line.

    Distribution P(k): 50% -> 8 words, 25% -> 7 words, 25% -> uniform
    over {4, 5, 6}.
    """
    draw = random.random()
    if draw < 0.25:
        return 7
    if draw < 0.75:
        return 8
    return random.choice([4, 5, 6])
# =============================================================================
# WRITER FILE LOADING
# =============================================================================
def load_writer_names_from_file(filepath):
    """Read a writer-list file and return the set of integer writer IDs.

    Each non-empty line may contain either a ``DNDK<digits>`` token or a
    bare integer; anything else is silently ignored. A missing or None
    path yields an empty set.
    """
    ids = set()
    if filepath is None or not os.path.isfile(filepath):
        return ids
    with open(filepath, "r", encoding="utf-8") as fh:
        for raw in fh:
            token = raw.strip()
            if not token:
                continue
            match = re.search(r"DNDK(\d+)", token)
            if match:
                ids.add(int(match.group(1)))
                continue
            try:
                ids.add(int(token))
            except ValueError:
                pass  # unrecognized line format — skip it
    return ids
# =============================================================================
# WORD POOL LOADING (cross-subset search)
# =============================================================================
def load_word_pool_all_subsets(root_dir, source_tag, allowed_writers, source_subsets,
                               allowed_exts=(".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")):
    """Search all source subfolders for word samples belonging to allowed writers

    Images must have a sibling ``.txt`` transcription file to be loaded.
    Writer ID and word number are parsed from the filename; the first file
    found for a (writer, word_num) pair wins (scan order follows
    *source_subsets*, so earlier subsets take precedence).

    Returns ``(writer_words, total_loaded, subset_counts)`` where
    *writer_words* maps writer_id -> {word_num: sample record dict}.
    An empty (non-None) *allowed_writers* yields empty results;
    None means "accept all writers".
    """
    if allowed_writers is not None and len(allowed_writers) == 0:
        return {}, 0, {}
    writer_words = defaultdict(dict)
    total_loaded = 0
    subset_counts = {}
    for src_subset in source_subsets:
        subset_dir = os.path.join(root_dir, src_subset)
        if not os.path.exists(subset_dir):
            continue
        loaded = 0
        for ext in allowed_exts:
            for path in glob.glob(os.path.join(subset_dir, f"*{ext}")):
                fn = os.path.basename(path)
                # Require the sibling transcription file; skip orphan images.
                txt_path = os.path.splitext(path)[0] + ".txt"
                if not os.path.isfile(txt_path):
                    continue
                wid = parse_writer_id(fn)
                if wid is None:
                    continue
                if allowed_writers is not None and wid not in allowed_writers:
                    continue
                wn = parse_word_number(fn)
                if wn is None:
                    continue
                # First occurrence wins: skip duplicates found later in the scan.
                if wn in writer_words[wid]:
                    continue
                try:
                    with open(txt_path, "r", encoding="utf-8") as f:
                        lbl = normalize_label(f.read())
                except UnicodeDecodeError:
                    # Fallback decode; presumably intended for files saved
                    # with a BOM — NOTE(review): confirm this path is reachable.
                    with open(txt_path, "r", encoding="utf-8-sig") as f:
                        lbl = normalize_label(f.read())
                writer_words[wid][wn] = dict(
                    path=path, label=lbl, writer=f"DNDK{wid:05d}",
                    writer_id=wid, word_num=wn, source=source_tag, found_in=src_subset)
                loaded += 1
        subset_counts[src_subset] = loaded
        total_loaded += loaded
    return dict(writer_words), total_loaded, subset_counts
# =============================================================================
# BUILD HIERARCHICAL GROUPS
# =============================================================================
def build_big_groups(unique_data, person_data, cfg):
    """Organize writers into hierarchical groups for recipe generation.

    Each big group (keyed by its unique-word writer-ID range) collects its
    member writers, the union of their unique word numbers, per-writer word
    pools, and nested person-name sub-groups with their name numbers.
    """
    groups = {}
    for wid in set(unique_data.keys()) | set(person_data.keys()):
        key = get_unique_word_group(wid, cfg.unique_group_size)
        group = groups.setdefault(
            key, dict(writers=set(), unique_word_nums=set(), subgroups={}, pools={}))
        group["writers"].add(wid)
        group["pools"][wid] = dict(
            unique=unique_data.get(wid, {}),
            person_names=person_data.get(wid, {}))
        if wid in unique_data:
            group["unique_word_nums"].update(unique_data[wid].keys())
        if wid in person_data:
            sub_key = get_person_name_subgroup(wid, cfg.person_group_size)
            sub = group["subgroups"].setdefault(sub_key, dict(writers=set(), pn_nums=set()))
            sub["writers"].add(wid)
            sub["pn_nums"].update(person_data[wid].keys())
    return groups
# =============================================================================
# SAVE HELPERS
# =============================================================================
def save_tiff_with_metadata(image, save_path):
    """Save *image* as an LZW-compressed, 300-dpi TIFF.

    Sets TIFF tag 317 (Predictor) to 2 — horizontal differencing — which
    improves LZW compression of scanned-document imagery.
    """
    rgb = image if image.mode == "RGB" else image.convert("RGB")
    tags = TiffImagePlugin.ImageFileDirectory_v2()
    tags[317] = 2
    rgb.save(save_path, format="TIFF", compression="tiff_lzw", dpi=(300, 300), tiffinfo=tags)
def save_pair(out_dir, base_name, image, text):
    """Write ``<base_name>.tif`` and its ``<base_name>.txt`` transcription into *out_dir*."""
    os.makedirs(out_dir, exist_ok=True)
    save_tiff_with_metadata(image, os.path.join(out_dir, base_name + ".tif"))
    label_path = os.path.join(out_dir, base_name + ".txt")
    with open(label_path, "w", encoding="utf-8") as fh:
        fh.write(normalize_label(text))
# =============================================================================
# RTL LINE COMPOSITION
# =============================================================================
def compose_line(words, standard_scale, cfg):
    """Compose words right-to-left on a white canvas with baseline alignment

    Returns ``(canvas_rgb, text)`` where *text* joins the word labels in
    logical order. 8-word lines get an exact-fit scale; shorter lines reuse
    the precomputed *standard_scale*. If content still overflows the usable
    width, everything is rescaled once with 8% slack.
    """
    target_text_h = int(round(cfg.img_height * cfg.text_height_ratio))
    target_baseline_y = int(round(cfg.img_height * cfg.baseline_ratio))
    actual_scale = (calculate_scale_for_exact_words(words, cfg)
                    if len(words) == 8 else standard_scale)
    def process_words(scale):
        # Load each word, cut out the ink, normalize to the text height,
        # then apply the line-level scale.
        result = []
        for wp in words:
            img = Image.open(wp["path"])
            rgba, bl, _ = build_word_cutout_with_baseline(img)
            rgba_s, bl_s = scale_word_to_text_height(rgba, bl, target_text_h)
            rgba_f, bl_f = apply_uniform_scale(rgba_s, bl_s, scale)
            result.append(dict(img=rgba_f, baseline=bl_f, label=normalize_label(wp["label"])))
        return result
    processed = process_words(actual_scale)
    word_widths = [p["img"].size[0] for p in processed]
    # Random inter-word gaps, scaled with the line.
    gaps = [int(random.randint(*cfg.spacing_range) * actual_scale) for _ in range(max(0, len(processed) - 1))]
    content_w = sum(word_widths) + sum(gaps)
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin
    if content_w > usable:
        # Overflow: shrink by the overflow ratio (with extra slack) and redo.
        actual_scale *= (usable / content_w) * 0.92
        processed = process_words(actual_scale)
        word_widths = [p["img"].size[0] for p in processed]
        gaps = [int(random.randint(*cfg.spacing_range) * actual_scale) for _ in range(max(0, len(processed) - 1))]
        content_w = sum(word_widths) + sum(gaps)
    canvas = Image.new("RGB", (cfg.img_width, cfg.img_height), color=(255, 255, 255))
    # Right-align: paste in reversed (visual) order so the first logical
    # word sits at the right edge, as the script is right-to-left.
    usable_right = cfg.img_width - cfg.right_margin
    offset_x = max(cfg.left_margin, usable_right - content_w)
    ordered = list(reversed(processed))
    gaps_ordered = list(reversed(gaps)) if gaps else []
    x = offset_x
    for idx, p in enumerate(ordered):
        w, h = p["img"].size
        x = min(x, cfg.img_width - cfg.right_margin - w)
        # Small vertical jitter around the target baseline, clamped to canvas.
        jitter = random.randint(-cfg.baseline_jitter, cfg.baseline_jitter)
        y = max(0, min(target_baseline_y + jitter - p["baseline"], cfg.img_height - h))
        canvas.paste(p["img"], (x, y), p["img"])
        x += w
        if idx < len(ordered) - 1 and idx < len(gaps_ordered):
            x += gaps_ordered[idx]
            x = min(x, cfg.img_width - cfg.right_margin)
    return canvas, " ".join(p["label"] for p in processed)
# =============================================================================
# PROCESS ONE BIG GROUP
# =============================================================================
def process_big_group(group_range, group_data, out_dir, standard_scale, cfg, logger):
    """Generate synthetic lines for one big writer group and save them to *out_dir*.

    Builds per-writer tag->record pools, samples signature-deduplicated word
    "recipes", then greedily assigns each recipe to the first eligible writer
    who owns all its words and has not used any of them yet. Returns the
    number of lines created.
    """
    all_writers = sorted(group_data["writers"])
    if not all_writers:
        return 0
    logger.subsection(f"Big Group {group_range[0]}-{group_range[1]} ({len(all_writers)} writers)")
    # Build tagged word pool per writer
    # Tags: ("u", word_num) for unique words; ("p", subgroup_start, word_num)
    # for person names, so identical numbers in different sub-groups stay distinct.
    writer_tagged_pool = {}
    for wid in all_writers:
        pool = {}
        for wn, rec in group_data["pools"][wid]["unique"].items():
            pool[("u", wn)] = rec
        sg = get_person_name_subgroup(wid, cfg.person_group_size)
        for wn, rec in group_data["pools"][wid]["person_names"].items():
            pool[("p", sg[0], wn)] = rec
        writer_tagged_pool[wid] = pool
    unique_tags = sorted([("u", wn) for wn in group_data["unique_word_nums"]])
    sg_pn_tags = {}
    for sg_range, sg_info in sorted(group_data["subgroups"].items()):
        sg_pn_tags[sg_range] = sorted([("p", sg_range[0], wn) for wn in sg_info["pn_nums"]])
    subgroup_list = sorted(group_data["subgroups"].keys())
    # Estimate recipe count
    total_samples = sum(len(p) for p in writer_tagged_pool.values())
    recipe_attempts = max(1, total_samples // 6) * 2
    logger.log(f" Unique tags: {len(unique_tags)} | Sub-groups: {len(subgroup_list)} | "
               f"Samples: {total_samples} | Attempts: {recipe_attempts}")
    # Generate recipes with signature-based uniqueness
    used_signatures = set()
    recipes = []
    for _ in range(recipe_attempts):
        length = sample_line_length()
        # ~40% of attempts try to mix in 1-3 person names (when sub-groups exist).
        include_pn = (random.random() < 0.40) and bool(subgroup_list)
        if include_pn:
            sg = random.choice(subgroup_list)
            pn_tags = sg_pn_tags.get(sg, [])
            if pn_tags and len(unique_tags) >= 1:
                max_pn = min(3, len(pn_tags), length - 1)
                if max_pn >= 1:
                    n_pn = random.randint(1, max_pn)
                    n_u = length - n_pn
                    # Clamp when the group has fewer unique words than requested.
                    if n_u > len(unique_tags):
                        n_u = len(unique_tags)
                        n_pn = length - n_u
                    if n_u >= 1 and 1 <= n_pn <= len(pn_tags):
                        sampled = random.sample(unique_tags, n_u) + random.sample(pn_tags, n_pn)
                        sig = tuple(sorted(sampled))
                        if sig not in used_signatures:
                            recipes.append(dict(tags=sampled, signature=sig, type="mixed", subgroup=sg))
                            used_signatures.add(sig)
                            continue
        # Pure-unique recipe (also the fallback when a mixed attempt failed).
        if len(unique_tags) >= length:
            sampled = random.sample(unique_tags, length)
            sig = tuple(sorted(sampled))
            if sig not in used_signatures:
                recipes.append(dict(tags=sampled, signature=sig, type="pure", subgroup=None))
                used_signatures.add(sig)
    # Assign recipes: mixed first, then pure
    mixed = [r for r in recipes if r["type"] == "mixed"]
    pure = [r for r in recipes if r["type"] == "pure"]
    random.shuffle(mixed)
    random.shuffle(pure)
    writer_used = defaultdict(set)      # per-writer tags already consumed
    writer_counter = defaultdict(int)   # per-writer output line counter
    lines_created = 0
    for recipe in mixed + pure:
        # Mixed recipes may only go to writers of the matching sub-group.
        eligible = (sorted(group_data["subgroups"].get(recipe["subgroup"], {}).get("writers", set()))
                    if recipe["type"] == "mixed" else all_writers)
        for wid in eligible:
            pool = writer_tagged_pool.get(wid, {})
            tags = recipe["tags"]
            # Writer must own every word and must not have used any of them.
            if all(t in pool for t in tags) and all(t not in writer_used[wid] for t in tags):
                word_records = [pool[t] for t in tags]
                img, text = compose_line(word_records, standard_scale, cfg)
                writer_counter[wid] += 1
                base_name = f"DNDK{wid:05d}_6_{writer_counter[wid]}"
                save_pair(out_dir, base_name, img, text)
                for t in tags:
                    writer_used[wid].add(t)
                lines_created += 1
                if lines_created % 50 == 0:
                    logger.log(f" ... {lines_created} lines created")
                break  # recipe assigned; move on to the next recipe
    total_used = sum(len(u) for u in writer_used.values())
    pct = (total_used / total_samples * 100) if total_samples else 0
    logger.log(f" Result: {lines_created} lines | {total_used}/{total_samples} words ({pct:.1f}%)")
    return lines_created
# =============================================================================
# MAIN GENERATOR
# =============================================================================
def generate(args):
    """Run the full generation pipeline: load pools, group writers, emit lines.

    For each output subset (Training/Validation/Testing) the allowed-writer
    list drives leakage-free partitioning: word samples are gathered from
    ALL source subsets on disk, but only for that subset's writers.
    """
    cfg = Config(args)
    random.seed(args.seed)
    np.random.seed(args.seed)
    writer_files = {
        "Training": args.training_writers,
        "Validation": args.validation_writers,
        "Testing": args.testing_writers,
    }
    os.makedirs(args.output_dir, exist_ok=True)
    logger = Logger(os.path.join(args.output_dir, "generation_log.txt"))
    logger.section("CONFIGURATION")
    logger.log(f" Canvas: {cfg.img_width} x {cfg.img_height}")
    logger.log(f" Unique-word group: {cfg.unique_group_size} writers")
    logger.log(f" Person-name group: {cfg.person_group_size} writers")
    logger.log(f" Line lengths: 4-8 (50%->8, 25%->7, 25%->4|5|6)")
    logger.log(f" Person-name mix: ~40% of recipes")
    logger.log(f" Seed: {args.seed}")
    overall = {}
    for out_subset in ["Training", "Validation", "Testing"]:
        logger.section(f"{out_subset.upper()} SUBSET")
        wf = writer_files.get(out_subset)
        if wf and os.path.isfile(wf):
            allowed_writers = load_writer_names_from_file(wf)
            logger.log(f" Writers: {len(allowed_writers)} from {wf}")
            if not allowed_writers:
                continue  # writer file present but empty/unparseable — skip subset
        else:
            # No writer file: fall back to every writer found on disk.
            allowed_writers = None
            logger.log(f" No writer file — using ALL writers")
        unique_data, u_total, u_counts = load_word_pool_all_subsets(
            args.unique_words_dir, "unique", allowed_writers, cfg.source_subsets)
        person_data, p_total, p_counts = load_word_pool_all_subsets(
            args.person_names_dir, "person_name", allowed_writers, cfg.source_subsets)
        logger.log(f" Unique words: {u_total} from {len(unique_data)} writers")
        logger.log(f" Person names: {p_total} from {len(person_data)} writers")
        if not unique_data and not person_data:
            continue
        big_groups = build_big_groups(unique_data, person_data, cfg)
        groups_sorted = sorted(big_groups.keys())
        if args.max_groups:
            groups_sorted = groups_sorted[:args.max_groups]
        # Flatten every record to estimate one subset-wide standard scale.
        all_records = []
        for gdata in big_groups.values():
            for pools in gdata["pools"].values():
                all_records.extend(pools["unique"].values())
                all_records.extend(pools["person_names"].values())
        if not all_records:
            continue
        standard_scale = calculate_standard_scale_factor(all_records, cfg)
        subset_out = os.path.join(args.output_dir, out_subset)
        total_lines = 0
        for gi, g_range in enumerate(groups_sorted, 1):
            n = process_big_group(g_range, big_groups[g_range], subset_out, standard_scale, cfg, logger)
            total_lines += n
        overall[out_subset] = total_lines
        logger.log(f"\n {out_subset} total: {total_lines} lines")
    logger.section("SUMMARY")
    for subset, count in overall.items():
        logger.log(f" {subset}: {count} lines")
    logger.log(f" TOTAL: {sum(overall.values())} lines")
    logger.close()
    print(f"\nDone! {sum(overall.values())} lines generated.")
# =============================================================================
# ENTRY POINT
# =============================================================================
# Entry point: parse CLI arguments and run the full generation pipeline.
if __name__ == "__main__":
    generate(parse_args())