Upload Scripts/synthetic_line_generator.py with huggingface_hub
Scripts/synthetic_line_generator.py
ADDED
"""
Constrained Recipe-Based Synthetic Handwritten Line Generation

Generates synthetic text lines by concatenating real handwritten word images
with guaranteed text uniqueness, single-writer consistency, and leakage-free
data partitioning.

Usage:
    python synthetic_line_generator.py \
        --unique_words_dir ./data/Unique-Words \
        --person_names_dir ./data/Person-Names \
        --output_dir ./data/Synthetic-Lines \
        --training_writers ./writers/Training.txt \
        --validation_writers ./writers/Validation.txt \
        --testing_writers ./writers/Testing.txt
"""

import os
import glob
import random
import argparse
import re
import numpy as np
from collections import defaultdict
from datetime import datetime
from PIL import Image, TiffImagePlugin

# =============================================================================
# ARGUMENT PARSER
# =============================================================================

def parse_args():
    parser = argparse.ArgumentParser(
        description="Constrained Recipe-Based Synthetic Handwritten Line Generation")

    # Data paths
    parser.add_argument("--unique_words_dir", type=str, required=True,
                        help="Root directory of unique word samples (with Training/Validation/Testing subfolders)")
    parser.add_argument("--person_names_dir", type=str, required=True,
                        help="Root directory of person name samples (with Training/Validation/Testing subfolders)")
    parser.add_argument("--output_dir", type=str, required=True,
                        help="Output directory for generated synthetic lines")

    # Writer files per subset
    parser.add_argument("--training_writers", type=str, default=None,
                        help="Text file listing training writer IDs (one per line)")
    parser.add_argument("--validation_writers", type=str, default=None,
                        help="Text file listing validation writer IDs (one per line)")
    parser.add_argument("--testing_writers", type=str, default=None,
                        help="Text file listing testing writer IDs (one per line)")

    # Canvas and composition parameters
    parser.add_argument("--img_height", type=int, default=155)
    parser.add_argument("--img_width", type=int, default=2470)
    parser.add_argument("--baseline_ratio", type=float, default=0.75)
    parser.add_argument("--text_height_ratio", type=float, default=0.88)
    parser.add_argument("--spacing_min", type=int, default=10)
    parser.add_argument("--spacing_max", type=int, default=30)
    parser.add_argument("--baseline_jitter", type=int, default=1)
    parser.add_argument("--left_margin", type=int, default=8)
    parser.add_argument("--right_margin", type=int, default=8)

    # Grouping parameters
    parser.add_argument("--unique_group_size", type=int, default=20,
                        help="Number of writers sharing the same unique words")
    parser.add_argument("--person_group_size", type=int, default=5,
                        help="Number of writers sharing the same person names")

    # Generation parameters
    parser.add_argument("--max_groups", type=int, default=None,
                        help="Process only first N groups (for testing)")
    parser.add_argument("--seed", type=int, default=42)

    return parser.parse_args()

# =============================================================================
# CONFIGURATION (set from args)
# =============================================================================

class Config:
    """Holds all generation parameters"""
    def __init__(self, args):
        self.img_height = args.img_height
        self.img_width = args.img_width
        self.baseline_ratio = args.baseline_ratio
        self.text_height_ratio = args.text_height_ratio
        self.spacing_range = (args.spacing_min, args.spacing_max)
        self.baseline_jitter = args.baseline_jitter
        self.left_margin = args.left_margin
        self.right_margin = args.right_margin
        self.unique_group_size = args.unique_group_size
        self.person_group_size = args.person_group_size
        self.max_words_for_scaling = 8
        self.source_subsets = ["Training", "Validation", "Testing"]

# Route TIFF saves through libtiff so the LZW-compressed output below works.
TiffImagePlugin.WRITE_LIBTIFF = True

# =============================================================================
# LOGGER
# =============================================================================

class Logger:
    def __init__(self, log_path):
        os.makedirs(os.path.dirname(log_path) if os.path.dirname(log_path) else ".", exist_ok=True)
        self.f = open(log_path, "w", encoding="utf-8")
        self.f.write(f"{'=' * 80}\nSYNTHETIC LINE GENERATION LOG\n"
                     f"Generated: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n{'=' * 80}\n\n")
        self.f.flush()

    def log(self, message, console=True):
        self.f.write(message + "\n")
        self.f.flush()
        if console:
            print(message)

    def section(self, title):
        self.log(f"\n{'=' * 60}\n{title}\n{'=' * 60}")

    def subsection(self, title):
        self.log(f"\n{'-' * 40}\n{title}\n{'-' * 40}")

    def log_line_detail(self, filename, writer_id, recipe_type, subgroup, words_info, text):
        self.log(f"  {filename}.tif", console=False)
        self.log(f"    Writer: DNDK{writer_id:05d}", console=False)
        self.log(f"    Type: {recipe_type}"
                 + (f" (sub-group {subgroup[0]}-{subgroup[1]})" if subgroup else ""), console=False)
        self.log(f"    Words: {words_info}", console=False)
        self.log(f"    Text: {text}", console=False)

    def close(self):
        self.f.close()

# =============================================================================
# TEXT NORMALIZATION
# =============================================================================

def normalize_label(text):
    if text is None:
        return ""
    text = text.replace("\u00A0", " ").replace("\r", " ").replace("\n", " ")
    return " ".join(text.strip().split())

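# For example (illustrative input): non-breaking spaces and newlines collapse
# to single spaces, and surrounding whitespace is stripped:
#   normalize_label("  foo\u00A0bar\n baz ")  ->  "foo bar baz"
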
# =============================================================================
# OTSU THRESHOLD
# =============================================================================

def otsu_threshold(gray_uint8):
    """Pick the gray level that maximizes between-class variance of the histogram."""
    hist = np.bincount(gray_uint8.ravel(), minlength=256).astype(np.float64)
    total = gray_uint8.size
    sum_total = np.dot(np.arange(256), hist)
    sum_b, w_b, max_var, threshold = 0.0, 0.0, 0.0, 127
    for t in range(256):
        w_b += hist[t]                   # background weight (levels <= t)
        if w_b == 0:
            continue
        w_f = total - w_b                # foreground weight (levels > t)
        if w_f == 0:
            break
        sum_b += t * hist[t]
        m_b = sum_b / w_b                # background mean
        m_f = (sum_total - sum_b) / w_f  # foreground mean
        var_between = w_b * w_f * (m_b - m_f) ** 2
        if var_between > max_var:
            max_var = var_between
            threshold = t
    return threshold

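# A quick sanity check against a reference implementation (illustrative;
# scikit-image is not a dependency of this script, and the two conventions
# can differ by one gray level):
#   from skimage.filters import threshold_otsu
#   g = np.array(Image.open("word.png").convert("L"))   # hypothetical path
#   assert abs(otsu_threshold(g) - threshold_otsu(g)) <= 1
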
# =============================================================================
# INK EXTRACTION WITH DIACRITICAL PRESERVATION
# =============================================================================

def build_word_cutout_with_baseline(img_pil):
    """Extract ink region with adaptive Otsu threshold (+20 for diacritical preservation)"""
    img_rgb = img_pil.convert("RGB")
    gray = np.array(img_rgb.convert("L"))
    thr = otsu_threshold(gray)

    # Raise the threshold slightly so faint diacritics survive binarization.
    thr_adjusted = min(thr + 20, 250)
    ink = gray < thr_adjusted
    # Fallbacks for degenerate binarizations (nearly empty or inverted crops).
    if ink.mean() < 0.001 or ink.mean() > 0.8:
        ink = gray > max(thr - 20, 5)
        if ink.mean() < 0.001:
            ink = gray < 250

    rows = np.where(ink.any(axis=1))[0]
    cols = np.where(ink.any(axis=0))[0]
    if len(rows) == 0 or len(cols) == 0:
        # No ink found: return a fully transparent cutout of the whole image.
        h, w = gray.shape
        alpha = Image.new("L", (w, h), 0)
        crop = img_rgb.crop((0, 0, w, h)).convert("RGBA")
        crop.putalpha(alpha)
        return crop, int(h * 0.8), (0, 0, w, h)

    top, bottom = rows[0], rows[-1]
    left, right = cols[0], cols[-1]
    bbox = (left, top, right + 1, bottom + 1)
    ink_crop = ink[top:bottom + 1, left:right + 1]
    h, w = ink_crop.shape

    # Baseline = median row of the lowest ink pixel in each column.
    bottoms = np.full(w, np.nan)
    for x in range(w):
        ys = np.where(ink_crop[:, x])[0]
        if ys.size > 0:
            bottoms[x] = ys[-1]
    baseline = int(np.nanmedian(bottoms)) if np.isfinite(bottoms).any() else int(h * 0.8)

    alpha = Image.fromarray((ink_crop.astype(np.uint8)) * 255, mode="L")
    crop = img_rgb.crop(bbox).convert("RGBA")
    crop.putalpha(alpha)
    return crop, baseline, bbox

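# Typical use (illustrative path):
#   cutout, baseline, bbox = build_word_cutout_with_baseline(Image.open("word.png"))
# `cutout` is an RGBA image whose alpha channel is the ink mask, `baseline` is
# a row index inside the cutout, and `bbox` locates the cutout in the source.
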
# =============================================================================
# SCALING HELPERS
# =============================================================================

def scale_word_to_text_height(word_rgba, baseline, target_h):
    w, h = word_rgba.size
    if h <= 0:
        return word_rgba, baseline
    s = target_h / float(h)
    return (word_rgba.resize((max(1, int(round(w * s))), max(1, int(round(h * s)))),
                             Image.LANCZOS), int(round(baseline * s)))

def apply_uniform_scale(word_rgba, baseline, factor):
    w, h = word_rgba.size
    return (word_rgba.resize((max(1, int(round(w * factor))), max(1, int(round(h * factor)))),
                             Image.LANCZOS), int(round(baseline * factor)))

def calculate_scale_for_exact_words(words, cfg):
    """Scale factor fitting exactly these words (plus average gaps) into the usable width."""
    target_h = int(round(cfg.img_height * cfg.text_height_ratio))
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin
    total_w = 0
    for wp in words:
        try:
            img = Image.open(wp["path"])
            rgba, bl, _ = build_word_cutout_with_baseline(img)
            rgba_s, _ = scale_word_to_text_height(rgba, bl, target_h)
            total_w += rgba_s.size[0]
        except Exception:
            return 1.0
    total_w += np.mean(cfg.spacing_range) * (len(words) - 1)
    return usable / total_w if total_w > 0 else 1.0

def calculate_standard_scale_factor(word_samples, cfg):
    """Global scale factor estimated from up to 50 random samples, sized for 8-word lines."""
    target_h = int(round(cfg.img_height * cfg.text_height_ratio))
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin
    n = min(50, len(word_samples))
    if n == 0:
        return 1.0
    widths = []
    for wp in random.sample(word_samples, n):
        try:
            img = Image.open(wp["path"])
            rgba, bl, _ = build_word_cutout_with_baseline(img)
            rgba_s, _ = scale_word_to_text_height(rgba, bl, target_h)
            widths.append(rgba_s.size[0])
        except Exception:
            continue
    if not widths:
        return 1.0
    est = np.mean(widths) * cfg.max_words_for_scaling + np.mean(cfg.spacing_range) * (cfg.max_words_for_scaling - 1)
    sf = (usable / est) if est > usable else 1.0
    return sf * 0.95  # 5% safety margin

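# Worked example with the default canvas (mean word width is illustrative):
# the mean gap from the default spacing range (10, 30) is 20 px, so with a
# mean height-normalized word width of 320 px an 8-word line is estimated at
# 320*8 + 20*7 = 2700 px.  The usable width is 2470 - 8 - 8 = 2454 px, giving
# a factor of 2454/2700 * 0.95 ~= 0.86.
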
# =============================================================================
# FILENAME PARSING AND GROUPING
# =============================================================================

def parse_writer_id(filename):
    m = re.search(r"DNDK(\d+)_", filename)
    return int(m.group(1)) if m else None

def parse_word_number(filename):
    m = re.search(r"_(\d+)_(\d+)\.", filename)
    return int(m.group(2)) if m else None

def get_unique_word_group(writer_id, group_size):
    g = (writer_id - 1) // group_size
    start = g * group_size + 1
    return (start, start + group_size - 1)

def get_person_name_subgroup(writer_id, group_size):
    g = (writer_id - 1) // group_size
    start = g * group_size + 1
    return (start, start + group_size - 1)

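# For example (filename illustrative, default group sizes):
#   parse_writer_id("DNDK00023_6_17.tif")   -> 23
#   parse_word_number("DNDK00023_6_17.tif") -> 17
#   get_unique_word_group(23, 20)           -> (21, 40)
#   get_person_name_subgroup(23, 5)         -> (21, 25)
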
# =============================================================================
# LINE LENGTH SAMPLING (P(k) distribution)
# =============================================================================

def sample_line_length():
    r = random.random()
    if r < 0.25:
        return 7
    elif r < 0.75:
        return 8
    else:
        return random.choice([4, 5, 6])

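# Resulting distribution over words-per-line k:
#   P(8) = 0.50, P(7) = 0.25, P(4) = P(5) = P(6) = 0.25/3 ~= 0.083
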
# =============================================================================
# WRITER FILE LOADING
# =============================================================================

def load_writer_names_from_file(filepath):
    writers = set()
    if filepath is None or not os.path.isfile(filepath):
        return writers
    with open(filepath, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            m = re.search(r"DNDK(\d+)", line)
            if m:
                writers.add(int(m.group(1)))
            else:
                try:
                    writers.add(int(line))
                except ValueError:
                    pass
    return writers

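# Writer files accept either form, one entry per line (IDs illustrative):
#   DNDK00023
#   42
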
# =============================================================================
# WORD POOL LOADING (cross-subset search)
# =============================================================================

def load_word_pool_all_subsets(root_dir, source_tag, allowed_writers, source_subsets,
                               allowed_exts=(".png", ".jpg", ".jpeg", ".bmp", ".tif", ".tiff")):
    """Search all source subfolders for word samples belonging to allowed writers"""
    if allowed_writers is not None and len(allowed_writers) == 0:
        return {}, 0, {}

    writer_words = defaultdict(dict)
    total_loaded = 0
    subset_counts = {}

    for src_subset in source_subsets:
        subset_dir = os.path.join(root_dir, src_subset)
        if not os.path.exists(subset_dir):
            continue
        loaded = 0
        for ext in allowed_exts:
            for path in glob.glob(os.path.join(subset_dir, f"*{ext}")):
                fn = os.path.basename(path)
                txt_path = os.path.splitext(path)[0] + ".txt"
                if not os.path.isfile(txt_path):
                    continue
                wid = parse_writer_id(fn)
                if wid is None:
                    continue
                if allowed_writers is not None and wid not in allowed_writers:
                    continue
                wn = parse_word_number(fn)
                if wn is None:
                    continue
                if wn in writer_words[wid]:
                    continue
                try:
                    # "utf-8-sig" also reads plain UTF-8 and strips a BOM if present.
                    with open(txt_path, "r", encoding="utf-8-sig") as f:
                        lbl = normalize_label(f.read())
                except UnicodeDecodeError:
                    # Last resort for mis-encoded label files: keep what decodes.
                    with open(txt_path, "r", encoding="utf-8", errors="replace") as f:
                        lbl = normalize_label(f.read())

                writer_words[wid][wn] = dict(
                    path=path, label=lbl, writer=f"DNDK{wid:05d}",
                    writer_id=wid, word_num=wn, source=source_tag, found_in=src_subset)
                loaded += 1

        subset_counts[src_subset] = loaded
        total_loaded += loaded

    return dict(writer_words), total_loaded, subset_counts

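# Expected layout under each root directory (writer/word IDs illustrative):
#   <root>/Training/DNDK00023_6_17.tif    word image
#   <root>/Training/DNDK00023_6_17.txt    its transcript
# plus Validation/ and Testing/ siblings; all three are searched for every
# output subset, with leakage prevented by the per-subset writer lists.
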
# =============================================================================
# BUILD HIERARCHICAL GROUPS
# =============================================================================

def build_big_groups(unique_data, person_data, cfg):
    all_writers = set(unique_data.keys()) | set(person_data.keys())
    groups = {}
    for wid in all_writers:
        bg = get_unique_word_group(wid, cfg.unique_group_size)
        if bg not in groups:
            groups[bg] = dict(writers=set(), unique_word_nums=set(), subgroups={}, pools={})
        g = groups[bg]
        g["writers"].add(wid)
        g["pools"][wid] = dict(unique=unique_data.get(wid, {}), person_names=person_data.get(wid, {}))
        if wid in unique_data:
            g["unique_word_nums"].update(unique_data[wid].keys())
        if wid in person_data:
            sg = get_person_name_subgroup(wid, cfg.person_group_size)
            if sg not in g["subgroups"]:
                g["subgroups"][sg] = dict(writers=set(), pn_nums=set())
            g["subgroups"][sg]["writers"].add(wid)
            g["subgroups"][sg]["pn_nums"].update(person_data[wid].keys())
    return groups

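# With the default sizes, writers 1-20 form big group (1, 20), which contains
# the person-name sub-groups (1, 5), (6, 10), (11, 15) and (16, 20).
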
# =============================================================================
# SAVE HELPERS
# =============================================================================

def save_tiff_with_metadata(image, save_path):
    if image.mode != "RGB":
        image = image.convert("RGB")
    info = TiffImagePlugin.ImageFileDirectory_v2()
    info[317] = 2  # TIFF tag 317 (Predictor) = 2: horizontal differencing for better LZW compression
    image.save(save_path, format="TIFF", compression="tiff_lzw", dpi=(300, 300), tiffinfo=info)

def save_pair(out_dir, base_name, image, text):
    os.makedirs(out_dir, exist_ok=True)
    save_tiff_with_metadata(image, os.path.join(out_dir, base_name + ".tif"))
    with open(os.path.join(out_dir, base_name + ".txt"), "w", encoding="utf-8") as f:
        f.write(normalize_label(text))

# =============================================================================
# RTL LINE COMPOSITION
# =============================================================================

def compose_line(words, standard_scale, cfg):
    """Compose words right-to-left on a white canvas with baseline alignment"""
    target_text_h = int(round(cfg.img_height * cfg.text_height_ratio))
    target_baseline_y = int(round(cfg.img_height * cfg.baseline_ratio))

    # 8-word lines get an exact-fit scale; shorter lines share the global one.
    actual_scale = (calculate_scale_for_exact_words(words, cfg)
                    if len(words) == 8 else standard_scale)

    def process_words(scale):
        result = []
        for wp in words:
            img = Image.open(wp["path"])
            rgba, bl, _ = build_word_cutout_with_baseline(img)
            rgba_s, bl_s = scale_word_to_text_height(rgba, bl, target_text_h)
            rgba_f, bl_f = apply_uniform_scale(rgba_s, bl_s, scale)
            result.append(dict(img=rgba_f, baseline=bl_f, label=normalize_label(wp["label"])))
        return result

    processed = process_words(actual_scale)
    word_widths = [p["img"].size[0] for p in processed]
    gaps = [int(random.randint(*cfg.spacing_range) * actual_scale) for _ in range(max(0, len(processed) - 1))]
    content_w = sum(word_widths) + sum(gaps)
    usable = cfg.img_width - cfg.left_margin - cfg.right_margin

    # If the line overflows, shrink once more (with an 8% margin) and redo.
    if content_w > usable:
        actual_scale *= (usable / content_w) * 0.92
        processed = process_words(actual_scale)
        word_widths = [p["img"].size[0] for p in processed]
        gaps = [int(random.randint(*cfg.spacing_range) * actual_scale) for _ in range(max(0, len(processed) - 1))]
        content_w = sum(word_widths) + sum(gaps)

    canvas = Image.new("RGB", (cfg.img_width, cfg.img_height), color=(255, 255, 255))
    usable_right = cfg.img_width - cfg.right_margin
    offset_x = max(cfg.left_margin, usable_right - content_w)

    # Paste in reverse so the logically first word lands rightmost (RTL),
    # while the returned transcript keeps logical word order.
    ordered = list(reversed(processed))
    gaps_ordered = list(reversed(gaps)) if gaps else []

    x = offset_x
    for idx, p in enumerate(ordered):
        w, h = p["img"].size
        x = min(x, cfg.img_width - cfg.right_margin - w)
        jitter = random.randint(-cfg.baseline_jitter, cfg.baseline_jitter)
        y = max(0, min(target_baseline_y + jitter - p["baseline"], cfg.img_height - h))
        canvas.paste(p["img"], (x, y), p["img"])
        x += w
        if idx < len(ordered) - 1 and idx < len(gaps_ordered):
            x += gaps_ordered[idx]
            x = min(x, cfg.img_width - cfg.right_margin)

    return canvas, " ".join(p["label"] for p in processed)

# =============================================================================
# PROCESS ONE BIG GROUP
# =============================================================================

def process_big_group(group_range, group_data, out_dir, standard_scale, cfg, logger):
    all_writers = sorted(group_data["writers"])
    if not all_writers:
        return 0

    logger.subsection(f"Big Group {group_range[0]}-{group_range[1]} ({len(all_writers)} writers)")

    # Build tagged word pool per writer
    writer_tagged_pool = {}
    for wid in all_writers:
        pool = {}
        for wn, rec in group_data["pools"][wid]["unique"].items():
            pool[("u", wn)] = rec
        sg = get_person_name_subgroup(wid, cfg.person_group_size)
        for wn, rec in group_data["pools"][wid]["person_names"].items():
            pool[("p", sg[0], wn)] = rec
        writer_tagged_pool[wid] = pool

    unique_tags = sorted([("u", wn) for wn in group_data["unique_word_nums"]])
    sg_pn_tags = {}
    for sg_range, sg_info in sorted(group_data["subgroups"].items()):
        sg_pn_tags[sg_range] = sorted([("p", sg_range[0], wn) for wn in sg_info["pn_nums"]])
    subgroup_list = sorted(group_data["subgroups"].keys())

    # Estimate recipe count
    total_samples = sum(len(p) for p in writer_tagged_pool.values())
    recipe_attempts = max(1, total_samples // 6) * 2

    logger.log(f"  Unique tags: {len(unique_tags)} | Sub-groups: {len(subgroup_list)} | "
               f"Samples: {total_samples} | Attempts: {recipe_attempts}")

    # Generate recipes with signature-based uniqueness
    used_signatures = set()
    recipes = []

    for _ in range(recipe_attempts):
        length = sample_line_length()
        include_pn = (random.random() < 0.40) and bool(subgroup_list)

        if include_pn:
            sg = random.choice(subgroup_list)
            pn_tags = sg_pn_tags.get(sg, [])
            if pn_tags and len(unique_tags) >= 1:
                max_pn = min(3, len(pn_tags), length - 1)
                if max_pn >= 1:
                    n_pn = random.randint(1, max_pn)
                    n_u = length - n_pn
                    if n_u > len(unique_tags):
                        n_u = len(unique_tags)
                        n_pn = length - n_u
                    if n_u >= 1 and 1 <= n_pn <= len(pn_tags):
                        sampled = random.sample(unique_tags, n_u) + random.sample(pn_tags, n_pn)
                        sig = tuple(sorted(sampled))
                        if sig not in used_signatures:
                            recipes.append(dict(tags=sampled, signature=sig, type="mixed", subgroup=sg))
                            used_signatures.add(sig)
            continue

        if len(unique_tags) >= length:
            sampled = random.sample(unique_tags, length)
            sig = tuple(sorted(sampled))
            if sig not in used_signatures:
                recipes.append(dict(tags=sampled, signature=sig, type="pure", subgroup=None))
                used_signatures.add(sig)

    # Assign recipes: mixed first, then pure
    mixed = [r for r in recipes if r["type"] == "mixed"]
    pure = [r for r in recipes if r["type"] == "pure"]
    random.shuffle(mixed)
    random.shuffle(pure)

    writer_used = defaultdict(set)
    writer_counter = defaultdict(int)
    lines_created = 0

    for recipe in mixed + pure:
        eligible = (sorted(group_data["subgroups"].get(recipe["subgroup"], {}).get("writers", set()))
                    if recipe["type"] == "mixed" else all_writers)

        for wid in eligible:
            pool = writer_tagged_pool.get(wid, {})
            tags = recipe["tags"]
            if all(t in pool for t in tags) and all(t not in writer_used[wid] for t in tags):
                word_records = [pool[t] for t in tags]
                img, text = compose_line(word_records, standard_scale, cfg)

                writer_counter[wid] += 1
                base_name = f"DNDK{wid:05d}_6_{writer_counter[wid]}"
                save_pair(out_dir, base_name, img, text)

                for t in tags:
                    writer_used[wid].add(t)

                lines_created += 1
                if lines_created % 50 == 0:
                    logger.log(f"  ... {lines_created} lines created")
                break

    total_used = sum(len(u) for u in writer_used.values())
    pct = (total_used / total_samples * 100) if total_samples else 0
    logger.log(f"  Result: {lines_created} lines | {total_used}/{total_samples} words ({pct:.1f}%)")

    return lines_created

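# Uniqueness bookkeeping, summarized: each signature (the sorted tag tuple) is
# rendered at most once per big group, each recipe goes to exactly one eligible
# writer (note the break above), and writer_used prevents any word image from
# appearing in two lines by the same writer.
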
# =============================================================================
# MAIN GENERATOR
# =============================================================================

def generate(args):
    cfg = Config(args)

    random.seed(args.seed)
    np.random.seed(args.seed)

    writer_files = {
        "Training": args.training_writers,
        "Validation": args.validation_writers,
        "Testing": args.testing_writers,
    }

    os.makedirs(args.output_dir, exist_ok=True)
    logger = Logger(os.path.join(args.output_dir, "generation_log.txt"))

    logger.section("CONFIGURATION")
    logger.log(f"  Canvas: {cfg.img_width} x {cfg.img_height}")
    logger.log(f"  Unique-word group: {cfg.unique_group_size} writers")
    logger.log(f"  Person-name group: {cfg.person_group_size} writers")
    logger.log("  Line lengths: 4-8 (50%->8, 25%->7, 25%->4|5|6)")
    logger.log("  Person-name mix: ~40% of recipes")
    logger.log(f"  Seed: {args.seed}")

    overall = {}
    for out_subset in ["Training", "Validation", "Testing"]:
        logger.section(f"{out_subset.upper()} SUBSET")

        wf = writer_files.get(out_subset)
        if wf and os.path.isfile(wf):
            allowed_writers = load_writer_names_from_file(wf)
            logger.log(f"  Writers: {len(allowed_writers)} from {wf}")
            if not allowed_writers:
                continue
        else:
            allowed_writers = None
            logger.log("  No writer file — using ALL writers")

        unique_data, u_total, u_counts = load_word_pool_all_subsets(
            args.unique_words_dir, "unique", allowed_writers, cfg.source_subsets)
        person_data, p_total, p_counts = load_word_pool_all_subsets(
            args.person_names_dir, "person_name", allowed_writers, cfg.source_subsets)

        logger.log(f"  Unique words: {u_total} from {len(unique_data)} writers")
        logger.log(f"  Person names: {p_total} from {len(person_data)} writers")

        if not unique_data and not person_data:
            continue

        big_groups = build_big_groups(unique_data, person_data, cfg)
        groups_sorted = sorted(big_groups.keys())
        if args.max_groups:
            groups_sorted = groups_sorted[:args.max_groups]

        all_records = []
        for gdata in big_groups.values():
            for pools in gdata["pools"].values():
                all_records.extend(pools["unique"].values())
                all_records.extend(pools["person_names"].values())

        if not all_records:
            continue

        standard_scale = calculate_standard_scale_factor(all_records, cfg)
        subset_out = os.path.join(args.output_dir, out_subset)

        total_lines = 0
        for g_range in groups_sorted:
            n = process_big_group(g_range, big_groups[g_range], subset_out, standard_scale, cfg, logger)
            total_lines += n

        overall[out_subset] = total_lines
        logger.log(f"\n  {out_subset} total: {total_lines} lines")

    logger.section("SUMMARY")
    for subset, count in overall.items():
        logger.log(f"  {subset}: {count} lines")
    logger.log(f"  TOTAL: {sum(overall.values())} lines")
    logger.close()

    print(f"\nDone! {sum(overall.values())} lines generated.")

# =============================================================================
# ENTRY POINT
# =============================================================================

if __name__ == "__main__":
    generate(parse_args())