# viral-images/scoring/normalizers.py
# Full Viral Images v1.0 implementation - all modules and configs
# (upstream commit 6ceaa94, verified)
"""Sub-score normalization utilities."""
import numpy as np
from typing import Dict, Any, Optional
def _clamp(x: float, lo: float = 0, hi: float = 100) -> float:
return float(np.clip(x, lo, hi))
def _rescale(x: float, min_val: float, max_val: float) -> float:
"""Rescale x from [min_val, max_val] to [0, 100]."""
if max_val == min_val:
return 50.0
return _clamp(100 * (x - min_val) / (max_val - min_val))
def _gaussian_score(x: float, optimal: float, sigma: float) -> float:
"""Score peaks at `optimal`, decreases with Gaussian falloff."""
return 100 * np.exp(-((x - optimal) ** 2) / (2 * sigma ** 2))
def _sigmoid_normalize(x: float, center: float = 0.5, scale: float = 0.1) -> float:
"""Map x to [0,1] with logistic sigmoid."""
return 1.0 / (1.0 + np.exp(-(x - center) / scale))
class ScoreNormalizer:
    """Normalizes raw feature values to 0–100 sub-scores.

    Each ``normalize_*`` method maps one raw feature (or a small group of
    related features) onto a 0–100 scale.  Curve parameters may be
    overridden via the ``"normalization"`` section of *config*; the inline
    defaults are used otherwise.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None):
        # Expected layout: {"normalization": {"clip_cosine": {...}, ...}}
        self.config = config or {}

    def normalize_concept_match(self, raw_cosine: float) -> float:
        """CLIP cosine similarity → [0, 100]."""
        norm_cfg = self.config.get("normalization", {}).get("clip_cosine", {})
        min_val = norm_cfg.get("min_val", 0.10)
        max_val = norm_cfg.get("max_val", 0.45)
        return _rescale(raw_cosine, min_val, max_val)

    def normalize_visual_focus(self, peak_saliency: float, center_saliency: float,
                               top20_fraction: float) -> float:
        """Saliency metrics → visual focus [0, 100]."""
        # Peak should be strong
        peak_score = peak_saliency * 100
        # Center saliency should be above average; +0.01 guards div-by-zero
        center_score = min(center_saliency / (peak_saliency + 0.01) * 100, 100)
        # Top20 fraction: ideal is 0.15-0.25 (not too diffuse, not too
        # concentrated).  Use the shared Gaussian helper rather than
        # re-implementing the formula inline.
        spread_score = _gaussian_score(top20_fraction, 0.20, 0.08)
        return _clamp(0.4 * peak_score + 0.3 * center_score + 0.3 * spread_score)

    def normalize_readability(self, ocr_confidence: float, text_coverage: float,
                              word_count: int, has_text: bool) -> float:
        """OCR metrics → readability [0, 100]."""
        if not has_text:
            return 70.0  # neutral: no text is not a flaw
        conf_score = ocr_confidence * 100
        # Coverage penalty: prefer ~5-20% of the image area covered by text
        coverage_score = _gaussian_score(text_coverage, 0.12, 0.08)
        # Word count: a moderate amount of text is good
        wc_score = _gaussian_score(word_count, 8, 15)
        return _clamp(0.5 * conf_score + 0.3 * coverage_score + 0.2 * wc_score)

    def normalize_complexity_balance(self, edge_density: float,
                                     color_entropy: float) -> float:
        """Edge density + color entropy → complexity balance [0, 100]."""
        norm_cfg = self.config.get("normalization", {})
        edge_cfg = norm_cfg.get("edge_density", {})
        edge_score = _gaussian_score(
            edge_density,
            edge_cfg.get("optimal", 0.10),
            edge_cfg.get("sigma", 0.06)
        )
        color_cfg = norm_cfg.get("color_entropy", {})
        color_score = _gaussian_score(
            color_entropy,
            color_cfg.get("optimal", 4.5),
            color_cfg.get("sigma", 1.0)
        )
        return _clamp(0.6 * edge_score + 0.4 * color_score)

    def normalize_communication_clarity(self, whitespace: float, contrast: float,
                                        symmetry_lr: float, sharpness: float) -> float:
        """Whitespace, contrast, symmetry, sharpness → clarity [0, 100]."""
        norm_cfg = self.config.get("normalization", {})
        ws_cfg = norm_cfg.get("whitespace", {})
        ws_score = _gaussian_score(
            whitespace,
            ws_cfg.get("optimal", 0.12),
            ws_cfg.get("sigma", 0.08)
        )
        contrast_score = contrast * 100
        sym_score = symmetry_lr * 100  # already in [0,1]
        sharp_score = min(sharpness / 0.5 * 100, 100)  # sharpness normalized
        return _clamp(0.3 * ws_score + 0.3 * contrast_score + 0.2 * sym_score + 0.2 * sharp_score)

    def normalize_neural_richness(self, proxy_score: float) -> float:
        """Neural richness proxy [0,100] is already normalized."""
        return _clamp(proxy_score)

    def normalize_memorability_proxy(self, aesthetic_proxy: float,
                                     colorfulness: float,
                                     sharpness: float) -> float:
        """Aesthetic + colorfulness + sharpness → memorability [0, 100]."""
        aesthetic_score = aesthetic_proxy * 100
        color_score = colorfulness * 100
        sharp_score = min(sharpness / 0.5 * 100, 100)
        return _clamp(0.5 * aesthetic_score + 0.3 * color_score + 0.2 * sharp_score)

    def normalize_improvement_potential(self, sub_scores: Dict[str, float],
                                        target: float = 75.0) -> float:
        """Inverse of how far weakest scores are from target.

        Returns the mean of the three largest (target - score) gaps, so the
        result is bounded by ``target`` (the clamp only matters if callers
        pass target > 100).  An empty ``sub_scores`` yields 0.
        """
        gaps = [max(0, target - s) for s in sub_scores.values()]
        # Average the top-3 gaps so one weak sub-score alone drives the value
        gaps_sorted = sorted(gaps, reverse=True)
        top3 = gaps_sorted[:3]
        avg_gap = np.mean(top3) if top3 else 0
        return _clamp(avg_gap)  # bounded by `target`, clamp is a safety net