# Hugging Face upload-page residue (kept for provenance, commented out so the
# module parses):
# ysharma's picture
# ysharma HF Staff
# Upload app_v3.py
# 161740e verified
"""
PII Reveal - Document Privacy Explorer (v3 β€” "Inspector")
==========================================================
Redesigned frontend matching the mockup in pii_reveal_redesign.html,
addressing ui-critique-2.txt:
- Scanner/inspector aesthetic, not dashboard
- Three-way typography: serif for document body, mono for technical
values (IBAN, URLs, emails, phones, dates, secrets), sans for UI chrome
- Stats hierarchy: 22.7% is the hero, other stats step down
- Thin 4px distribution bar between numbers and legend
- Sidebar rows ARE the toggle (no checkboxes). Off = dimmed
- Speakers get neutral swatches so they don't read as a 9th category
- Actions footer: Redact and export (primary), Copy sanitized, Download report
- Harmonized category palette tuned for a privacy/security tool
Backend (model, server, endpoints) is identical to app.py.
"""
# ── stdlib ───────────────────────────────────────────────────────
import dataclasses
import functools
import json
import math
import os
import re
import tempfile
from bisect import bisect_left, bisect_right
from collections.abc import Sequence
from dataclasses import dataclass
from pathlib import Path
from typing import Final
# ── third-party ──────────────────────────────────────────────────
import gradio as gr
import spaces
import tiktoken
import torch
import torch.nn.functional as F
from fastapi import UploadFile, File
from fastapi.responses import HTMLResponse, JSONResponse
from huggingface_hub import snapshot_download
from safetensors import safe_open
# ── configuration ────────────────────────────────────────────────
# Model repo is overridable via the MODEL_ID env var; HF_TOKEN is optional and
# only needed for private repos. The snapshot is fetched once at import time,
# so MODEL_DIR always points at a fully downloaded local checkpoint copy.
MODEL_REPO = os.getenv("MODEL_ID", "charles-first-org/second-model")
HF_TOKEN = os.getenv("HF_TOKEN", None)
MODEL_DIR = Path(snapshot_download(MODEL_REPO, token=HF_TOKEN))
# Harmonized palette from the mockup. `mono` flags which categories should
# render in monospace (technical values: dates, IBANs, URLs, emails, phones,
# secrets). Names and addresses stay in serif prose.
# NOTE(review): `cls` is presumably the CSS class hook consumed by the frontend
# highlighter — confirm against FRONTEND_HTML.
CATEGORIES_META = {
    "private_person": {"color": "#E24B4A", "cls": "hp", "label": "Person", "mono": False},
    "private_date": {"color": "#7F77DD", "cls": "hd", "label": "Date", "mono": True},
    "private_address": {"color": "#1D9E75", "cls": "ha", "label": "Address", "mono": False},
    "private_email": {"color": "#378ADD", "cls": "he", "label": "Email", "mono": True},
    "account_number": {"color": "#BA7517", "cls": "hac", "label": "Account", "mono": True},
    "private_url": {"color": "#D85A30", "cls": "hu", "label": "URL", "mono": True},
    "secret": {"color": "#D4537E", "cls": "hs", "label": "Secret", "mono": True},
    "private_phone": {"color": "#639922", "cls": "hph", "label": "Phone", "mono": True},
}
# =====================================================================
# MODEL ARCHITECTURE + INFERENCE (from reference implementation)
# =====================================================================
# Model-type tag a checkpoint's config.json must declare to be loadable here.
PRIVACY_FILTER_MODEL_TYPE: Final[str] = "privacy_filter"
# Keys that must all be present in config.json (enforced by
# validate_model_config_contract).
REQUIRED_MODEL_CONFIG_KEYS: Final[tuple[str, ...]] = (
    "model_type", "encoding", "num_hidden_layers", "num_experts",
    "experts_per_token", "vocab_size", "num_labels", "hidden_size",
    "intermediate_size", "head_dim", "num_attention_heads",
    "num_key_value_heads", "sliding_window", "bidirectional_context",
    "bidirectional_left_context", "bidirectional_right_context",
    "default_n_ctx", "initial_context_length", "rope_theta",
    "rope_scaling_factor", "rope_ntk_alpha", "rope_ntk_beta", "param_dtype",
)
# "O" marks the non-PII background class.
BACKGROUND_CLASS_LABEL: Final[str] = "O"
# BIOES boundary tags: Begin / Inside / End / Single-token.
BOUNDARY_PREFIXES: Final[tuple[str, ...]] = ("B", "I", "E", "S")
# Span-level classes (background first); names match CATEGORIES_META keys.
SPAN_CLASS_NAMES: Final[tuple[str, ...]] = (
    BACKGROUND_CLASS_LABEL,
    "account_number", "private_address", "private_date", "private_email",
    "private_person", "private_phone", "private_url", "secret",
)
# Token-level label set: "O" plus every (boundary, class) pairing —
# 1 + 8*4 = 33 labels, matching the num_labels the config contract enforces.
NER_CLASS_NAMES: Final[tuple[str, ...]] = (BACKGROUND_CLASS_LABEL,) + tuple(
    f"{prefix}-{base}"
    for base in SPAN_CLASS_NAMES if base != BACKGROUND_CLASS_LABEL
    for prefix in BOUNDARY_PREFIXES
)
# Additive transition-score keys read from viterbi_calibration.json.
VITERBI_TRANSITION_BIAS_KEYS: Final[tuple[str, ...]] = (
    "transition_bias_background_stay", "transition_bias_background_to_start",
    "transition_bias_inside_to_continue", "transition_bias_inside_to_end",
    "transition_bias_end_to_background", "transition_bias_end_to_start",
)
# Operating point looked up inside the calibration file's "operating_points".
DEFAULT_VITERBI_CALIBRATION_PRESET: Final[str] = "default"
def validate_model_config_contract(cfg: dict, *, context: str) -> None:
    """Fail fast when a checkpoint config violates the expected contract.

    Checks for missing keys, the correct model type, symmetric bidirectional
    context, a consistent sliding window, and the fixed label-count / dtype
    choices. Raises ValueError with *context*-prefixed messages on violation.
    """
    absent = [key for key in REQUIRED_MODEL_CONFIG_KEYS if key not in cfg]
    if absent:
        raise ValueError(f"{context} missing keys: {', '.join(absent)}")
    if cfg.get("model_type") != PRIVACY_FILTER_MODEL_TYPE:
        raise ValueError(f"{context} model_type must be {PRIVACY_FILTER_MODEL_TYPE!r}")
    if cfg.get("bidirectional_context") is not True:
        raise ValueError(f"{context} must use bidirectional_context=true")
    left = cfg.get("bidirectional_left_context")
    right = cfg.get("bidirectional_right_context")
    both_ints = isinstance(left, int) and isinstance(right, int)
    if not both_ints or left != right or left < 0:
        raise ValueError(f"{context} bidirectional context must be equal non-negative ints")
    if cfg.get("sliding_window") != 2 * left + 1:
        raise ValueError(f"{context} sliding_window must equal 2*context+1")
    if cfg["num_labels"] != 33:
        raise ValueError(f"{context} num_labels must be 33")
    if cfg["param_dtype"] != "bfloat16":
        raise ValueError(f"{context} param_dtype must be bfloat16")
# ── model helpers ────────────────────────────────────────────────
def expert_linear(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor | None) -> torch.Tensor:
n, e, k = x.shape
_, _, _, o = weight.shape
out = torch.bmm(x.reshape(n * e, 1, k), weight.reshape(n * e, k, o)).reshape(n, e, o)
return out + bias if bias is not None else out
@dataclass
class ModelConfig:
    """Subset of the checkpoint config consumed by the Transformer modules."""

    num_hidden_layers: int; num_experts: int; experts_per_token: int
    vocab_size: int; num_labels: int; hidden_size: int; intermediate_size: int
    head_dim: int; num_attention_heads: int; num_key_value_heads: int
    bidirectional_context_size: int; initial_context_length: int
    rope_theta: float; rope_scaling_factor: float; rope_ntk_alpha: float; rope_ntk_beta: float

    @classmethod
    def from_checkpoint_config(cls, cfg: dict, *, context: str) -> "ModelConfig":
        # Copy first so the caller's dict is never mutated; derive the symmetric
        # context size from the (already validated as equal) left context.
        # NOTE: `context` is unused here; it mirrors the validator's signature.
        cfg = dict(cfg)
        cfg["bidirectional_context_size"] = cfg["bidirectional_left_context"]
        # Keep only keys that map onto dataclass fields; extra config entries
        # (e.g. "encoding", "param_dtype") are intentionally dropped.
        fields = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in cfg.items() if k in fields})
class RMSNorm(torch.nn.Module):
    """Root-mean-square layer norm with a learnable scale, computed in fp32."""

    def __init__(self, n: int, eps: float = 1e-5, device=None):
        super().__init__()
        self.eps = eps
        self.scale = torch.nn.Parameter(torch.ones(n, device=device, dtype=torch.float32))

    def forward(self, x):
        # Normalize in float32 for stability, then cast back to x's dtype.
        as_float = x.float()
        inv_rms = torch.rsqrt(as_float.pow(2).mean(-1, keepdim=True) + self.eps)
        return (as_float * inv_rms * self.scale).to(x.dtype)
def apply_rope(x, cos, sin):
    """Rotate the interleaved (even, odd) feature pairs of x by given angles.

    cos/sin are broadcast over the head dimension (unsqueezed at -2) and cast
    to x's dtype; rotated pairs are re-interleaved back to x's original shape.
    """
    c = cos.unsqueeze(-2).to(x.dtype)
    s = sin.unsqueeze(-2).to(x.dtype)
    even = x[..., 0::2]
    odd = x[..., 1::2]
    rotated = torch.stack((even * c - odd * s, odd * c + even * s), dim=-1)
    return rotated.reshape(x.shape)
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embeddings with NTK-style frequency blending.

    cos/sin tables are precomputed on CPU up to max_position_embeddings and
    moved to the target device; forward() re-extends the cache on demand when
    a longer sequence arrives.
    """

    def __init__(self, head_dim, base, dtype, *, initial_context_length=4096,
                 scaling_factor=1.0, ntk_alpha=1.0, ntk_beta=32.0, device=None):
        super().__init__()
        self.head_dim, self.base, self.dtype = head_dim, base, dtype
        self.initial_context_length = initial_context_length
        self.scaling_factor, self.ntk_alpha, self.ntk_beta = scaling_factor, ntk_alpha, ntk_beta
        self.device = device
        # Cache covers the scaled context length (never less than the initial one).
        mp = max(int(initial_context_length * scaling_factor), initial_context_length)
        self.max_position_embeddings = mp
        # Build on CPU first, then move; non-persistent so caches stay out of
        # state_dict.
        cos, sin = self._compute(mp, device=torch.device("cpu"))
        target = device or torch.device("cpu")
        self.register_buffer("cos_cache", cos.to(target), persistent=False)
        self.register_buffer("sin_cache", sin.to(target), persistent=False)

    def _inv_freq(self, device=None):
        # Inverse frequency per even feature index. With scaling_factor > 1,
        # blend interpolated (long-wavelength) and extrapolated frequencies
        # via a linear ramp between the ntk_beta (low) / ntk_alpha (high)
        # wavelength cutoffs.
        device = device or self.device
        freq = self.base ** (torch.arange(0, self.head_dim, 2, dtype=torch.float, device=device) / self.head_dim)
        if self.scaling_factor > 1.0:
            d_half = self.head_dim / 2
            low = d_half * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi)) / math.log(self.base)
            high = d_half * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi)) / math.log(self.base)
            interp = 1.0 / (self.scaling_factor * freq)
            extrap = 1.0 / freq
            ramp = (torch.arange(d_half, dtype=torch.float32, device=device) - low) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            return interp * (1 - mask) + extrap * mask
        return 1.0 / freq

    def _compute(self, n, device=None):
        # Outer product of positions and inverse frequencies. The scalar c
        # rescales the tables only when context scaling is active.
        inv_freq = self._inv_freq(device)
        t = torch.arange(n, dtype=torch.float32, device=device or self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        c = 0.1 * math.log(self.scaling_factor) + 1.0 if self.scaling_factor > 1.0 else 1.0
        return (freqs.cos() * c).to(self.dtype), (freqs.sin() * c).to(self.dtype)

    def forward(self, q, k):
        # q/k: (seq_len, heads*head_dim) flat layouts; returns them rotated.
        n = q.shape[0]
        # Grow the cache if the sequence outruns it (computed on CPU, moved over).
        if n > self.cos_cache.shape[0]:
            cos, sin = self._compute(n, torch.device("cpu"))
            self.cos_cache, self.sin_cache = cos.to(q.device), sin.to(q.device)
        # Ensure the caches live on q's device before slicing.
        cc = self.cos_cache.to(q.device) if self.cos_cache.device != q.device else self.cos_cache
        sc = self.sin_cache.to(q.device) if self.sin_cache.device != q.device else self.sin_cache
        cos, sin = cc[:n], sc[:n]
        # Rotate per head, then restore the original flat shapes.
        q = apply_rope(q.view(n, -1, self.head_dim), cos, sin).reshape(q.shape)
        k = apply_rope(k.view(n, -1, self.head_dim), cos, sin).reshape(k.shape)
        return q, k
def sdpa(Q, K, V, S, sm_scale, ctx):
    # Bidirectional sliding-window attention with a learned "sink" logit.
    #   Q: (n, nh, qm, hd)  — qm grouped queries share each K/V head
    #   K, V: (n, nh, hd)
    #   S: per-head sink logits; ctx: half-window, each token sees 2*ctx+1.
    # Returns (n, nh*qm*hd).
    n, nh, qm, hd = Q.shape
    w = 2 * ctx + 1
    # Pad the sequence axis so unfold yields one full window per position;
    # Kw/Vw end up shaped (n, w, nh, hd).
    Kp = F.pad(K, (0, 0, 0, 0, ctx, ctx)); Vp = F.pad(V, (0, 0, 0, 0, ctx, ctx))
    Kw = Kp.unfold(0, w, 1).permute(0, 3, 1, 2); Vw = Vp.unfold(0, w, 1).permute(0, 3, 1, 2)
    # Mask window slots whose absolute position falls outside [0, n).
    idx = torch.arange(w, device=Q.device) - ctx
    pos = torch.arange(n, device=Q.device)[:, None] + idx[None, :]
    valid = (pos >= 0) & (pos < n)
    scores = torch.einsum("nhqd,nwhd->nhqw", Q, Kw).float() * sm_scale
    scores = scores.masked_fill(~valid[:, None, None, :], -float("inf"))
    # Append one sink logit per (head, query) that absorbs probability mass
    # but contributes no value — it is dropped again after the softmax.
    sink = (S * math.log(2.0)).reshape(nh, qm)[None, :, :, None].expand(n, -1, -1, 1)
    scores = torch.cat([scores, sink], dim=-1)
    wt = torch.softmax(scores, dim=-1)[..., :-1].to(V.dtype)
    return torch.einsum("nhqw,nwhd->nhqd", wt, Vw).reshape(n, -1)
class AttentionBlock(torch.nn.Module):
    """Pre-norm grouped-query attention with RoPE and a sliding window."""

    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.head_dim, self.nah, self.nkv = cfg.head_dim, cfg.num_attention_heads, cfg.num_key_value_heads
        # Half-window size: each token attends ctx positions to either side.
        self.ctx = int(cfg.bidirectional_context_size)
        # Learned per-head sink logits consumed by sdpa().
        self.sinks = torch.nn.Parameter(torch.empty(cfg.num_attention_heads, device=device, dtype=torch.float32))
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Fused QKV projection: nah query heads + 2*nkv key/value heads.
        qkv_d = cfg.head_dim * (cfg.num_attention_heads + 2 * cfg.num_key_value_heads)
        self.qkv = torch.nn.Linear(cfg.hidden_size, qkv_d, device=device, dtype=dt)
        self.out = torch.nn.Linear(cfg.head_dim * cfg.num_attention_heads, cfg.hidden_size, device=device, dtype=dt)
        # Applied to both q and k, so the q·k product carries 1/sqrt(head_dim).
        self.qk_scale = 1 / math.sqrt(math.sqrt(cfg.head_dim))
        self.rope = RotaryEmbedding(cfg.head_dim, int(cfg.rope_theta), torch.float32,
                                    initial_context_length=cfg.initial_context_length,
                                    scaling_factor=cfg.rope_scaling_factor,
                                    ntk_alpha=cfg.rope_ntk_alpha, ntk_beta=cfg.rope_ntk_beta, device=device)

    def forward(self, x):
        # x: (seq_len, hidden). Pre-norm, then fused QKV projection.
        t = self.norm(x).to(self.qkv.weight.dtype)
        qkv = F.linear(t, self.qkv.weight, self.qkv.bias)
        hd, nah, nkv = self.head_dim, self.nah, self.nkv
        # Slice the fused projection into query / key / value sections.
        q = qkv[:, :nah * hd].contiguous()
        k = qkv[:, nah * hd:(nah + nkv) * hd].contiguous()
        v = qkv[:, (nah + nkv) * hd:(nah + 2 * nkv) * hd].contiguous()
        q, k = self.rope(q, k)
        q, k = q * self.qk_scale, k * self.qk_scale
        n = q.shape[0]
        # Group query heads under their shared KV head (grouped-query attention);
        # sm_scale is 1.0 here because qk_scale was already applied to both sides.
        q = q.view(n, nkv, nah // nkv, hd); k = k.view(n, nkv, hd); v = v.view(n, nkv, hd)
        ao = sdpa(q, k, v, self.sinks, 1.0, self.ctx).to(self.out.weight.dtype)
        # Residual connection around the attention output projection.
        return x + F.linear(ao, self.out.weight, self.out.bias).to(x.dtype)
def swiglu(x, alpha=1.702, limit=7.0):
    """Clamped SwiGLU activation.

    x is split along the last dim into a gate half and a linear half. The gate
    is clamped from above at *limit* and passed through a sigmoid(alpha*g)
    gating; the linear half is clamped to [-limit, limit] and shifted by +1
    before the product.
    """
    gate, linear = x.chunk(2, dim=-1)
    gate = gate.clamp(max=limit)
    linear = linear.clamp(min=-limit, max=limit)
    return gate * torch.sigmoid(alpha * gate) * (linear + 1)
class MLPBlock(torch.nn.Module):
    """Pre-norm mixture-of-experts MLP with top-k routing and clamped SwiGLU."""

    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.ne, self.ept = cfg.num_experts, cfg.experts_per_token
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Router: one logit per expert.
        self.gate = torch.nn.Linear(cfg.hidden_size, cfg.num_experts, device=device, dtype=dt)
        # Expert weights stored stacked along the first (expert) axis; mlp1
        # produces 2*intermediate features (gate + linear halves for swiglu).
        self.mlp1_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp1_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp2_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size, cfg.hidden_size, device=device, dtype=dt))
        self.mlp2_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, device=device, dtype=dt))

    def forward(self, x):
        t = self.norm(x)
        # Route in float32 for a numerically stable top-k selection.
        gs = F.linear(t.float(), self.gate.weight.float(), self.gate.bias.float())
        top = torch.topk(gs, k=self.ept, dim=-1, sorted=True)
        # Softmax over the selected experts; the /ept here is undone by the
        # *ept inside _chunk, leaving a plain softmax-weighted combination.
        ew = torch.softmax(top.values, dim=-1) / self.ept
        ei = top.indices
        ept = self.ept
        def _chunk(tc, eic, ewc):
            # Push the chunk through its selected experts (weights gathered by
            # index), then mix expert outputs with the routing weights.
            o = expert_linear(tc.float().unsqueeze(1).expand(-1, eic.shape[1], -1),
                              self.mlp1_weight[eic].float(), self.mlp1_bias[eic].float())
            o = swiglu(o)
            o = expert_linear(o.float(), self.mlp2_weight[eic].float(), self.mlp2_bias[eic].float())
            return (torch.einsum("bec,be->bc", o.to(ewc.dtype), ewc) * ept).to(x.dtype)
        # Process at most 32 tokens at a time to bound the memory spike from
        # gathering per-token expert weight matrices.
        cs = 32
        if t.shape[0] > cs:
            parts = [_chunk(t[s:s+cs], ei[s:s+cs], ew[s:s+cs]) for s in range(0, t.shape[0], cs)]
            return x + torch.cat(parts, 0)
        return x + _chunk(t, ei, ew)
class TransformerBlock(torch.nn.Module):
    """One transformer layer: sliding-window attention, then the MoE MLP."""

    def __init__(self, cfg, device=None):
        super().__init__()
        self.attn = AttentionBlock(cfg, device=device)
        self.mlp = MLPBlock(cfg, device=device)

    def forward(self, x):
        hidden = self.attn(x)
        return self.mlp(hidden)
class Checkpoint:
    """Lazy reader over a directory of sharded safetensors files.

    On construction it indexes which shard holds each tensor; get() loads one
    tensor on demand, translating a few in-memory MLP parameter names back to
    their on-disk equivalents.
    """

    @staticmethod
    def build_param_name_map(n):
        # In-memory name -> on-disk name for every layer's MoE parameters.
        renames = {}
        for mem_suffix, disk_suffix in (("mlp1_bias", "swiglu.bias"),
                                        ("mlp1_weight", "swiglu.weight"),
                                        ("mlp2_bias", "out.bias"),
                                        ("mlp2_weight", "out.weight")):
            renames |= {f"block.{i}.mlp.{mem_suffix}": f"block.{i}.mlp.{disk_suffix}"
                        for i in range(n)}
        return renames

    def __init__(self, path, device, num_hidden_layers):
        self.pnm = self.build_param_name_map(num_hidden_layers)
        # Device string in the form safetensors expects ("cpu", "cuda:0", ...).
        self.ds = device.type if device.index is None else f"{device.type}:{device.index}"
        shards = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".safetensors")]
        # Index: tensor key -> shard file that contains it.
        self.map = {}
        for shard in shards:
            with safe_open(shard, framework="pt", device=self.ds) as handle:
                for key in handle.keys():
                    self.map[key] = shard

    def get(self, name):
        # Translate renamed parameters, then read the tensor from its shard.
        mapped = self.pnm.get(name, name)
        with safe_open(self.map[mapped], framework="pt", device=self.ds) as handle:
            return handle.get_tensor(mapped)
class Transformer(torch.nn.Module):
    """Token classifier: embedding -> transformer blocks -> per-token logits."""

    def __init__(self, cfg, device):
        super().__init__()
        dt = torch.bfloat16
        self.embedding = torch.nn.Embedding(cfg.vocab_size, cfg.hidden_size, device=device, dtype=dt)
        self.block = torch.nn.ModuleList([TransformerBlock(cfg, device=device) for _ in range(cfg.num_hidden_layers)])
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Projects the final hidden states to num_labels logits (no bias).
        self.unembedding = torch.nn.Linear(cfg.hidden_size, cfg.num_labels, bias=False, device=device, dtype=dt)

    def forward(self, token_ids):
        # token_ids: 1-D tensor of ids; returns (seq_len, num_labels) logits.
        x = self.embedding(token_ids)
        for blk in self.block:
            x = blk(x)
        return F.linear(self.norm(x), self.unembedding.weight, None)

    @classmethod
    def from_checkpoint(cls, checkpoint_dir, *, device):
        """Build the model from a checkpoint directory, validating the config
        and copying every named parameter in from the safetensors shards.
        Raises ValueError on config violations or parameter shape mismatches.
        """
        # Force full-precision matmuls (TF32 off) — presumably for
        # reproducibility across GPUs; confirm intent with the model authors.
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        torch.set_float32_matmul_precision("highest")
        cp = json.loads((Path(checkpoint_dir) / "config.json").read_text())
        validate_model_config_contract(cp, context=str(checkpoint_dir))
        cfg = ModelConfig.from_checkpoint_config(cp, context=str(checkpoint_dir))
        ckpt = Checkpoint(checkpoint_dir, device, cfg.num_hidden_layers)
        m = cls(cfg, device); m.eval()
        for name, param in m.named_parameters():
            loaded = ckpt.get(name)
            if param.shape != loaded.shape:
                raise ValueError(f"Shape mismatch {name}: {param.shape} vs {loaded.shape}")
            param.data.copy_(loaded)
        return m
# ── label info + span decoding ───────────────────────────────────
@dataclass(frozen=True)
class LabelInfo:
    """Static lookup tables relating BIOES token labels to span classes."""

    # span class name -> {boundary tag -> token-label id}
    boundary_label_lookup: dict[str, dict[str, int]]
    # token-label id -> span-class id
    token_to_span_label: dict[int, int]
    # token-label id -> "B"/"I"/"E"/"S" (None for the background label)
    token_boundary_tags: dict[int, str | None]
    # span-class id -> name; index 0 is the background class
    span_class_names: tuple[str, ...]
    # span class name -> span-class id
    span_label_lookup: dict[str, int]
    # token-label id of the background class
    background_token_label: int
    # span-class id of the background class
    background_span_label: int
def labels_to_spans(labels_by_index, label_info):
    """Decode sparse per-token BIOES labels into (span_class, start, end) spans.

    labels_by_index maps token index -> token-label id. Indices are visited in
    sorted order; a gap between consecutive indices closes any open span, and
    `end` is exclusive. Malformed tag sequences (e.g. an I without a preceding
    B) are recovered from rather than rejected.
    """
    spans, cur_label, start_idx, prev_idx = [], None, None, None
    bg = label_info.background_span_label
    for ti in sorted(labels_by_index):
        lid = labels_by_index[ti]
        sl = label_info.token_to_span_label.get(lid)   # span-class id, or None
        bt = label_info.token_boundary_tags.get(lid)   # "B"/"I"/"E"/"S"/None
        # A gap in token indices terminates any span in progress.
        if prev_idx is not None and ti != prev_idx + 1:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        if sl is None:
            # Unknown token-label id: skip this token entirely.
            prev_idx = ti; continue
        if sl == bg:
            # Background closes the current span just before this token.
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, ti))
            cur_label = start_idx = None; prev_idx = ti; continue
        if bt == "S":
            # Single-token span; flush anything that was still open first.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
        elif bt == "B":
            # New span begins; flush anything that was still open first.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label, start_idx = sl, ti
        elif bt == "I":
            # Continuation. If the class changed (or nothing was open), treat
            # this token as an implicit begin after flushing the old span.
            if cur_label is None or cur_label != sl:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                cur_label, start_idx = sl, ti
        elif bt == "E":
            if cur_label is None or cur_label != sl or start_idx is None:
                # Dangling end: flush any open span, emit a one-token span.
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
            else:
                # Proper end: close the open span including this token.
                spans.append((cur_label, start_idx, ti + 1)); cur_label = start_idx = None
        else:
            # Tag-less non-background label: close whatever was open.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        prev_idx = ti
    # Flush a span still open at end of input.
    if cur_label is not None and start_idx is not None and prev_idx is not None:
        spans.append((cur_label, start_idx, prev_idx + 1))
    return spans
def token_spans_to_char_spans(spans, cs, ce):
    """Translate token-index spans into character spans.

    cs[i] / ce[i] give the first / one-past-last character of token i. Spans
    whose token range falls outside the token list, or which collapse to an
    empty character range, are dropped.
    """
    converted = []
    for label, tok_start, tok_end in spans:
        if not (0 <= tok_start < tok_end <= len(cs)):
            continue
        char_start = cs[tok_start]
        char_end = ce[tok_end - 1]
        if char_end > char_start:
            converted.append((label, char_start, char_end))
    return converted
def trim_char_spans_whitespace(spans, text):
    """Shrink each (label, start, end) char span to exclude edge whitespace.

    Spans outside the text bounds, or that become empty after trimming, are
    dropped; `end` stays exclusive.
    """
    trimmed = []
    for label, start, end in spans:
        if not (0 <= start < end <= len(text)):
            continue
        while start < end and text[start].isspace():
            start += 1
        while end > start and text[end - 1].isspace():
            end -= 1
        if end > start:
            trimmed.append((label, start, end))
    return trimmed
# ── viterbi decoder ──────────────────────────────────────────────
@functools.lru_cache(maxsize=1)
def get_viterbi_transition_biases():
    """Load Viterbi transition biases from the checkpoint's calibration file.

    Returns a dict covering every key in VITERBI_TRANSITION_BIAS_KEYS. Each
    value defaults to 0.0 when the file is missing, the payload is not a dict,
    or a key is absent. When the payload carries "operating_points", the
    default preset's "biases" dict is preferred over the top level.
    """
    zeros = {key: 0.0 for key in VITERBI_TRANSITION_BIAS_KEYS}
    calibration_path = MODEL_DIR / "viterbi_calibration.json"
    if not calibration_path.is_file():
        return zeros
    payload = json.loads(calibration_path.read_text())
    source = payload
    operating_points = payload.get("operating_points")
    if isinstance(operating_points, dict):
        preset = operating_points.get(DEFAULT_VITERBI_CALIBRATION_PRESET)
        if isinstance(preset, dict):
            source = preset.get("biases", source)
    if not isinstance(source, dict):
        return zeros
    return {key: float(source.get(key, 0.0)) for key in VITERBI_TRANSITION_BIAS_KEYS}
class Decoder:
    """Viterbi decoder over BIOES token labels with calibrated transition biases.

    Builds start/end/transition score tables once from a LabelInfo, permitting
    only structurally valid tag sequences (e.g. B may only be followed by I/E
    of the same class) and weighting allowed transitions with the biases from
    the calibration file.
    """

    def __init__(self, label_info):
        nc = len(label_info.token_to_span_label)
        # -1e9 means "forbidden"; allowed entries are overwritten below.
        self._start = torch.full((nc,), -1e9, dtype=torch.float32)
        self._end = torch.full((nc,), -1e9, dtype=torch.float32)
        self._trans = torch.full((nc, nc), -1e9, dtype=torch.float32)
        biases = get_viterbi_transition_biases()
        bg_tok, bg_sp = label_info.background_token_label, label_info.background_span_label
        ttsl, tbt = label_info.token_to_span_label, label_info.token_boundary_tags
        for i in range(nc):
            tag, sl = tbt.get(i), ttsl.get(i)
            # A path may start on B/S or background, and end on E/S or background.
            if tag in {"B", "S"} or i == bg_tok: self._start[i] = 0.0
            if tag in {"E", "S"} or i == bg_tok: self._end[i] = 0.0
            for j in range(nc):
                nt, ns = tbt.get(j), ttsl.get(j)
                if self._valid(tag, sl, nt, ns, bg_tok, bg_sp, j):
                    self._trans[i, j] = self._bias(tag, sl, nt, ns, bg_sp, biases)

    @staticmethod
    def _valid(pt, ps, nt, ns, bti, bsi, ni):
        # Is transition (prev tag pt, prev span ps) -> (next tag nt, span ns)
        # structurally legal? bti/bsi are the background token/span ids.
        nb = ns == bsi or ni == bti
        if (ns is None or nt is None) and not nb: return False
        # From nothing, background, or a completed span: background or a new start.
        if pt is None or ps is None: return nb or nt in {"B", "S"}
        if ps == bsi or pt in {"E", "S"}: return nb or nt in {"B", "S"}
        # Inside a span (B/I): may only continue or close within the same class.
        if pt in {"B", "I"}: return ps == ns and nt in {"I", "E"}
        return False

    @staticmethod
    def _bias(pt, ps, nt, ns, bsi, b):
        # Calibrated additive score for an already-validated transition.
        nb, pb = ns == bsi, ps == bsi
        if pb: return b["transition_bias_background_stay"] if nb else b["transition_bias_background_to_start"]
        if pt in {"B", "I"}: return b["transition_bias_inside_to_continue"] if nt == "I" else b["transition_bias_inside_to_end"]
        return b["transition_bias_end_to_background"] if nb else b["transition_bias_end_to_start"]

    def decode(self, lp):
        # lp: (seq_len, num_token_labels) log-probabilities. Returns the best
        # label id per position; falls back to per-token argmax when no
        # structurally valid path has finite score.
        sl, nc = lp.shape
        if sl == 0: return []
        st = self._start.to(lp.device, lp.dtype)
        en = self._end.to(lp.device, lp.dtype)
        tr = self._trans.to(lp.device, lp.dtype)
        scores = lp[0] + st
        bp = torch.empty((sl - 1, nc), device=lp.device, dtype=torch.int64)
        # Forward pass: record the best predecessor for every (position, label).
        for i in range(1, sl):
            t = scores.unsqueeze(1) + tr
            bs, bi = t.max(dim=0)
            scores = bs + lp[i]; bp[i - 1] = bi
        if not torch.isfinite(scores).any(): return lp.argmax(dim=1).tolist()
        scores += en
        # Backtrack from the best final label.
        path = torch.empty(sl, device=lp.device, dtype=torch.int64)
        path[-1] = scores.argmax()
        for i in range(sl - 2, -1, -1): path[i] = bp[i, path[i + 1]]
        return path.tolist()
# ── runtime singleton ────────────────────────────────────────────
@dataclass(frozen=True)
class InferenceRuntime:
    """Immutable bundle of everything predict_text needs for inference."""

    model: Transformer
    encoding: tiktoken.Encoding
    label_info: LabelInfo
    device: torch.device
    # window length used to chunk long token streams through the model
    n_ctx: int
@functools.lru_cache(maxsize=1)
def get_runtime():
    """Build the process-wide inference runtime (model, tokenizer, label maps).

    Cached with lru_cache so the checkpoint is loaded onto the GPU only once.
    """
    cp = MODEL_DIR
    cfg = json.loads((cp / "config.json").read_text())
    validate_model_config_contract(cfg, context=str(cp))
    device = torch.device("cuda")
    encoding = tiktoken.get_encoding(str(cfg["encoding"]).strip())
    # Derive LabelInfo lookup tables from the fixed BIOES token-label list:
    # scn = span-class names, sll = name->id, bll = name->{tag->token id},
    # ttsl = token id->span id, tbt = token id->boundary tag.
    scn = [BACKGROUND_CLASS_LABEL]; sll = {BACKGROUND_CLASS_LABEL: 0}
    bll, ttsl, tbt = {}, {}, {}
    bg_idx = None
    for idx, name in enumerate(NER_CLASS_NAMES):
        if name == BACKGROUND_CLASS_LABEL:
            bg_idx = idx; ttsl[idx] = 0; tbt[idx] = None; continue
        bnd, base = name.split("-", 1)
        si = sll.get(base)
        if si is None:
            # First time this span class is seen: assign the next id.
            si = len(scn); scn.append(base); sll[base] = si
        ttsl[idx] = si; tbt[idx] = bnd
        bll.setdefault(base, {})[bnd] = idx
    li = LabelInfo(bll, ttsl, tbt, tuple(scn), sll, bg_idx, 0)
    m = Transformer.from_checkpoint(str(cp), device=device)
    return InferenceRuntime(m, encoding, li, device, int(cfg["default_n_ctx"]))
@torch.inference_mode()
def predict_text(runtime, text, decoder):
    """Run the model over *text*; return (source_text, detected_span_dicts).

    The returned text is the tokenizer round-trip of the input (it can differ
    from *text* when the input contains bytes the decode step replaces); span
    start/end offsets are character indices into that returned text.
    """
    tids = tuple(int(t) for t in runtime.encoding.encode(text, allowed_special="all"))
    if not tids: return text, []
    # Score tokens in n_ctx-sized windows and concatenate per-token log-probs.
    scores = []
    for s in range(0, len(tids), runtime.n_ctx):
        e = min(s + runtime.n_ctx, len(tids))
        wt = torch.tensor(tids[s:e], device=runtime.device, dtype=torch.int32)
        lp = F.log_softmax(runtime.model(wt).float(), dim=-1)
        scores.extend(lp.unbind(0))
    stacked = torch.stack(scores, 0)
    dl = decoder.decode(stacked)
    # Safety net: fall back to per-token argmax if the path length is wrong.
    if len(dl) != len(tids): dl = stacked.argmax(dim=1).tolist()
    pli = {i: int(l) for i, l in enumerate(dl)}
    pts = labels_to_spans(pli, runtime.label_info)
    # Rebuild text from the raw token bytes so offsets line up with what the
    # tokenizer actually saw (invalid UTF-8 becomes U+FFFD, not dropped).
    tb = [runtime.encoding.decode_single_token_bytes(t) for t in tids]
    dt = b"".join(tb).decode("utf-8", errors="replace")
    # Per-character byte start/end offsets within the decoded text.
    cbs, cbe = [], []
    bc = 0
    for ch in dt: cbs.append(bc); bc += len(ch.encode("utf-8")); cbe.append(bc)
    # Map each token's byte range to character start/end via binary search.
    cs, ce = [], []
    tbc = 0
    for rb in tb:
        tbs = tbc; tbe = tbs + len(rb); tbc = tbe
        cs.append(bisect_right(cbe, tbs)); ce.append(bisect_left(cbs, tbe))
    pcs = token_spans_to_char_spans(pts, cs, ce)
    # NOTE: `dt if dt != text else text` always yields a string equal to dt.
    pcs = trim_char_spans_whitespace(pcs, dt if dt != text else text)
    src = dt if dt != text else text
    detected = []
    for li, s, e in pcs:
        # Guard against span ids outside the known class list.
        if 0 <= li < len(runtime.label_info.span_class_names):
            lbl = runtime.label_info.span_class_names[li]
        else:
            lbl = f"label_{li}"
        detected.append({"label": lbl, "start": s, "end": e, "text": src[s:e]})
    return src, detected
# =====================================================================
# APPLICATION LAYER
# =====================================================================
def extract_text(file_path: str) -> str:
    """Extract plain text from a PDF or Word document.

    Supports .pdf (via PyMuPDF/fitz) and .docx/.doc (via python-docx); pages
    and non-empty paragraphs are joined with blank lines. Raises ValueError
    for any other extension.

    NOTE(review): python-docx parses OOXML only — a legacy binary .doc will
    raise from inside the library despite being accepted here.
    """
    suffix = Path(file_path).suffix.lower()
    if suffix == ".pdf":
        import fitz  # lazy import: PyMuPDF is only needed for PDFs
        doc = fitz.open(file_path)
        try:
            # Ensure the document handle is closed even if a page fails to
            # extract (the previous version leaked it on exception).
            pages = [page.get_text() for page in doc]
        finally:
            doc.close()
        return "\n\n".join(pages)
    if suffix in (".docx", ".doc"):
        from docx import Document  # lazy import: only needed for Word files
        doc = Document(file_path)
        # Skip empty / whitespace-only paragraphs.
        return "\n\n".join(p.text for p in doc.paragraphs if p.text.strip())
    raise ValueError(f"Unsupported file type: {suffix}")
def compute_stats(text, spans):
    """Summarize PII coverage: char totals, percentage, per-category tallies."""
    total = len(text)
    flagged = sum(span["end"] - span["start"] for span in spans)
    categories = {}
    for span in spans:
        bucket = categories.setdefault(span["label"], {"count": 0, "chars": 0})
        bucket["count"] += 1
        bucket["chars"] += span["end"] - span["start"]
    percentage = round(flagged / total * 100, 1) if total else 0
    return {
        "total_chars": total,
        "pii_chars": flagged,
        "pii_percentage": percentage,
        "total_spans": len(spans),
        "categories": categories,
        "num_categories": len(categories),
        "total_lines": text.count("\n") + 1 if total else 0,
    }
def detect_speakers(text, spans):
    """Attribute PII spans to transcript speakers, when the text looks like one.

    Recognizes "Name:", "[Name] " and "Speaker N:" line prefixes; a speaker
    sticks until the next labelled line. Each span is attributed by the line
    containing its midpoint. Returns {speaker: span_count}, or {} when no
    speaker labels were found (every span fell under "Document").
    """
    prefixes = [r"^([A-Z][a-zA-Z ]{1,30}):\s", r"^\[([^\]]{1,30})\]\s", r"^(Speaker\s*\d+):\s"]
    ranges = []
    offset = 0
    current = None
    for line in text.split("\n"):
        for pattern in prefixes:
            hit = re.match(pattern, line)
            if hit:
                current = hit.group(1).strip()
                break
        ranges.append((offset, offset + len(line), current))
        offset += len(line) + 1
    counts = {}
    for span in spans:
        midpoint = (span["start"] + span["end"]) // 2
        owner = "Document"
        for lo, hi, name in ranges:
            if lo <= midpoint <= hi and name:
                owner = name
                break
        counts[owner] = counts.get(owner, 0) + 1
    return {} if list(counts.keys()) == ["Document"] else counts
@spaces.GPU
def run_pii_analysis(text: str):
    """GPU-accelerated PII detection.

    Returns (source_text, detected_spans) from predict_text. The runtime is
    cached process-wide; a fresh Decoder (small lookup tables) is built per
    call.
    """
    runtime = get_runtime()
    decoder = Decoder(label_info=runtime.label_info)
    source_text, detected = predict_text(runtime, text, decoder)
    return source_text, detected
# ── Gradio Server ────────────────────────────────────────────────
# NOTE(review): gr.Server with .get/.post/.api decorators is not the classic
# Gradio Blocks API — presumably a newer/experimental FastAPI-style surface;
# confirm against the pinned gradio version.
server = gr.Server()
@server.get("/", response_class=HTMLResponse)
async def homepage():
    # Serve the single-page frontend (FRONTEND_HTML is defined further down).
    return FRONTEND_HTML
@server.post("/api/analyze")
async def analyze_document(file: UploadFile = File(...)):
    """Upload endpoint: extract text from a PDF/DOC/DOCX and run PII analysis.

    Responds with the source text, detected spans, aggregate stats, speaker
    attribution, and category display metadata; failures come back as
    {"error": ...} with a 400/500 status code.
    """
    suffix = Path(file.filename).suffix.lower()
    if suffix not in (".pdf", ".doc", ".docx"):
        return JSONResponse({"error": f"Unsupported: {suffix}. Use PDF, DOC, or DOCX."}, 400)
    # Persist the upload to a temp file so the extractors can open it by path;
    # delete=False because it is reopened (and later unlinked) by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(await file.read()); tmp_path = tmp.name
    try:
        text = extract_text(tmp_path)
        if not text.strip():
            return JSONResponse({"error": "No text content found."}, 400)
        source_text, spans = run_pii_analysis(text)
        stats = compute_stats(source_text, spans)
        speakers = detect_speakers(source_text, spans)
        return JSONResponse({
            "filename": file.filename, "text": source_text, "spans": spans,
            "stats": stats, "speakers": speakers,
            "categories_meta": {k: {"color": v["color"], "cls": v["cls"],
                                    "label": v["label"], "mono": v["mono"]}
                                for k, v in CATEGORIES_META.items()},
        })
    except Exception as e:
        # Boundary handler: surface any failure to the client as JSON.
        return JSONResponse({"error": str(e)}, 500)
    finally:
        # Always remove the temp file, success or failure.
        if os.path.exists(tmp_path): os.unlink(tmp_path)
@server.api(name="analyze_text")
def analyze_text_api(text: str) -> str:
    """Gradio API: analyze raw text for PII.

    Returns a JSON string (non-ASCII preserved) with the decoded source text,
    the detected spans, and aggregate stats.
    """
    source_text, spans = run_pii_analysis(text)
    stats = compute_stats(source_text, spans)
    return json.dumps({"text": source_text, "spans": spans, "stats": stats}, ensure_ascii=False)
# ── Frontend HTML (v3 β€” Inspector) ───────────────────────────────
FRONTEND_HTML = r"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width,initial-scale=1">
<title>PII Reveal β€” Inspector</title>
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&family=JetBrains+Mono:wght@400;500&family=Source+Serif+4:opsz,wght@8..60,400;8..60,500&display=swap" rel="stylesheet">
<style>
*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
:root{
/* neutral, paper-leaning palette */
--color-background-primary: #faf9f6;
--color-background-secondary: #f3f2ed;
--color-text-primary: #17171a;
--color-text-secondary: #555560;
--color-text-tertiary: #9a9aa2;
--color-border-tertiary: rgba(23,23,26,0.08);
--color-border-secondary: rgba(23,23,26,0.16);
--border-radius-lg: 10px;
--border-radius-md: 6px;
--border-radius-sm: 4px;
--font-sans: 'Inter', system-ui, -apple-system, Segoe UI, sans-serif;
--font-mono: 'JetBrains Mono', ui-monospace, SFMono-Regular, Menlo, Consolas, monospace;
--font-serif: 'Source Serif 4', 'Source Serif Pro', 'Iowan Old Style', Georgia, serif;
}
html,body{height:100%}
body{
font-family:var(--font-sans);
background:var(--color-background-secondary);
color:var(--color-text-primary);
font-size:13px;line-height:1.5;
-webkit-font-smoothing:antialiased;
font-feature-settings:"cv11","ss01";
}
button{font:inherit;color:inherit}
.sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}
/* ============ UPLOAD VIEW ============ */
#upload-view{min-height:100vh;display:flex;align-items:center;justify-content:center;padding:32px}
.u-card{
width:100%;max-width:520px;
background:var(--color-background-primary);
border:0.5px solid var(--color-border-tertiary);
border-radius:var(--border-radius-lg);
padding:40px 36px;
}
.u-brand{display:flex;align-items:center;gap:10px;margin-bottom:28px}
.u-brand svg{color:var(--color-text-primary)}
.u-brand-name{font-size:13px;font-weight:500}
.u-brand-name .sub{color:var(--color-text-tertiary);font-weight:400;margin-left:4px}
.u-title{
font-family:var(--font-serif);
font-size:28px;font-weight:400;letter-spacing:-0.015em;
line-height:1.15;margin-bottom:8px;
}
.u-sub{color:var(--color-text-secondary);font-size:13px;margin-bottom:24px}
.u-drop{
border:1px dashed var(--color-border-secondary);
border-radius:var(--border-radius-md);
padding:32px 20px;
cursor:pointer;text-align:center;
background:var(--color-background-primary);
transition:all .15s;
position:relative;
}
.u-drop:hover,.u-drop.dragover{
border-color:var(--color-text-primary);
background:var(--color-background-secondary);
}
.u-drop-icon{margin:0 auto 10px;color:var(--color-text-tertiary)}
.u-drop-title{font-size:13px;font-weight:500;margin-bottom:3px}
.u-drop-sub{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary)}
.u-drop input{position:absolute;inset:0;opacity:0;cursor:pointer}
.u-meta{
display:flex;gap:10px;margin-top:20px;
font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);
}
.u-meta span + span::before{content:'Β·';margin-right:10px;color:var(--color-border-secondary)}
/* ============ RESULTS VIEW ============ */
#results-view{display:none;min-height:100vh;padding:14px}
.pr-app{
font-family:var(--font-sans);
border:0.5px solid var(--color-border-tertiary);
border-radius:var(--border-radius-lg);
overflow:hidden;
background:var(--color-background-primary);
color:var(--color-text-primary);
max-width:1240px;margin:0 auto;
}
/* ── top bar ── */
.pr-top{
display:flex;align-items:center;gap:10px;
padding:11px 14px;
border-bottom:0.5px solid var(--color-border-tertiary);
}
.pr-logo{display:flex;align-items:center;gap:8px}
.pr-name{font-size:13px;font-weight:500}
.pr-name-sub{color:var(--color-text-tertiary);font-weight:400;margin-left:4px}
.pr-file-chip{
font-family:var(--font-mono);font-size:11.5px;
color:var(--color-text-secondary);
padding:4px 8px;
background:var(--color-background-secondary);
border-radius:5px;margin-left:4px;
max-width:280px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;
}
.pr-grow{flex:1}
.pr-status{font-size:11.5px;color:var(--color-text-secondary);display:flex;align-items:center;gap:6px}
.pr-status-dot{width:6px;height:6px;border-radius:50%;background:#1D9E75;box-shadow:0 0 0 3px rgba(29,158,117,.14)}
.pr-new{
font-family:var(--font-mono);font-size:11px;
color:var(--color-text-secondary);
background:transparent;border:0.5px solid var(--color-border-secondary);
padding:4px 8px;border-radius:5px;cursor:pointer;margin-left:4px;
}
.pr-new:hover{background:var(--color-background-secondary)}
/* ── stats ── */
.pr-stats{padding:18px 18px 16px;border-bottom:0.5px solid var(--color-border-tertiary)}
.pr-stats-row{display:flex;align-items:flex-end;gap:26px;margin-bottom:14px;flex-wrap:wrap}
.pr-hero{
font-size:32px;font-weight:500;line-height:1;letter-spacing:-0.025em;
font-variant-numeric:tabular-nums;
}
.pr-hero-pct{font-size:17px;opacity:0.55;margin-left:1px;font-weight:400}
.pr-num{font-size:20px;font-weight:500;line-height:1;letter-spacing:-0.01em;font-variant-numeric:tabular-nums}
.pr-lab{font-size:11px;color:var(--color-text-tertiary);margin-top:7px}
.pr-bar{display:flex;height:4px;gap:2px;margin-bottom:12px;border-radius:2px;overflow:hidden}
.pr-bar > span{display:block;height:100%;border-radius:1px;min-width:4px;transition:opacity .15s}
.pr-bar > span:hover{opacity:.82}
.pr-legend{display:flex;flex-wrap:wrap;gap:8px 14px;font-size:12px}
.pr-leg{display:flex;align-items:center;gap:6px;color:var(--color-text-secondary);cursor:pointer;user-select:none}
.pr-leg-sw{width:8px;height:8px;border-radius:2px}
.pr-leg-ct{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);margin-left:1px}
.pr-leg.off{opacity:.45}
.pr-leg.off .pr-leg-sw{opacity:.35}
/* ── body ── */
.pr-body{display:grid;grid-template-columns:minmax(0,1fr) 188px}
/* ── doc pane ── */
.pr-doc-pane{
padding:18px 22px 26px;
border-right:0.5px solid var(--color-border-tertiary);
min-width:0;max-height:calc(100vh - 280px);overflow-y:auto;
}
.pr-doc-meta{
font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);
margin-bottom:14px;display:flex;gap:10px;flex-wrap:wrap;
}
.pr-doc-meta span + span::before{content:'Β·';margin-right:10px;color:var(--color-border-secondary)}
.pr-text{
font-family:var(--font-serif);
font-size:14.5px;line-height:1.85;
color:var(--color-text-primary);
white-space:pre-wrap;word-wrap:break-word;
font-feature-settings:"liga","calt";
}
/* highlights β€” tinted bg + 1.5px underline, like Notion/Linear inline annotations */
.h{
padding:1px 1px;
border-bottom:1.5px solid;
transition:background .15s,opacity .15s;
cursor:pointer;
}
.h:hover{filter:brightness(0.97)}
.h.off{
background:transparent !important;
border-color:transparent !important;
color:inherit;opacity:.9;
}
.hp{background:rgba(226,75,74,.09); border-color:#E24B4A}
.hd{background:rgba(127,119,221,.10);border-color:#7F77DD}
.ha{background:rgba(29,158,117,.09); border-color:#1D9E75}
.he{background:rgba(55,138,221,.09); border-color:#378ADD}
.hac{background:rgba(186,117,23,.11);border-color:#BA7517}
.hu{background:rgba(216,90,48,.10); border-color:#D85A30}
.hs{background:rgba(212,83,126,.11); border-color:#D4537E}
.hph{background:rgba(99,153,34,.11); border-color:#639922}
.m{font-family:var(--font-mono);font-size:12.5px}
/* ── sidebar ── */
.pr-side{
background:var(--color-background-secondary);
padding:16px 14px;
display:flex;flex-direction:column;gap:18px;
min-width:0;
max-height:calc(100vh - 280px);
}
.pr-side-h{font-size:11px;color:var(--color-text-tertiary);font-weight:500;margin:0 0 10px 0;letter-spacing:.02em}
.pr-cat{
display:flex;align-items:center;gap:8px;
padding:5px 2px;font-size:12.5px;
cursor:pointer;user-select:none;
transition:opacity .15s;
}
.pr-cat:hover{opacity:.8}
.pr-cat-sw{width:9px;height:9px;border-radius:2px;flex-shrink:0}
.pr-cat-nm{flex:1;color:var(--color-text-primary)}
.pr-cat-ct{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary)}
.pr-cat.off .pr-cat-nm,
.pr-cat.off .pr-cat-ct{opacity:.45}
.pr-cat.off .pr-cat-sw{opacity:.3}
.pr-speakers .pr-cat-sw{background:var(--color-text-tertiary);opacity:.35;cursor:default}
.pr-speakers .pr-cat{cursor:default}
.pr-speakers .pr-cat:hover{opacity:1}
.pr-acts{
display:flex;flex-direction:column;gap:6px;
margin-top:auto;padding-top:14px;
border-top:0.5px solid var(--color-border-tertiary);
}
.pr-btn{
font-size:12px;padding:8px 10px;
border:0.5px solid var(--color-border-secondary);
border-radius:5px;
background:transparent;color:var(--color-text-primary);
cursor:pointer;text-align:left;
font-family:inherit;
display:flex;align-items:center;justify-content:space-between;
transition:all .12s;
}
.pr-btn:hover{background:var(--color-background-primary)}
.pr-btn-prim{
background:var(--color-text-primary);
color:var(--color-background-primary);
border-color:var(--color-text-primary);
}
.pr-btn-prim:hover{background:#000;border-color:#000}
.pr-btn-arr{font-family:var(--font-mono);font-size:11px;opacity:0.55}
/* empty state */
.empty-rail{color:var(--color-text-tertiary);font-size:12px;font-style:italic}
/* loading */
#loading{
position:fixed;inset:0;
background:rgba(250,249,246,.88);
backdrop-filter:blur(8px);
display:none;flex-direction:column;align-items:center;justify-content:center;
gap:10px;z-index:9999;
}
.l-ring{
width:26px;height:26px;
border:1.5px solid var(--color-border-secondary);
border-top-color:var(--color-text-primary);
border-radius:50%;
animation:sp .7s linear infinite;
}
@keyframes sp{to{transform:rotate(360deg)}}
.l-label{font-family:var(--font-mono);font-size:11.5px;color:var(--color-text-secondary)}
.error-banner{
margin:14px 18px 0;padding:10px 14px;
background:rgba(226,75,74,.08);border:0.5px solid rgba(226,75,74,.35);
border-radius:var(--border-radius-md);
color:#8a2423;font-size:12.5px;display:none;
}
/* tooltip */
.tip{
position:fixed;z-index:9998;
font-family:var(--font-mono);font-size:11px;
color:var(--color-background-primary);
background:var(--color-text-primary);
padding:4px 8px;border-radius:4px;
pointer-events:none;white-space:nowrap;
max-width:420px;overflow:hidden;text-overflow:ellipsis;
}
@media(max-width:840px){
.pr-body{grid-template-columns:1fr}
.pr-doc-pane{border-right:none;border-bottom:0.5px solid var(--color-border-tertiary);max-height:none}
.pr-side{max-height:none}
}
</style>
</head>
<body>
<!-- ============ UPLOAD VIEW ============ -->
<div id="upload-view">
<div class="u-card">
<div class="u-brand">
<svg width="20" height="20" viewBox="0 0 20 20" fill="none">
<rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/>
<circle cx="8.5" cy="8.5" r="3.2" stroke="var(--color-background-primary)" stroke-width="1.4" fill="none"/>
<line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--color-background-primary)" stroke-width="1.4" stroke-linecap="round"/>
</svg>
<span class="u-brand-name">PII Reveal<span class="sub">/ inspector</span></span>
</div>
<h1 class="u-title">Reveal what&rsquo;s hidden in your documents.</h1>
<p class="u-sub">Scan PDFs, DOC and DOCX files for names, accounts, secrets and five other entity types.</p>
<div class="u-drop" id="dropzone">
<div class="u-drop-icon">
<svg width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
<path d="M12 3v13"/><path d="m6 9 6-6 6 6"/><path d="M4 17v2a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2v-2"/>
</svg>
</div>
<div class="u-drop-title">Drop a document, or click to browse</div>
<div class="u-drop-sub">pdf &middot; doc &middot; docx &middot; up to 128k tokens</div>
<input type="file" id="file-input" accept=".pdf,.doc,.docx">
</div>
<div class="u-meta">
<span>openai privacy filter</span>
<span>128k ctx</span>
<span>bfloat16</span>
<span>apache 2.0</span>
</div>
</div>
</div>
<!-- ============ RESULTS VIEW ============ -->
<div id="results-view">
<div class="pr-app" aria-label="PII Reveal inspector">
<div class="pr-top">
<div class="pr-logo">
<svg width="20" height="20" viewBox="0 0 20 20" fill="none" style="color: var(--color-text-primary);">
<rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/>
<circle cx="8.5" cy="8.5" r="3.2" stroke="var(--color-background-primary)" stroke-width="1.4" fill="none"/>
<line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--color-background-primary)" stroke-width="1.4" stroke-linecap="round"/>
</svg>
<span class="pr-name">PII Reveal<span class="pr-name-sub">/ inspector</span></span>
</div>
<span class="pr-file-chip" id="file-chip"></span>
<div class="pr-grow"></div>
<div class="pr-status" id="scan-status"><span class="pr-status-dot"></span>Scan complete</div>
<button class="pr-new" id="btn-new">new file</button>
</div>
<div class="error-banner" id="error-banner"></div>
<div class="pr-stats">
<div class="pr-stats-row">
<div>
<div class="pr-hero"><span id="hero-val">0</span><span class="pr-hero-pct">%</span></div>
<div class="pr-lab">PII content</div>
</div>
<div>
<div class="pr-num" id="num-spans">0</div>
<div class="pr-lab">Spans detected</div>
</div>
<div>
<div class="pr-num" id="num-cats">0 / 8</div>
<div class="pr-lab">Categories present</div>
</div>
<div>
<div class="pr-num" id="num-speakers">0</div>
<div class="pr-lab">Speakers identified</div>
</div>
</div>
<div class="pr-bar" id="dist-bar"></div>
<div class="pr-legend" id="legend"></div>
</div>
<div class="pr-body">
<div class="pr-doc-pane">
<div class="pr-doc-meta" id="doc-meta"></div>
<div class="pr-text" id="doc-text"></div>
</div>
<aside class="pr-side">
<div>
<div class="pr-side-h">Filter categories</div>
<div id="cat-list"></div>
</div>
<div id="speakers-block" style="display:none">
<div class="pr-side-h">Speakers</div>
<div class="pr-speakers" id="speakers-list"></div>
</div>
<div class="pr-acts">
<button class="pr-btn pr-btn-prim" id="act-redact">Redact and export <span class="pr-btn-arr">&rarr;</span></button>
<button class="pr-btn" id="act-copy">Copy sanitized</button>
<button class="pr-btn" id="act-report">Download report</button>
</div>
</aside>
</div>
</div>
</div>
<div id="loading">
<div class="l-ring"></div>
<div class="l-label">scanning document&hellip;</div>
</div>
<div class="tip" id="tip" style="display:none"></div>
<script>
/* ===== state ===== */
const S = {
text:'', spans:[], stats:{}, speakers:{}, catMeta:{}, filename:'',
activeCats:new Set(), scanMs:0, sortedSpans:[],
};
/* defaults (fallback when backend meta missing) */
const DEFAULT_META = {
private_person: {color:'#E24B4A', cls:'hp', label:'Person', mono:false},
private_date: {color:'#7F77DD', cls:'hd', label:'Date', mono:true},
private_address: {color:'#1D9E75', cls:'ha', label:'Address', mono:false},
private_email: {color:'#378ADD', cls:'he', label:'Email', mono:true},
account_number: {color:'#BA7517', cls:'hac', label:'Account', mono:true},
private_url: {color:'#D85A30', cls:'hu', label:'URL', mono:true},
secret: {color:'#D4537E', cls:'hs', label:'Secret', mono:true},
private_phone: {color:'#639922', cls:'hph', label:'Phone', mono:true},
};
const ORDER = ['private_person','private_address','private_email','private_phone',
'private_url','private_date','account_number','secret'];
const metaFor = c => ({...(DEFAULT_META[c]||{color:'#999',cls:'',label:c,mono:false}), ...(S.catMeta[c]||{})});
/* ===== upload flow ===== */
const dz = document.getElementById('dropzone');
const fi = document.getElementById('file-input');
['dragenter','dragover'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.add('dragover'); }));
['dragleave','drop'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.remove('dragover'); }));
dz.addEventListener('drop', ev => { if (ev.dataTransfer.files[0]) uploadFile(ev.dataTransfer.files[0]); });
fi.addEventListener('change', ev => { if (ev.target.files[0]) uploadFile(ev.target.files[0]); });
async function uploadFile(file){
const ext = file.name.split('.').pop().toLowerCase();
if (!['pdf','doc','docx'].includes(ext)) { showError('Unsupported file type.'); return; }
document.getElementById('loading').style.display='flex';
document.getElementById('upload-view').style.display='none';
const form = new FormData(); form.append('file', file);
const t0 = performance.now();
try{
const r = await fetch('/api/analyze', {method:'POST', body:form});
const d = await r.json();
if (d.error) { showError(d.error); return; }
S.scanMs = performance.now() - t0;
S.text = d.text; S.spans = d.spans; S.stats = d.stats;
S.speakers = d.speakers||{}; S.catMeta = d.categories_meta||{};
S.filename = d.filename;
S.activeCats = new Set(Object.keys(d.stats.categories));
S.sortedSpans = [...S.spans].sort((a,b) => a.start - b.start);
renderResults();
} catch(e){ showError('Analysis failed: '+e.message); }
finally { document.getElementById('loading').style.display='none'; }
}
function showError(m){
document.getElementById('loading').style.display='none';
document.getElementById('upload-view').style.display='flex';
document.getElementById('results-view').style.display='none';
alert(m);
}
function resetView(){
document.getElementById('results-view').style.display='none';
document.getElementById('upload-view').style.display='flex';
fi.value = '';
}
document.getElementById('btn-new').addEventListener('click', resetView);
/* ===== render ===== */
function renderResults(){
document.getElementById('results-view').style.display='block';
document.getElementById('file-chip').textContent = S.filename;
document.getElementById('scan-status').innerHTML =
`<span class="pr-status-dot"></span>Scan complete &middot; ${(S.scanMs/1000).toFixed(1)}s`;
renderStats();
renderBar();
renderLegend();
renderDocMeta();
renderDoc();
renderCats();
renderSpeakers();
}
function renderStats(){
const s = S.stats;
document.getElementById('hero-val').textContent = (s.pii_percentage ?? 0).toFixed(1);
document.getElementById('num-spans').textContent = s.total_spans;
document.getElementById('num-cats').textContent = `${s.num_categories} / 8`;
const n = Object.keys(S.speakers).length;
document.getElementById('num-speakers').textContent = n || 'β€”';
}
function renderBar(){
const bar = document.getElementById('dist-bar');
bar.innerHTML = '';
const cats = S.stats.categories;
const total = Object.values(cats).reduce((a,b) => a + b.chars, 0) || 1;
const ordered = ORDER.filter(c => cats[c]);
if (!ordered.length) {
const span = document.createElement('span');
span.style.cssText = 'flex:1;background:var(--color-border-tertiary);opacity:.4';
bar.appendChild(span); return;
}
for (const c of ordered) {
const m = metaFor(c);
const span = document.createElement('span');
span.style.background = m.color;
span.style.flex = cats[c].chars / total;
span.dataset.cat = c;
span.title = `${m.label} β€” ${cats[c].count} span${cats[c].count===1?'':'s'}`;
span.addEventListener('mouseenter', ev => showTip(ev, `${m.label} Β· ${cats[c].count}`));
span.addEventListener('mousemove', moveTip);
span.addEventListener('mouseleave', hideTip);
if (!S.activeCats.has(c)) span.style.opacity = '.25';
bar.appendChild(span);
}
}
function renderLegend(){
const leg = document.getElementById('legend');
leg.innerHTML = '';
const cats = S.stats.categories;
const ordered = ORDER.filter(c => cats[c]);
for (const c of ordered) {
const m = metaFor(c);
const el = document.createElement('span');
el.className = 'pr-leg' + (S.activeCats.has(c) ? '' : ' off');
el.dataset.cat = c;
el.innerHTML = `<span class="pr-leg-sw" style="background:${m.color}"></span>${m.label}<span class="pr-leg-ct">${cats[c].count}</span>`;
el.addEventListener('click', () => toggleCat(c));
leg.appendChild(el);
}
}
function renderDocMeta(){
const s = S.stats;
const meta = document.getElementById('doc-meta');
const parts = [
`${s.total_chars.toLocaleString()} characters`,
`${s.total_lines.toLocaleString()} lines`,
`scanned in ${(S.scanMs/1000).toFixed(1)}s`,
];
meta.innerHTML = parts.map(p => `<span>${p}</span>`).join('');
}
function esc(s){ const d=document.createElement('div'); d.textContent=s; return d.innerHTML; }
function renderDoc(){
const { text, sortedSpans, activeCats } = S;
const el = document.getElementById('doc-text');
let html = '', pos = 0;
for (const sp of sortedSpans) {
if (sp.start < pos) continue;
if (sp.start > pos) html += esc(text.substring(pos, sp.start));
const m = metaFor(sp.label);
const cls = ['h', m.cls];
if (m.mono) cls.push('m');
if (!activeCats.has(sp.label)) cls.push('off');
html += `<span class="${cls.join(' ')}" data-cat="${sp.label}">${esc(text.substring(sp.start, sp.end))}</span>`;
pos = sp.end;
}
if (pos < text.length) html += esc(text.substring(pos));
// preserve paragraph feel β€” serif font + white-space:pre-wrap handles this naturally
el.innerHTML = html;
// span tooltips
el.querySelectorAll('.h').forEach(span => {
const cat = span.dataset.cat, m = metaFor(cat);
span.addEventListener('mouseenter', ev => showTip(ev, `${m.label}: ${span.textContent.trim()}`));
span.addEventListener('mousemove', moveTip);
span.addEventListener('mouseleave', hideTip);
});
}
function renderCats(){
const box = document.getElementById('cat-list');
box.innerHTML = '';
const cats = S.stats.categories;
const ordered = ORDER.filter(c => cats[c]);
if (!ordered.length) { box.innerHTML = '<div class="empty-rail">No entities detected.</div>'; return; }
for (const c of ordered) {
const m = metaFor(c);
const el = document.createElement('div');
el.className = 'pr-cat' + (S.activeCats.has(c) ? '' : ' off');
el.dataset.cat = c;
el.innerHTML = `<span class="pr-cat-sw" style="background:${m.color}"></span><span class="pr-cat-nm">${m.label}</span><span class="pr-cat-ct">${cats[c].count}</span>`;
el.addEventListener('click', () => toggleCat(c));
box.appendChild(el);
}
}
function renderSpeakers(){
const names = Object.keys(S.speakers);
const block = document.getElementById('speakers-block');
const box = document.getElementById('speakers-list');
if (!names.length) { block.style.display = 'none'; return; }
block.style.display = 'block';
box.innerHTML = '';
for (const n of names) {
const el = document.createElement('div');
el.className = 'pr-cat';
el.innerHTML = `<span class="pr-cat-sw"></span><span class="pr-cat-nm">${esc(n)}</span><span class="pr-cat-ct">${S.speakers[n]}</span>`;
box.appendChild(el);
}
}
function toggleCat(c){
if (S.activeCats.has(c)) S.activeCats.delete(c);
else S.activeCats.add(c);
// targeted toggles β€” avoid full re-render to keep scroll position
document.querySelectorAll(`.pr-cat[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
document.querySelectorAll(`.pr-leg[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
document.querySelectorAll(`.h[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
document.querySelectorAll(`.pr-bar span[data-cat="${c}"]`).forEach(el => el.style.opacity = S.activeCats.has(c) ? '1' : '.25');
}
/* tooltip */
function showTip(ev, text){ const t = document.getElementById('tip'); t.textContent = text; t.style.display = 'block'; moveTip(ev); }
function moveTip(ev){ const t = document.getElementById('tip'); t.style.left = (ev.clientX + 12) + 'px'; t.style.top = (ev.clientY - 26) + 'px'; }
function hideTip(){ document.getElementById('tip').style.display = 'none'; }
/* ===== actions ===== */
function sanitizedText(){
const parts = []; let pos = 0;
for (const sp of S.sortedSpans) {
if (sp.start < pos) continue;
if (sp.start > pos) parts.push(S.text.substring(pos, sp.start));
const m = metaFor(sp.label);
parts.push(S.activeCats.has(sp.label) ? `[${m.label.toUpperCase()}]` : S.text.substring(sp.start, sp.end));
pos = sp.end;
}
if (pos < S.text.length) parts.push(S.text.substring(pos));
return parts.join('');
}
function download(name, content, type){
const blob = new Blob([content], { type: type || 'text/plain' });
const a = document.createElement('a');
a.href = URL.createObjectURL(blob); a.download = name;
document.body.appendChild(a); a.click(); a.remove();
setTimeout(() => URL.revokeObjectURL(a.href), 1000);
}
function baseName(){
const f = S.filename || 'document';
const i = f.lastIndexOf('.');
return i > 0 ? f.slice(0, i) : f;
}
document.getElementById('act-redact').addEventListener('click', () => {
download(baseName() + '.redacted.txt', sanitizedText(), 'text/plain');
flash('act-redact', 'Exported &rarr;');
});
document.getElementById('act-copy').addEventListener('click', async () => {
try { await navigator.clipboard.writeText(sanitizedText()); flash('act-copy', 'Copied'); }
catch { flash('act-copy', 'Copy failed'); }
});
document.getElementById('act-report').addEventListener('click', () => {
const report = {
filename: S.filename,
scanned_in_ms: Math.round(S.scanMs),
stats: S.stats,
speakers: S.speakers,
active_categories: [...S.activeCats],
spans: S.spans,
};
download(baseName() + '.report.json', JSON.stringify(report, null, 2), 'application/json');
flash('act-report', 'Downloaded');
});
const _flashTimers = {};
function flash(id, msg){
const btn = document.getElementById(id);
const prev = btn.innerHTML;
btn.innerHTML = msg;
clearTimeout(_flashTimers[id]);
_flashTimers[id] = setTimeout(() => { btn.innerHTML = prev; }, 1300);
}
</script>
</body>
</html>"""
# ── launch ───────────────────────────────────────────────────────
if __name__ == "__main__":
server.launch(server_name="0.0.0.0", server_port=7860)