Spaces:
Running on Zero
Running on Zero
| """ | |
| PII Reveal - Document Privacy Explorer (v5) | |
| ============================================ | |
| Changes from v4 (feedback): | |
| 1. Light theme refresh | |
| Crisper, higher-contrast neutral palette: bright white cards on a very | |
| light grey body, near-black body text, and subdued text bumped one | |
| stop darker so labels stop disappearing. Category highlight alpha | |
| bumped from 12% to 16% on light, 15% on dark. | |
| 2. PDF redaction export | |
| New POST /api/redact-pdf endpoint. Accepts the original PDF plus the | |
| list of spans + active labels the client is viewing, applies true | |
PyMuPDF redactions (black fill, underlying text removed — not just a
| visual overlay), and streams the result back. The inspector gets an | |
| "Export PDF" primary button when the input file is a PDF. | |
| 3. Performance | |
| Two code-level fixes for the 100k-token slowness on T4: | |
| a) predict_text: dropped the unbind(0) -> list -> stack(0) roundtrip | |
| in favour of a single torch.cat. It was allocating 100k separate | |
| 33-wide tensors and re-stacking them for no reason. | |
| b) Decoder.decode: the Viterbi loop is inherently sequential and | |
launches O(seq_len) CUDA kernels — on Turing (T4, compute 7.5)
| kernel-launch overhead dominated because the state space is tiny | |
| (33 classes). It now runs on CPU, which is bandwidth-bound on a | |
| 33x33 matrix and completes in a couple of seconds for 100k tokens. | |
| Also cached the Decoder itself with lru_cache (was being rebuilt | |
| per request). | |
| Hardware note: T4 is pre-Ampere and has no native bf16 support, so | |
| every attention matmul is emulated. Code-level changes help the | |
| decoder, but the model's attention pass will still be faster on L4 / | |
| A10 / A100 by a large factor. The v5 fixes were validated against the | |
| "is it code or hardware" question: both, but the decoder path was the | |
| dominant contribution for a 100k document. | |
| """ | |
| # ββ stdlib βββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| import dataclasses | |
| import functools | |
| import io | |
| import json | |
| import math | |
| import os | |
| import re | |
| import tempfile | |
| from bisect import bisect_left, bisect_right | |
| from collections.abc import Sequence | |
| from dataclasses import dataclass | |
| from pathlib import Path | |
| from typing import Final | |
| # ββ third-party ββββββββββββββββββββββββββββββββββββββββββββββββββ | |
| import gradio as gr | |
| import spaces | |
| import tiktoken | |
| import torch | |
| import torch.nn.functional as F | |
| from fastapi import File, Form, UploadFile | |
| from fastapi.responses import HTMLResponse, JSONResponse, StreamingResponse | |
| from huggingface_hub import snapshot_download | |
| from safetensors import safe_open | |
| # ββ configuration ββββββββββββββββββββββββββββββββββββββββββββββββ | |
# Hugging Face repo holding the checkpoint; overridable via the MODEL_ID env var.
MODEL_REPO = os.getenv("MODEL_ID", "charles-first-org/second-model")
# Optional access token for private repos (None -> anonymous download).
HF_TOKEN = os.getenv("HF_TOKEN", None)
# NOTE: snapshot_download runs at import time (blocking network side effect);
# MODEL_DIR is the local path of the downloaded checkpoint.
MODEL_DIR = Path(snapshot_download(MODEL_REPO, token=HF_TOKEN))
# Per-category UI metadata: highlight color, CSS class, display label, and
# whether values render in a monospace font on the frontend.
CATEGORIES_META = {
    "private_person": {"color": "#E24B4A", "cls": "hp", "label": "Person", "mono": False},
    "private_date": {"color": "#7F77DD", "cls": "hd", "label": "Date", "mono": True},
    "private_address": {"color": "#1D9E75", "cls": "ha", "label": "Address", "mono": False},
    "private_email": {"color": "#378ADD", "cls": "he", "label": "Email", "mono": True},
    "account_number": {"color": "#BA7517", "cls": "hac", "label": "Account", "mono": True},
    "private_url": {"color": "#D85A30", "cls": "hu", "label": "URL", "mono": True},
    "secret": {"color": "#D4537E", "cls": "hs", "label": "Secret", "mono": True},
    "private_phone": {"color": "#639922", "cls": "hph", "label": "Phone", "mono": True},
}
# =====================================================================
# MODEL ARCHITECTURE + INFERENCE (from reference implementation)
# =====================================================================
# Expected "model_type" value in the checkpoint's config.json.
PRIVACY_FILTER_MODEL_TYPE: Final[str] = "privacy_filter"
# Keys that must all be present in config.json; checked by
# validate_model_config_contract before the model is built.
REQUIRED_MODEL_CONFIG_KEYS: Final[tuple[str, ...]] = (
    "model_type", "encoding", "num_hidden_layers", "num_experts",
    "experts_per_token", "vocab_size", "num_labels", "hidden_size",
    "intermediate_size", "head_dim", "num_attention_heads",
    "num_key_value_heads", "sliding_window", "bidirectional_context",
    "bidirectional_left_context", "bidirectional_right_context",
    "default_n_ctx", "initial_context_length", "rope_theta",
    "rope_scaling_factor", "rope_ntk_alpha", "rope_ntk_beta", "param_dtype",
)
# Label assigned to non-PII tokens.
BACKGROUND_CLASS_LABEL: Final[str] = "O"
# BIOES boundary tags: Begin / Inside / End / Single-token span.
BOUNDARY_PREFIXES: Final[tuple[str, ...]] = ("B", "I", "E", "S")
# Span-level classes: background plus the eight PII categories.
SPAN_CLASS_NAMES: Final[tuple[str, ...]] = (
    BACKGROUND_CLASS_LABEL,
    "account_number", "private_address", "private_date", "private_email",
    "private_person", "private_phone", "private_url", "secret",
)
# Token-level label set: "O" plus 4 boundary tags x 8 categories = 33 labels,
# matching the num_labels == 33 contract check.
NER_CLASS_NAMES: Final[tuple[str, ...]] = (BACKGROUND_CLASS_LABEL,) + tuple(
    f"{prefix}-{base}"
    for base in SPAN_CLASS_NAMES if base != BACKGROUND_CLASS_LABEL
    for prefix in BOUNDARY_PREFIXES
)
# Bias names read from viterbi_calibration.json (see
# get_viterbi_transition_biases); absent keys default to 0.0.
VITERBI_TRANSITION_BIAS_KEYS: Final[tuple[str, ...]] = (
    "transition_bias_background_stay", "transition_bias_background_to_start",
    "transition_bias_inside_to_continue", "transition_bias_inside_to_end",
    "transition_bias_end_to_background", "transition_bias_end_to_start",
)
# Preset name looked up under "operating_points" in the calibration file.
DEFAULT_VITERBI_CALIBRATION_PRESET: Final[str] = "default"
def validate_model_config_contract(cfg: dict, *, context: str) -> None:
    """Fail fast if *cfg* does not satisfy the privacy-filter checkpoint contract.

    Raises:
        ValueError: on the first violated requirement, prefixed with *context*.
    """
    absent = [key for key in REQUIRED_MODEL_CONFIG_KEYS if key not in cfg]
    if absent:
        raise ValueError(f"{context} missing keys: {', '.join(absent)}")
    if cfg.get("model_type") != PRIVACY_FILTER_MODEL_TYPE:
        raise ValueError(f"{context} model_type must be {PRIVACY_FILTER_MODEL_TYPE!r}")
    if cfg.get("bidirectional_context") is not True:
        raise ValueError(f"{context} must use bidirectional_context=true")
    left = cfg.get("bidirectional_left_context")
    right = cfg.get("bidirectional_right_context")
    both_ints = isinstance(left, int) and isinstance(right, int)
    if not both_ints or left != right or left < 0:
        raise ValueError(f"{context} bidirectional context must be equal non-negative ints")
    # The attention window must cover exactly `left` tokens on each side.
    if cfg.get("sliding_window") != 2 * left + 1:
        raise ValueError(f"{context} sliding_window must equal 2*context+1")
    if cfg["num_labels"] != 33:
        raise ValueError(f"{context} num_labels must be 33")
    if cfg["param_dtype"] != "bfloat16":
        raise ValueError(f"{context} param_dtype must be bfloat16")
| # ββ model helpers ββββββββββββββββββββββββββββββββββββββββββββββββ | |
| def expert_linear(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor | None) -> torch.Tensor: | |
| n, e, k = x.shape | |
| _, _, _, o = weight.shape | |
| out = torch.bmm(x.reshape(n * e, 1, k), weight.reshape(n * e, k, o)).reshape(n, e, o) | |
| return out + bias if bias is not None else out | |
@dataclass
class ModelConfig:
    """Typed view of the checkpoint's config.json (validated upstream).

    NOTE(review): the extraction appears to have dropped decorators from this
    file; `@dataclass` and `@classmethod` are restored here. Without them the
    class has no __init__ (annotation-only fields) and from_checkpoint_config
    — which takes `cls` and calls dataclasses.fields(cls) / cls(**...) —
    would receive an instance as `cls`.
    """
    num_hidden_layers: int; num_experts: int; experts_per_token: int
    vocab_size: int; num_labels: int; hidden_size: int; intermediate_size: int
    head_dim: int; num_attention_heads: int; num_key_value_heads: int
    bidirectional_context_size: int; initial_context_length: int
    rope_theta: float; rope_scaling_factor: float; rope_ntk_alpha: float; rope_ntk_beta: float

    @classmethod
    def from_checkpoint_config(cls, cfg: dict, *, context: str) -> "ModelConfig":
        """Build a ModelConfig from a raw config dict, ignoring unknown keys.

        `context` is accepted for interface compatibility with the validator.
        """
        cfg = dict(cfg)  # do not mutate the caller's dict
        # Left/right context are validated equal, so either side works here.
        cfg["bidirectional_context_size"] = cfg["bidirectional_left_context"]
        fields = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in cfg.items() if k in fields})
class RMSNorm(torch.nn.Module):
    """Root-mean-square LayerNorm with a learnable float32 scale.

    Normalization is computed in float32 for stability and the result is
    cast back to the input dtype.
    """
    def __init__(self, n: int, eps: float = 1e-5, device=None):
        super().__init__()
        self.eps = eps
        self.scale = torch.nn.Parameter(torch.ones(n, device=device, dtype=torch.float32))

    def forward(self, x):
        x32 = x.float()
        inv_rms = torch.rsqrt(x32.pow(2).mean(-1, keepdim=True) + self.eps)
        return (x32 * inv_rms * self.scale).to(x.dtype)
def apply_rope(x, cos, sin):
    """Rotate interleaved (even, odd) feature pairs of x by the given angles.

    cos/sin get a head axis inserted (unsqueeze(-2)) so they broadcast over
    x viewed as (seq, heads, head_dim); pairs are re-interleaved on return.
    """
    cos = cos.unsqueeze(-2).to(x.dtype)
    sin = sin.unsqueeze(-2).to(x.dtype)
    even, odd = x[..., ::2], x[..., 1::2]
    rot_even = even * cos - odd * sin
    rot_odd = odd * cos + even * sin
    return torch.stack((rot_even, rot_odd), dim=-1).reshape(x.shape)
class RotaryEmbedding(torch.nn.Module):
    """RoPE with NTK-style frequency interpolation for extended context.

    cos/sin tables are precomputed up to max_position_embeddings on CPU and
    registered as non-persistent buffers; forward() grows them on demand.
    """
    def __init__(self, head_dim, base, dtype, *, initial_context_length=4096,
                 scaling_factor=1.0, ntk_alpha=1.0, ntk_beta=32.0, device=None):
        super().__init__()
        self.head_dim, self.base, self.dtype = head_dim, base, dtype
        self.initial_context_length = initial_context_length
        self.scaling_factor, self.ntk_alpha, self.ntk_beta = scaling_factor, ntk_alpha, ntk_beta
        self.device = device
        # Scaled max positions (at least the initial training context).
        mp = max(int(initial_context_length * scaling_factor), initial_context_length)
        self.max_position_embeddings = mp
        cos, sin = self._compute(mp, device=torch.device("cpu"))
        target = device or torch.device("cpu")
        # persistent=False: the caches are derived, not checkpoint state.
        self.register_buffer("cos_cache", cos.to(target), persistent=False)
        self.register_buffer("sin_cache", sin.to(target), persistent=False)

    def _inv_freq(self, device=None):
        """Per-pair inverse frequencies, NTK-interpolated when scaling > 1."""
        device = device or self.device
        freq = self.base ** (torch.arange(0, self.head_dim, 2, dtype=torch.float, device=device) / self.head_dim)
        if self.scaling_factor > 1.0:
            # Blend interpolated (low-frequency) and extrapolated
            # (high-frequency) bands with a linear ramp between the alpha/beta
            # wavelength cutoffs — NTK-by-parts style; confirm against the
            # reference implementation.
            d_half = self.head_dim / 2
            low = d_half * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi)) / math.log(self.base)
            high = d_half * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi)) / math.log(self.base)
            interp = 1.0 / (self.scaling_factor * freq)
            extrap = 1.0 / freq
            ramp = (torch.arange(d_half, dtype=torch.float32, device=device) - low) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            return interp * (1 - mask) + extrap * mask
        return 1.0 / freq

    def _compute(self, n, device=None):
        """Return (cos, sin) tables of shape (n, head_dim/2) in self.dtype."""
        inv_freq = self._inv_freq(device)
        t = torch.arange(n, dtype=torch.float32, device=device or self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        # Magnitude correction applied when context is scaled up.
        c = 0.1 * math.log(self.scaling_factor) + 1.0 if self.scaling_factor > 1.0 else 1.0
        return (freqs.cos() * c).to(self.dtype), (freqs.sin() * c).to(self.dtype)

    def forward(self, q, k):
        """Apply RoPE to q and k of shape (seq_len, heads*head_dim)."""
        n = q.shape[0]
        # Grow the cached tables if the sequence outruns them (mutates buffers).
        if n > self.cos_cache.shape[0]:
            cos, sin = self._compute(n, torch.device("cpu"))
            self.cos_cache, self.sin_cache = cos.to(q.device), sin.to(q.device)
        cc = self.cos_cache.to(q.device) if self.cos_cache.device != q.device else self.cos_cache
        sc = self.sin_cache.to(q.device) if self.sin_cache.device != q.device else self.sin_cache
        cos, sin = cc[:n], sc[:n]
        q = apply_rope(q.view(n, -1, self.head_dim), cos, sin).reshape(q.shape)
        k = apply_rope(k.view(n, -1, self.head_dim), cos, sin).reshape(k.shape)
        return q, k
def sdpa(Q, K, V, S, sm_scale, ctx):
    """Sliding-window attention with a learned per-head "sink" logit.

    Q: (n, g, q, d) — n positions, g KV groups, q query heads per group.
    K, V: (n, g, d). S: flat per-head sink logits (g*q values).
    Each position attends to the 2*ctx+1 window centered on itself. The sink
    adds one extra softmax column that is sliced off afterwards, so it only
    drains probability mass from the real keys. Returns (n, g*q*d).
    """
    n, nh, qm, hd = Q.shape
    w = 2 * ctx + 1
    # Pad the sequence axis so every position has a full window, then unfold
    # into per-position key/value windows: (n, w, g, d).
    Kp = F.pad(K, (0, 0, 0, 0, ctx, ctx)); Vp = F.pad(V, (0, 0, 0, 0, ctx, ctx))
    Kw = Kp.unfold(0, w, 1).permute(0, 3, 1, 2); Vw = Vp.unfold(0, w, 1).permute(0, 3, 1, 2)
    # Mask window slots that fall outside the real [0, n) range.
    idx = torch.arange(w, device=Q.device) - ctx
    pos = torch.arange(n, device=Q.device)[:, None] + idx[None, :]
    valid = (pos >= 0) & (pos < n)
    # Scores in float32 for a stable softmax.
    scores = torch.einsum("nhqd,nwhd->nhqw", Q, Kw).float() * sm_scale
    scores = scores.masked_fill(~valid[:, None, None, :], -float("inf"))
    # Sink logits are multiplied by ln(2) — presumably stored in log2 units;
    # TODO(review): confirm against the reference implementation.
    sink = (S * math.log(2.0)).reshape(nh, qm)[None, :, :, None].expand(n, -1, -1, 1)
    scores = torch.cat([scores, sink], dim=-1)
    # Softmax over window + sink, then drop the sink column.
    wt = torch.softmax(scores, dim=-1)[..., :-1].to(V.dtype)
    return torch.einsum("nhqw,nwhd->nhqd", wt, Vw).reshape(n, -1)
class AttentionBlock(torch.nn.Module):
    """Pre-norm bidirectional sliding-window attention with residual add.

    Grouped-query attention: nah query heads share nkv key/value heads; the
    window covers ctx tokens each side (sliding_window = 2*ctx+1).
    """
    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.head_dim, self.nah, self.nkv = cfg.head_dim, cfg.num_attention_heads, cfg.num_key_value_heads
        self.ctx = int(cfg.bidirectional_context_size)
        # One learned attention-sink logit per query head (see sdpa).
        self.sinks = torch.nn.Parameter(torch.empty(cfg.num_attention_heads, device=device, dtype=torch.float32))
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Fused QKV projection: nah query heads + nkv key heads + nkv value heads.
        qkv_d = cfg.head_dim * (cfg.num_attention_heads + 2 * cfg.num_key_value_heads)
        self.qkv = torch.nn.Linear(cfg.hidden_size, qkv_d, device=device, dtype=dt)
        self.out = torch.nn.Linear(cfg.head_dim * cfg.num_attention_heads, cfg.hidden_size, device=device, dtype=dt)
        # Applied to BOTH q and k below, so the net score scale is 1/sqrt(head_dim).
        self.qk_scale = 1 / math.sqrt(math.sqrt(cfg.head_dim))
        self.rope = RotaryEmbedding(cfg.head_dim, int(cfg.rope_theta), torch.float32,
                                    initial_context_length=cfg.initial_context_length,
                                    scaling_factor=cfg.rope_scaling_factor,
                                    ntk_alpha=cfg.rope_ntk_alpha, ntk_beta=cfg.rope_ntk_beta, device=device)

    def forward(self, x):
        # x: (seq_len, hidden_size) — this model has no batch dimension.
        t = self.norm(x).to(self.qkv.weight.dtype)
        qkv = F.linear(t, self.qkv.weight, self.qkv.bias)
        hd, nah, nkv = self.head_dim, self.nah, self.nkv
        # Split the fused projection into its q / k / v segments.
        q = qkv[:, :nah * hd].contiguous()
        k = qkv[:, nah * hd:(nah + nkv) * hd].contiguous()
        v = qkv[:, (nah + nkv) * hd:(nah + 2 * nkv) * hd].contiguous()
        q, k = self.rope(q, k)
        q, k = q * self.qk_scale, k * self.qk_scale
        n = q.shape[0]
        # GQA layout: queries grouped as (n, kv_heads, heads_per_group, head_dim),
        # keys/values as (n, kv_heads, head_dim); sm_scale=1.0 since scaling
        # was already folded into q and k above.
        q = q.view(n, nkv, nah // nkv, hd); k = k.view(n, nkv, hd); v = v.view(n, nkv, hd)
        ao = sdpa(q, k, v, self.sinks, 1.0, self.ctx).to(self.out.weight.dtype)
        return x + F.linear(ao, self.out.weight, self.out.bias).to(x.dtype)
def swiglu(x, alpha=1.702, limit=7.0):
    """Clamped SwiGLU: gate * sigmoid(alpha * gate) * (linear + 1).

    The last axis is split in half into (gate, linear). The gate half is
    clamped from above only; the linear half is clamped to [-limit, limit].
    """
    gate, lin = x.chunk(2, dim=-1)
    gate = gate.clamp(max=limit)
    lin = lin.clamp(-limit, limit)
    return gate * torch.sigmoid(alpha * gate) * (lin + 1)
class MLPBlock(torch.nn.Module):
    """Mixture-of-experts MLP: top-k routing into clamped-SwiGLU experts,
    with a residual add around the whole block."""
    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.ne, self.ept = cfg.num_experts, cfg.experts_per_token
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Router: one logit per expert.
        self.gate = torch.nn.Linear(cfg.hidden_size, cfg.num_experts, device=device, dtype=dt)
        # mlp1 fans out to 2*intermediate (gate + linear halves for swiglu);
        # mlp2 projects back to hidden_size. One weight/bias set per expert.
        self.mlp1_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp1_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp2_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size, cfg.hidden_size, device=device, dtype=dt))
        self.mlp2_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, device=device, dtype=dt))

    def forward(self, x):
        t = self.norm(x)
        # Route in float32 for a numerically stable softmax over expert logits.
        gs = F.linear(t.float(), self.gate.weight.float(), self.gate.bias.float())
        top = torch.topk(gs, k=self.ept, dim=-1, sorted=True)
        # Softmax over the selected experts; the /ept here is undone by the
        # *ept inside _chunk, leaving a plain weighted sum.
        ew = torch.softmax(top.values, dim=-1) / self.ept
        ei = top.indices
        ept = self.ept
        def _chunk(tc, eic, ewc):
            # Gather each token's selected expert parameters, run both expert
            # layers in float32, then combine outputs by routing weight.
            o = expert_linear(tc.float().unsqueeze(1).expand(-1, eic.shape[1], -1),
                              self.mlp1_weight[eic].float(), self.mlp1_bias[eic].float())
            o = swiglu(o)
            o = expert_linear(o.float(), self.mlp2_weight[eic].float(), self.mlp2_bias[eic].float())
            return (torch.einsum("bec,be->bc", o.to(ewc.dtype), ewc) * ept).to(x.dtype)
        # Process the token axis in chunks of 32 to bound the memory of the
        # gathered per-token expert weight tensors.
        cs = 32
        if t.shape[0] > cs:
            parts = [_chunk(t[s:s+cs], ei[s:s+cs], ew[s:s+cs]) for s in range(0, t.shape[0], cs)]
            return x + torch.cat(parts, 0)
        return x + _chunk(t, ei, ew)
class TransformerBlock(torch.nn.Module):
    """One transformer layer: windowed attention followed by the MoE MLP.

    Both sub-blocks apply their own pre-norm and residual connection.
    """
    def __init__(self, cfg, device=None):
        super().__init__()
        self.attn = AttentionBlock(cfg, device=device)
        self.mlp = MLPBlock(cfg, device=device)

    def forward(self, x):
        attended = self.attn(x)
        return self.mlp(attended)
class Checkpoint:
    """Lazy tensor reader over a directory of .safetensors shards.

    An index from tensor key to shard file is built up front; tensors are
    loaded one at a time in get(). Some module parameter names differ from
    the checkpoint's, so a rename map is consulted first.
    """

    @staticmethod
    def build_param_name_map(n):
        """Module-tree parameter name -> checkpoint tensor name, for n layers.

        NOTE(review): restored @staticmethod — this is invoked as
        self.build_param_name_map(num_hidden_layers), which without the
        decorator would bind the instance to `n` and raise TypeError.
        """
        return ({f"block.{i}.mlp.mlp1_bias": f"block.{i}.mlp.swiglu.bias" for i in range(n)}
                | {f"block.{i}.mlp.mlp1_weight": f"block.{i}.mlp.swiglu.weight" for i in range(n)}
                | {f"block.{i}.mlp.mlp2_bias": f"block.{i}.mlp.out.bias" for i in range(n)}
                | {f"block.{i}.mlp.mlp2_weight": f"block.{i}.mlp.out.weight" for i in range(n)})

    def __init__(self, path, device, num_hidden_layers):
        self.pnm = self.build_param_name_map(num_hidden_layers)
        # safetensors expects a "cuda:0"-style device string.
        self.ds = device.type if device.index is None else f"{device.type}:{device.index}"
        files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".safetensors")]
        # Index every tensor key to the shard file that holds it.
        self.map = {}
        for sf in files:
            with safe_open(sf, framework="pt", device=self.ds) as h:
                for k in h.keys():
                    self.map[k] = sf

    def get(self, name):
        """Load a single tensor by module parameter name (rename map applied)."""
        mapped = self.pnm.get(name, name)
        with safe_open(self.map[mapped], framework="pt", device=self.ds) as h:
            return h.get_tensor(mapped)
class Transformer(torch.nn.Module):
    """Token classifier: embedding -> transformer blocks -> RMSNorm -> linear head."""
    def __init__(self, cfg, device):
        super().__init__()
        dt = torch.bfloat16
        self.embedding = torch.nn.Embedding(cfg.vocab_size, cfg.hidden_size, device=device, dtype=dt)
        self.block = torch.nn.ModuleList([TransformerBlock(cfg, device=device) for _ in range(cfg.num_hidden_layers)])
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        self.unembedding = torch.nn.Linear(cfg.hidden_size, cfg.num_labels, bias=False, device=device, dtype=dt)

    def forward(self, token_ids):
        """Map (seq_len,) token ids to (seq_len, num_labels) logits."""
        x = self.embedding(token_ids)
        for blk in self.block:
            x = blk(x)
        return F.linear(self.norm(x), self.unembedding.weight, None)

    @classmethod
    def from_checkpoint(cls, checkpoint_dir, *, device):
        """Build the model and copy every parameter from the checkpoint.

        NOTE(review): restored @classmethod — the method takes `cls` and
        calls cls(cfg, device); without the decorator it would receive an
        instance as `cls`.

        Raises:
            ValueError: if the config fails validation or any parameter's
                shape disagrees with the checkpoint tensor.
        """
        # Determinism: disable TF32 so fp32 matmuls are reproducible.
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        torch.set_float32_matmul_precision("highest")
        cp = json.loads((Path(checkpoint_dir) / "config.json").read_text())
        validate_model_config_contract(cp, context=str(checkpoint_dir))
        cfg = ModelConfig.from_checkpoint_config(cp, context=str(checkpoint_dir))
        ckpt = Checkpoint(checkpoint_dir, device, cfg.num_hidden_layers)
        m = cls(cfg, device); m.eval()
        for name, param in m.named_parameters():
            loaded = ckpt.get(name)
            if param.shape != loaded.shape:
                raise ValueError(f"Shape mismatch {name}: {param.shape} vs {loaded.shape}")
            param.data.copy_(loaded)
        return m
| # ββ label info + span decoding βββββββββββββββββββββββββββββββββββ | |
| class LabelInfo: | |
| boundary_label_lookup: dict[str, dict[str, int]] | |
| token_to_span_label: dict[int, int] | |
| token_boundary_tags: dict[int, str | None] | |
| span_class_names: tuple[str, ...] | |
| span_label_lookup: dict[str, int] | |
| background_token_label: int | |
| background_span_label: int | |
def labels_to_spans(labels_by_index, label_info):
    """Collapse per-token BIOES label ids into (span_class, start, end) token spans.

    labels_by_index maps token index -> token label id; indices need not be
    contiguous — a gap closes any open span. `end` is exclusive. Malformed
    sequences (I without B, stray E, class switches mid-span) are recovered
    by closing/reopening spans rather than raising.
    """
    spans, cur_label, start_idx, prev_idx = [], None, None, None
    bg = label_info.background_span_label
    for ti in sorted(labels_by_index):
        lid = labels_by_index[ti]
        sl = label_info.token_to_span_label.get(lid)
        bt = label_info.token_boundary_tags.get(lid)
        # A gap in token indices ends any open span at the previous token.
        if prev_idx is not None and ti != prev_idx + 1:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        # Unknown label id: skip the token entirely.
        if sl is None:
            prev_idx = ti; continue
        # Background closes an open span just before this token.
        if sl == bg:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, ti))
            cur_label = start_idx = None; prev_idx = ti; continue
        if bt == "S":
            # Single-token span; flush any span still open first.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
        elif bt == "B":
            # Begin: flush any open span, then open a new one here.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label, start_idx = sl, ti
        elif bt == "I":
            # Inside: continue only when the class matches; otherwise treat
            # this token as a fresh start (recovers from I-without-B).
            if cur_label is None or cur_label != sl:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                cur_label, start_idx = sl, ti
        elif bt == "E":
            if cur_label is None or cur_label != sl or start_idx is None:
                # Stray/mismatched E: flush what was open, then emit this
                # token as a single-token span.
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
            else:
                # Proper close: span includes this token.
                spans.append((cur_label, start_idx, ti + 1)); cur_label = start_idx = None
        else:
            # Unrecognized boundary tag: close anything open and resync.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        prev_idx = ti
    # Flush a span still open at end of input.
    if cur_label is not None and start_idx is not None and prev_idx is not None:
        spans.append((cur_label, start_idx, prev_idx + 1))
    return spans
def token_spans_to_char_spans(spans, cs, ce):
    """Map token-index spans to character spans using per-token offsets.

    cs/ce hold each token's character start/end offset. Spans outside
    [0, len(cs)] or collapsing to zero characters are dropped.
    """
    result = []
    for label, tok_start, tok_end in spans:
        if not (0 <= tok_start < tok_end <= len(cs)):
            continue
        char_start = cs[tok_start]
        char_end = ce[tok_end - 1]
        if char_end > char_start:
            result.append((label, char_start, char_end))
    return result
def trim_char_spans_whitespace(spans, text):
    """Shrink (label, start, end) spans to exclude surrounding whitespace.

    Spans out of bounds for *text*, or empty after trimming, are dropped.
    """
    trimmed = []
    for label, start, end in spans:
        if not (0 <= start < end <= len(text)):
            continue
        while start < end and text[start].isspace():
            start += 1
        while end > start and text[end - 1].isspace():
            end -= 1
        if end > start:
            trimmed.append((label, start, end))
    return trimmed
| # ββ viterbi decoder ββββββββββββββββββββββββββββββββββββββββββββββ | |
def get_viterbi_transition_biases():
    """Load Viterbi transition biases from MODEL_DIR/viterbi_calibration.json.

    The file may be a flat dict of biases or contain an "operating_points"
    mapping whose "default" preset holds a "biases" dict. Falls back to
    all-zero biases when the file is absent or malformed; missing individual
    keys default to 0.0.
    """
    calib_path = MODEL_DIR / "viterbi_calibration.json"
    zeros = {key: 0.0 for key in VITERBI_TRANSITION_BIAS_KEYS}
    if not calib_path.is_file():
        return zeros
    payload = json.loads(calib_path.read_text())
    biases = payload
    operating_points = payload.get("operating_points")
    if isinstance(operating_points, dict):
        preset = operating_points.get(DEFAULT_VITERBI_CALIBRATION_PRESET)
        if isinstance(preset, dict):
            biases = preset.get("biases", biases)
    if not isinstance(biases, dict):
        return zeros
    return {key: float(biases.get(key, 0.0)) for key in VITERBI_TRANSITION_BIAS_KEYS}
class Decoder:
    """Viterbi decoder over the BIOES token-label state space.

    Start/end/transition scores are built once from the label topology plus
    the calibration biases; structurally invalid transitions get -1e9 so
    they are effectively never chosen.
    """
    def __init__(self, label_info):
        nc = len(label_info.token_to_span_label)
        self._start = torch.full((nc,), -1e9, dtype=torch.float32)
        self._end = torch.full((nc,), -1e9, dtype=torch.float32)
        self._trans = torch.full((nc, nc), -1e9, dtype=torch.float32)
        biases = get_viterbi_transition_biases()
        bg_tok, bg_sp = label_info.background_token_label, label_info.background_span_label
        ttsl, tbt = label_info.token_to_span_label, label_info.token_boundary_tags
        for i in range(nc):
            tag, sl = tbt.get(i), ttsl.get(i)
            # A path may start on background or a span-opening tag (B/S) and
            # may end on background or a span-closing tag (E/S).
            if tag in {"B", "S"} or i == bg_tok: self._start[i] = 0.0
            if tag in {"E", "S"} or i == bg_tok: self._end[i] = 0.0
            for j in range(nc):
                nt, ns = tbt.get(j), ttsl.get(j)
                if self._valid(tag, sl, nt, ns, bg_tok, bg_sp, j):
                    self._trans[i, j] = self._bias(tag, sl, nt, ns, bg_sp, biases)

    @staticmethod
    def _valid(pt, ps, nt, ns, bti, bsi, ni):
        """True when (prev tag pt / prev span ps) -> (next tag nt / next span ns)
        is a legal BIOES transition; bti/bsi are the background ids, ni the
        next token label id.

        NOTE(review): restored @staticmethod — this is called as
        self._valid(...) with all seven arguments, so without the decorator
        the instance would bind to `pt` and the call would raise TypeError.
        """
        nb = ns == bsi or ni == bti
        if (ns is None or nt is None) and not nb: return False
        if pt is None or ps is None: return nb or nt in {"B", "S"}
        if ps == bsi or pt in {"E", "S"}: return nb or nt in {"B", "S"}
        if pt in {"B", "I"}: return ps == ns and nt in {"I", "E"}
        return False

    @staticmethod
    def _bias(pt, ps, nt, ns, bsi, b):
        """Calibration bias for a valid transition (see _valid for arguments).

        NOTE(review): restored @staticmethod, same reasoning as _valid.
        """
        nb, pb = ns == bsi, ps == bsi
        if pb: return b["transition_bias_background_stay"] if nb else b["transition_bias_background_to_start"]
        if pt in {"B", "I"}: return b["transition_bias_inside_to_continue"] if nt == "I" else b["transition_bias_inside_to_end"]
        return b["transition_bias_end_to_background"] if nb else b["transition_bias_end_to_start"]

    def decode(self, lp):
        """Viterbi-decode (seq_len, num_classes) log-probs into a label-id path."""
        # Sequential Viterbi over a tiny (33-class) state space. On T4 the
        # per-step CUDA kernel launches dominated runtime for 100k+ tokens,
        # so run on CPU unconditionally — it's bandwidth-bound on 33x33 and
        # avoids one CUDA sync per timestep.
        if lp.is_cuda:
            lp = lp.to("cpu", dtype=torch.float32, non_blocking=True)
        else:
            lp = lp.to(dtype=torch.float32)
        sl, nc = lp.shape
        if sl == 0: return []
        st, en, tr = self._start, self._end, self._trans  # already CPU/fp32
        scores = lp[0] + st
        bp = torch.empty((sl - 1, nc), dtype=torch.int64)
        for i in range(1, sl):
            t = scores.unsqueeze(1) + tr
            bs, bi = t.max(dim=0)
            scores = bs + lp[i]; bp[i - 1] = bi
        # Degenerate case (no finite path): fall back to per-token argmax.
        if not torch.isfinite(scores).any(): return lp.argmax(dim=1).tolist()
        scores = scores + en
        path = torch.empty(sl, dtype=torch.int64)
        path[-1] = scores.argmax()
        for i in range(sl - 2, -1, -1): path[i] = bp[i, path[i + 1]]
        return path.tolist()
| # ββ runtime singleton ββββββββββββββββββββββββββββββββββββββββββββ | |
@dataclass
class InferenceRuntime:
    """Bundle of everything needed to serve one request.

    NOTE(review): restored @dataclass — instances are constructed
    positionally in get_runtime() (InferenceRuntime(m, encoding, li, device,
    n_ctx)), which requires the generated __init__.
    """
    model: Transformer
    encoding: tiktoken.Encoding
    label_info: LabelInfo
    device: torch.device
    # Chunk size (in tokens) for model forward passes, from config "default_n_ctx".
    n_ctx: int
@functools.lru_cache(maxsize=1)
def get_runtime():
    """Build the model, tokenizer, and label tables exactly once.

    NOTE(review): restored the lru_cache decorator implied by the "runtime
    singleton" section header and the get_decoder() call-site comment
    ("cached, not rebuilt per request") — the extraction appears to have
    stripped decorators file-wide. Without it the checkpoint would be
    reloaded on every call.
    """
    cp = MODEL_DIR
    cfg = json.loads((cp / "config.json").read_text())
    validate_model_config_contract(cfg, context=str(cp))
    device = torch.device("cuda")
    encoding = tiktoken.get_encoding(str(cfg["encoding"]).strip())
    # Derive the span-level and token-level label tables from NER_CLASS_NAMES
    # ("O" plus "TAG-category" names).
    scn = [BACKGROUND_CLASS_LABEL]; sll = {BACKGROUND_CLASS_LABEL: 0}
    bll, ttsl, tbt = {}, {}, {}
    bg_idx = None
    for idx, name in enumerate(NER_CLASS_NAMES):
        if name == BACKGROUND_CLASS_LABEL:
            bg_idx = idx; ttsl[idx] = 0; tbt[idx] = None; continue
        bnd, base = name.split("-", 1)
        si = sll.get(base)
        if si is None:
            si = len(scn); scn.append(base); sll[base] = si
        ttsl[idx] = si; tbt[idx] = bnd
        bll.setdefault(base, {})[bnd] = idx
    li = LabelInfo(bll, ttsl, tbt, tuple(scn), sll, bg_idx, 0)
    m = Transformer.from_checkpoint(str(cp), device=device)
    return InferenceRuntime(m, encoding, li, device, int(cfg["default_n_ctx"]))
@functools.lru_cache(maxsize=1)
def get_decoder():
    """Return the (cached) Viterbi Decoder for the runtime's label tables.

    NOTE(review): restored the lru_cache decorator — the v5 changelog says
    the Decoder was "cached ... with lru_cache (was being rebuilt per
    request)" and the call site comments "cached, not rebuilt per request";
    the decorator appears to have been stripped during extraction.
    """
    return Decoder(label_info=get_runtime().label_info)
def predict_text(runtime, text, decoder):
    """Run PII detection over *text*.

    Returns (source_text, detected) where detected is a list of
    {"label", "start", "end", "text"} dicts with character offsets into
    source_text. source_text is the tokenizer round-trip of *text* (they can
    differ when decoding replaces invalid UTF-8).
    """
    tids = tuple(int(t) for t in runtime.encoding.encode(text, allowed_special="all"))
    if not tids: return text, []
    # Run the model per-chunk and concat once. The v4 code built a Python
    # list via `.unbind(0)` and then rebuilt the same tensor via stack — a
    # no-op that paid 100k small allocations on long inputs.
    chunks = []
    for s in range(0, len(tids), runtime.n_ctx):
        e = min(s + runtime.n_ctx, len(tids))
        wt = torch.tensor(tids[s:e], device=runtime.device, dtype=torch.int32)
        lp = F.log_softmax(runtime.model(wt).float(), dim=-1)
        chunks.append(lp)
    stacked = chunks[0] if len(chunks) == 1 else torch.cat(chunks, dim=0)
    dl = decoder.decode(stacked)  # Decoder pulls to CPU internally
    # Safety net: if Viterbi returned the wrong length, fall back to argmax.
    if len(dl) != len(tids): dl = stacked.argmax(dim=1).tolist()
    pli = {i: int(l) for i, l in enumerate(dl)}
    pts = labels_to_spans(pli, runtime.label_info)
    # Rebuild the text from token bytes so char offsets line up with what the
    # tokenizer actually saw (errors="replace" may alter invalid sequences).
    tb = [runtime.encoding.decode_single_token_bytes(t) for t in tids]
    dt = b"".join(tb).decode("utf-8", errors="replace")
    # Per-character byte offsets (start/end) of the decoded text.
    cbs, cbe = [], []
    bc = 0
    for ch in dt: cbs.append(bc); bc += len(ch.encode("utf-8")); cbe.append(bc)
    # Map each token's byte range to character start/end via binary search:
    # cs[i] = first char whose bytes end after the token starts,
    # ce[i] = first char whose bytes start at/after the token ends.
    cs, ce = [], []
    tbc = 0
    for rb in tb:
        tbs = tbc; tbe = tbs + len(rb); tbc = tbe
        cs.append(bisect_right(cbe, tbs)); ce.append(bisect_left(cbs, tbe))
    pcs = token_spans_to_char_spans(pts, cs, ce)
    # NOTE(review): `dt if dt != text else text` always evaluates to dt (both
    # branches are equal when the condition is false) — kept as written.
    pcs = trim_char_spans_whitespace(pcs, dt if dt != text else text)
    src = dt if dt != text else text
    detected = []
    for li, s, e in pcs:
        if 0 <= li < len(runtime.label_info.span_class_names):
            lbl = runtime.label_info.span_class_names[li]
        else:
            # Out-of-range class id: keep a synthetic label rather than drop it.
            lbl = f"label_{li}"
        detected.append({"label": lbl, "start": s, "end": e, "text": src[s:e]})
    return src, detected
| # ===================================================================== | |
| # APPLICATION LAYER | |
| # ===================================================================== | |
def extract_text(file_path: str) -> str:
    """Extract plain text from a PDF or Word document at *file_path*.

    Pages (PDF) or non-empty paragraphs (DOCX/DOC) are joined with blank
    lines.

    Raises:
        ValueError: for unsupported file extensions.
    """
    suffix = Path(file_path).suffix.lower()
    if suffix == ".pdf":
        import fitz
        doc = fitz.open(file_path)
        try:
            # Fix: close the PyMuPDF document even if extraction raises,
            # instead of leaking the handle.
            pages = [page.get_text() for page in doc]
        finally:
            doc.close()
        return "\n\n".join(pages)
    elif suffix in (".docx", ".doc"):
        from docx import Document
        doc = Document(file_path)
        return "\n\n".join(p.text for p in doc.paragraphs if p.text.strip())
    raise ValueError(f"Unsupported file type: {suffix}")
def compute_stats(text, spans):
    """Aggregate detection statistics: character counts, per-category tallies,
    PII percentage, and line count for *text* and its detected *spans*."""
    total_chars = len(text)
    pii_chars = sum(span["end"] - span["start"] for span in spans)
    categories = {}
    for span in spans:
        entry = categories.setdefault(span["label"], {"count": 0, "chars": 0})
        entry["count"] += 1
        entry["chars"] += span["end"] - span["start"]
    pii_pct = round(pii_chars / total_chars * 100, 1) if total_chars else 0
    return {
        "total_chars": total_chars,
        "pii_chars": pii_chars,
        "pii_percentage": pii_pct,
        "total_spans": len(spans),
        "categories": categories,
        "num_categories": len(categories),
        "total_lines": text.count("\n") + 1 if total_chars else 0,
    }
def detect_speakers(text, spans):
    """Attribute spans to transcript speakers.

    Speaker markers are line prefixes of the form "Name:", "[Name]", or
    "Speaker N:". A line without a marker inherits the most recent speaker.
    Returns {speaker: span_count}; returns {} when no marker was found
    (i.e. every span landed in the generic "Document" bucket).
    """
    prefix_patterns = [r"^([A-Z][a-zA-Z ]{1,30}):\s", r"^\[([^\]]{1,30})\]\s", r"^(Speaker\s*\d+):\s"]
    line_ranges = []
    offset = 0
    current = None
    for line in text.split("\n"):
        for pattern in prefix_patterns:
            match = re.match(pattern, line)
            if match:
                current = match.group(1).strip()
                break
        line_ranges.append((offset, offset + len(line), current))
        offset += len(line) + 1  # +1 for the newline
    counts = {}
    for span in spans:
        midpoint = (span["start"] + span["end"]) // 2
        owner = "Document"
        for line_start, line_end, speaker in line_ranges:
            if line_start <= midpoint <= line_end and speaker:
                owner = speaker
                break
        counts[owner] = counts.get(owner, 0) + 1
    return {} if list(counts.keys()) == ["Document"] else counts
@spaces.GPU
def run_pii_analysis(text: str):
    """GPU-accelerated PII detection.

    Returns (source_text, detected_spans) as produced by predict_text.

    NOTE(review): restored the @spaces.GPU decorator — this Space runs on
    ZeroGPU ("Running on Zero" in the header), where GPU work must execute
    inside a spaces.GPU-decorated function, and the extraction appears to
    have stripped decorators file-wide. Confirm the exact decorator (and any
    duration= argument) against the deployed Space.
    """
    runtime = get_runtime()
    decoder = get_decoder()  # cached, not rebuilt per request
    source_text, detected = predict_text(runtime, text, decoder)
    return source_text, detected
def build_redacted_pdf_bytes(pdf_path: str, pii_texts: list[str]) -> bytes:
    """
    True PyMuPDF redaction: draws a black fill rectangle over the target
    text AND removes the underlying text stream. Longer strings are
    redacted first so fuller matches win over their substrings.
    """
    import fitz
    # Dedupe, drop blanks and one-char noise, then longest first:
    # "Dr. Margaret Holloway" before "Margaret".
    needles = {t.strip() for t in pii_texts if t and len(t.strip()) >= 2}
    ordered = sorted(needles, key=len, reverse=True)
    doc = fitz.open(pdf_path)
    try:
        for page in doc:
            for needle in ordered:
                for rect in page.search_for(needle):
                    page.add_redact_annot(rect, fill=(0, 0, 0))
            page.apply_redactions()
        out = io.BytesIO()
        doc.save(out, garbage=4, deflate=True)
        return out.getvalue()
    finally:
        doc.close()
| # ββ Gradio Server ββββββββββββββββββββββββββββββββββββββββββββββββ | |
server = gr.Server()
# NOTE(review): route decorators appear to have been stripped during
# extraction — homepage() presumably serves the frontend on a GET route
# (returning HTMLResponse, which is imported above); confirm the original
# registration against the deployed app.
async def homepage():
    # FRONTEND_HTML is defined later in the file, beyond this chunk.
    return FRONTEND_HTML
async def analyze_document(file: UploadFile = File(...)):
    """POST handler: extract text from an uploaded PDF/DOC/DOCX, run PII
    detection, and return spans, stats, speakers, and category metadata as JSON.

    NOTE(review): this looks like a FastAPI route whose decorator (e.g.
    @server.post("/api/analyze")) was lost during extraction — confirm the
    original routing against the deployed app.
    """
    suffix = Path(file.filename).suffix.lower()
    if suffix not in (".pdf", ".doc", ".docx"):
        return JSONResponse({"error": f"Unsupported: {suffix}. Use PDF, DOC, or DOCX."}, 400)
    # Persist the upload to disk because extract_text works on file paths.
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(await file.read()); tmp_path = tmp.name
    try:
        text = extract_text(tmp_path)
        if not text.strip():
            return JSONResponse({"error": "No text content found."}, 400)
        source_text, spans = run_pii_analysis(text)
        stats = compute_stats(source_text, spans)
        speakers = detect_speakers(source_text, spans)
        return JSONResponse({
            "filename": file.filename, "text": source_text, "spans": spans,
            "stats": stats, "speakers": speakers,
            "categories_meta": {k: {"color": v["color"], "cls": v["cls"],
                                    "label": v["label"], "mono": v["mono"]}
                                for k, v in CATEGORIES_META.items()},
        })
    except Exception as e:
        # Surface extraction/inference failures to the client as a 500.
        return JSONResponse({"error": str(e)}, 500)
    finally:
        # Always remove the temp file, even on early returns/exceptions.
        if os.path.exists(tmp_path): os.unlink(tmp_path)
async def redact_pdf_endpoint(
    file: UploadFile = File(...),
    spans: str = Form(...),
    active: str = Form(...),
):
    """POST /api/redact-pdf: apply true PyMuPDF redactions to an uploaded PDF.

    ``spans``  — JSON array of span dicts (each with "text" and "label").
    ``active`` — JSON array of label names the client currently has enabled.

    Only spans whose label is in the active set are redacted. Streams the
    redacted PDF back as an attachment; errors come back as ``{"error": ...}``.
    """
    suffix = Path(file.filename).suffix.lower()
    if suffix != ".pdf":
        return JSONResponse({"error": "PDF redaction only accepts PDF input."}, 400)
    try:
        span_list = json.loads(spans)
        active_set = set(json.loads(active))
    except Exception as e:
        return JSONResponse({"error": f"Invalid payload: {e}"}, 400)
    pii_texts = [
        s.get("text", "") for s in span_list
        if s.get("label") in active_set
    ]
    if not pii_texts:
        # BUGFIX: this user-facing message previously contained a mojibake
        # "β" where the em dash belongs; restored the real character.
        return JSONResponse({"error": "No active categories selected — nothing to redact."}, 400)
    # Spool the upload to disk so PyMuPDF can open it by path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(await file.read())
        tmp_path = tmp.name
    try:
        pdf_bytes = build_redacted_pdf_bytes(tmp_path, pii_texts)
        out_name = (Path(file.filename).stem or "document") + ".redacted.pdf"
        return StreamingResponse(
            io.BytesIO(pdf_bytes),
            media_type="application/pdf",
            headers={"Content-Disposition": f'attachment; filename="{out_name}"'},
        )
    except Exception as e:  # boundary handler: report redaction failure
        return JSONResponse({"error": str(e)}, 500)
    finally:
        if os.path.exists(tmp_path):
            os.unlink(tmp_path)
def analyze_text_api(text: str) -> str:
    """Gradio API: analyze raw text for PII; returns a JSON string."""
    source_text, spans = run_pii_analysis(text)
    payload = {
        "text": source_text,
        "spans": spans,
        "stats": compute_stats(source_text, spans),
    }
    return json.dumps(payload, ensure_ascii=False)
# ── Frontend HTML (v5) ───────────────────────────────────────────
| FRONTEND_HTML = r"""<!DOCTYPE html> | |
| <html lang="en"> | |
| <head> | |
| <meta charset="UTF-8"> | |
| <meta name="viewport" content="width=device-width,initial-scale=1"> | |
| <title>PII Reveal β Inspector</title> | |
| <link rel="preconnect" href="https://fonts.googleapis.com"> | |
| <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> | |
| <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700&family=JetBrains+Mono:wght@400;500&family=Source+Serif+4:opsz,wght@8..60,400;8..60,500;8..60,600&display=swap" rel="stylesheet"> | |
| <style> | |
| /* ========================================================= | |
| Theme tokens β light refresh: brighter cards, darker copy, | |
| cleaner borders. Dark tokens unchanged from v4. | |
| ========================================================= */ | |
| :root{ | |
| /* Light */ | |
| --body-background-fill: #f6f6f7; | |
| --block-background-fill: #ffffff; | |
| --block-background-fill-2: #f1f1f3; | |
| --body-text-color: #0a0a0a; | |
| --body-text-color-subdued: #3f3f46; | |
| --body-text-color-faint: #6b7280; | |
| --border-color-primary: #e4e4e7; | |
| --border-color-accent: #d4d4d8; | |
| --primary-bg: #18181b; | |
| --primary-fg: #ffffff; | |
| --h-alpha: 16%; | |
| --shadow-xs: 0 1px 1.5px rgba(10,10,10,.04); | |
| --shadow-sm: 0 1px 3px rgba(10,10,10,.06), 0 1px 2px rgba(10,10,10,.04); | |
| --shadow-md: 0 4px 14px rgba(10,10,10,.07), 0 1px 3px rgba(10,10,10,.04); | |
| --border-radius-lg: 10px; | |
| --border-radius-md: 6px; | |
| --border-radius-sm: 4px; | |
| --font-sans: 'Inter', system-ui, -apple-system, 'Segoe UI', sans-serif; | |
| --font-mono: 'JetBrains Mono', ui-monospace, SFMono-Regular, Menlo, Consolas, monospace; | |
| --font-serif: 'Source Serif 4', 'Source Serif Pro', 'Iowan Old Style', Georgia, serif; | |
| } | |
| @media (prefers-color-scheme: dark){ | |
| :root{ | |
| --body-background-fill: #0e0e11; | |
| --block-background-fill: #18181c; | |
| --block-background-fill-2: #1f1f24; | |
| --body-text-color: #e8e8ea; | |
| --body-text-color-subdued: #a8a8ae; | |
| --body-text-color-faint: #70707a; | |
| --border-color-primary: rgba(255,255,255,0.08); | |
| --border-color-accent: rgba(255,255,255,0.18); | |
| --primary-bg: #f0f0f2; | |
| --primary-fg: #0e0e11; | |
| --h-alpha: 15%; | |
| --shadow-xs: none; | |
| --shadow-sm: none; | |
| --shadow-md: none; | |
| } | |
| } | |
| .dark, .dark :root, html.dark, body.dark{ | |
| --body-background-fill: #0e0e11; | |
| --block-background-fill: #18181c; | |
| --block-background-fill-2: #1f1f24; | |
| --body-text-color: #e8e8ea; | |
| --body-text-color-subdued: #a8a8ae; | |
| --body-text-color-faint: #70707a; | |
| --border-color-primary: rgba(255,255,255,0.08); | |
| --border-color-accent: rgba(255,255,255,0.18); | |
| --primary-bg: #f0f0f2; | |
| --primary-fg: #0e0e11; | |
| --h-alpha: 15%; | |
| --shadow-xs: none; | |
| --shadow-sm: none; | |
| --shadow-md: none; | |
| } | |
| *,*::before,*::after{box-sizing:border-box;margin:0;padding:0} | |
| html,body{height:100%} | |
| body{ | |
| font-family:var(--font-sans); | |
| background:var(--body-background-fill); | |
| color:var(--body-text-color); | |
| font-size:13.5px;line-height:1.5; | |
| -webkit-font-smoothing:antialiased; | |
| font-feature-settings:"cv11","ss01"; | |
| } | |
| button{font:inherit;color:inherit;background:transparent;border:0;cursor:pointer} | |
| .sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0} | |
| /* shared small-caps label treatment */ | |
| .caps{ | |
| font-size:11px;font-weight:500; | |
| letter-spacing:0.06em;text-transform:uppercase; | |
| color:var(--body-text-color-subdued); | |
| } | |
| .shell{max-width:1080px;margin:0 auto;padding:40px 16px 48px} | |
| /* ========================================================= | |
| Upload / landing view | |
| ========================================================= */ | |
| #upload-view{min-height:100vh;display:flex;align-items:center;justify-content:center} | |
| #upload-view .shell{width:100%} | |
| .u-card{ | |
| display:grid;grid-template-columns:1.05fr 0.95fr;gap:0; | |
| background:var(--block-background-fill); | |
| border:0.5px solid var(--border-color-primary); | |
| border-radius:var(--border-radius-lg); | |
| overflow:hidden; | |
| box-shadow:var(--shadow-md); | |
| } | |
| .u-left{padding:40px 36px 34px} | |
| .u-right{ | |
| padding:40px 36px 34px; | |
| background:var(--block-background-fill-2); | |
| border-left:0.5px solid var(--border-color-primary); | |
| display:flex;flex-direction:column;gap:14px; | |
| } | |
| .u-brand{display:flex;align-items:center;gap:10px;margin-bottom:24px} | |
| .u-brand svg{color:var(--body-text-color)} | |
| .u-brand-name{font-size:13.5px;font-weight:500} | |
| .u-brand-name .sub{color:var(--body-text-color-faint);font-weight:400;margin-left:4px} | |
| .u-title{ | |
| font-family:var(--font-serif); | |
| font-size:30px;font-weight:500;letter-spacing:-0.018em; | |
| line-height:1.15;margin-bottom:10px; | |
| color:var(--body-text-color); | |
| } | |
| .u-sub{color:var(--body-text-color-subdued);font-size:14px;margin-bottom:20px;max-width:42ch} | |
| .u-chips{display:flex;flex-wrap:wrap;gap:6px 12px;margin-bottom:24px} | |
| .u-chip{ | |
| display:inline-flex;align-items:center;gap:6px; | |
| font-size:12px;color:var(--body-text-color-subdued);font-weight:500; | |
| } | |
| .u-chip-dot{width:7px;height:7px;border-radius:2px} | |
| .u-drop{ | |
| border:1px solid var(--border-color-primary); | |
| background:color-mix(in srgb, var(--body-text-color) 2.5%, transparent); | |
| border-radius:var(--border-radius-md); | |
| padding:30px 20px; | |
| cursor:pointer;text-align:center; | |
| transition:background .15s,border-color .15s; | |
| position:relative; | |
| } | |
| .u-drop:hover,.u-drop.dragover{ | |
| background:color-mix(in srgb, var(--body-text-color) 5%, transparent); | |
| border-color:var(--border-color-accent); | |
| } | |
| .u-drop-icon{margin:0 auto 8px;color:var(--body-text-color-subdued)} | |
| .u-drop-title{font-size:13.5px;font-weight:500;margin-bottom:3px;color:var(--body-text-color)} | |
| .u-drop-sub{font-family:var(--font-mono);font-size:11px;color:var(--body-text-color-faint)} | |
| .u-drop input{position:absolute;inset:0;opacity:0;cursor:pointer} | |
| .u-meta{ | |
| display:flex;flex-wrap:wrap;align-items:center;margin-top:22px; | |
| font-family:var(--font-mono);font-size:11px;color:var(--body-text-color-faint); | |
| } | |
| .u-meta > span{padding:0 12px;border-right:1px solid var(--border-color-primary);line-height:1} | |
| .u-meta > span:first-child{padding-left:0} | |
| .u-meta > span:last-child{border-right:0;padding-right:0} | |
| .prev-h{margin-bottom:8px} | |
| .prev-row{display:grid;grid-template-columns:1fr 16px 1fr;gap:10px;align-items:stretch} | |
| .prev-arrow{align-self:center;color:var(--body-text-color-faint);font-family:var(--font-mono);font-size:12px;text-align:center} | |
| .prev-card{ | |
| background:var(--block-background-fill); | |
| border:0.5px solid var(--border-color-primary); | |
| border-radius:var(--border-radius-md); | |
| padding:14px 14px 12px; | |
| font-family:var(--font-serif); | |
| font-size:12.5px;line-height:1.65; | |
| color:var(--body-text-color); | |
| min-height:148px; | |
| box-shadow:var(--shadow-xs); | |
| } | |
| .prev-label{ | |
| font-family:var(--font-sans);font-size:10px;font-weight:500; | |
| letter-spacing:0.08em;text-transform:uppercase; | |
| color:var(--body-text-color-faint); | |
| display:block;margin-bottom:8px; | |
| } | |
| .prev-card p{margin:0 0 6px} | |
| .prev-card p:last-child{margin-bottom:0} | |
| .prev-bar{ | |
| display:inline-block;vertical-align:middle; | |
| height:0.85em;border-radius:2px; | |
| background:var(--body-text-color);opacity:.88;margin:0 1px; | |
| } | |
| .u-stat{ | |
| margin-top:auto;padding-top:14px;border-top:0.5px solid var(--border-color-primary); | |
| display:flex;align-items:baseline;gap:8px; | |
| color:var(--body-text-color-subdued);font-size:12px; | |
| } | |
| .u-stat b{ | |
| font-family:var(--font-serif);font-weight:500;font-size:18px; | |
| color:var(--body-text-color);letter-spacing:-0.01em; | |
| } | |
| /* ========================================================= | |
| Results / inspector view | |
| ========================================================= */ | |
| #results-view{display:none;min-height:100vh} | |
| .pr-app{ | |
| font-family:var(--font-sans); | |
| border:0.5px solid var(--border-color-primary); | |
| border-radius:var(--border-radius-lg); | |
| overflow:hidden; | |
| background:var(--block-background-fill); | |
| color:var(--body-text-color); | |
| box-shadow:var(--shadow-md); | |
| } | |
| /* ββ top bar ββ */ | |
| .pr-top{ | |
| display:flex;align-items:center;gap:10px;flex-wrap:wrap; | |
| padding:11px 14px; | |
| border-bottom:0.5px solid var(--border-color-primary); | |
| } | |
| .pr-logo{display:flex;align-items:center;gap:8px} | |
| .pr-name{font-size:13.5px;font-weight:500} | |
| .pr-name-sub{color:var(--body-text-color-faint);font-weight:400;margin-left:4px} | |
| .pr-file-chip{ | |
| font-family:var(--font-mono);font-size:11.5px; | |
| color:var(--body-text-color-subdued); | |
| padding:4px 8px; | |
| background:var(--block-background-fill-2); | |
| border:0.5px solid var(--border-color-primary); | |
| border-radius:5px;margin-left:4px; | |
| max-width:220px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap; | |
| } | |
| .pr-grow{flex:1} | |
| .pr-status{font-size:11.5px;color:var(--body-text-color-subdued);display:flex;align-items:center;gap:6px} | |
| .pr-status-dot{width:6px;height:6px;border-radius:50%;background:#1D9E75;box-shadow:0 0 0 3px color-mix(in srgb, #1D9E75 18%, transparent)} | |
| .pr-top-actions{display:flex;align-items:center;gap:6px;flex-wrap:wrap} | |
| .pr-btn{ | |
| font-size:12px;padding:6px 10px; | |
| border:0.5px solid var(--border-color-accent); | |
| border-radius:5px; | |
| background:var(--block-background-fill); | |
| color:var(--body-text-color); | |
| cursor:pointer; | |
| font-family:inherit;font-weight:500; | |
| display:inline-flex;align-items:center;gap:6px; | |
| transition:background .12s,border-color .12s,color .12s; | |
| } | |
| .pr-btn:hover:not(:disabled){background:color-mix(in srgb, var(--body-text-color) 4%, var(--block-background-fill));border-color:var(--body-text-color-subdued)} | |
| .pr-btn:disabled{opacity:.5;cursor:not-allowed} | |
| .pr-btn-ghost{border-color:var(--border-color-primary);color:var(--body-text-color-subdued);background:transparent;font-weight:400} | |
| .pr-btn-ghost:hover:not(:disabled){color:var(--body-text-color);border-color:var(--border-color-accent);background:color-mix(in srgb, var(--body-text-color) 3%, transparent)} | |
| .pr-btn-prim{ | |
| background:var(--primary-bg);color:var(--primary-fg); | |
| border-color:var(--primary-bg);font-weight:500; | |
| } | |
| .pr-btn-prim:hover:not(:disabled){background:color-mix(in srgb, var(--primary-bg) 88%, var(--body-text-color));border-color:var(--primary-bg)} | |
| .pr-btn-arr{font-family:var(--font-mono);font-size:11px;opacity:0.6} | |
| /* ββ stats ββ */ | |
| .pr-stats{padding:18px 18px 16px;border-bottom:0.5px solid var(--border-color-primary)} | |
| .pr-stats-row{display:flex;align-items:flex-end;gap:34px;margin-bottom:14px;flex-wrap:wrap} | |
| .pr-hero{ | |
| font-size:34px;font-weight:600;line-height:1;letter-spacing:-0.028em; | |
| font-variant-numeric:tabular-nums;color:var(--body-text-color); | |
| } | |
| .pr-hero-pct{font-size:18px;opacity:0.5;margin-left:1px;font-weight:400} | |
| .pr-num{font-size:21px;font-weight:600;line-height:1;letter-spacing:-0.015em;font-variant-numeric:tabular-nums} | |
| .pr-lab{margin-top:10px} | |
| .pr-bar{display:flex;height:4px;gap:2px;margin-bottom:12px;border-radius:2px;overflow:hidden} | |
| .pr-bar > span{display:block;height:100%;border-radius:1px;min-width:4px;transition:opacity .15s} | |
| .pr-bar > span:hover{opacity:.82} | |
| .pr-legend{display:flex;flex-wrap:wrap;gap:8px 14px;font-size:12px} | |
| .pr-leg{display:flex;align-items:center;gap:6px;color:var(--body-text-color-subdued);cursor:pointer;user-select:none;font-weight:500} | |
| .pr-leg-sw{width:8px;height:8px;border-radius:2px} | |
| .pr-leg-ct{font-family:var(--font-mono);font-size:11px;color:var(--body-text-color-faint);margin-left:1px;font-weight:500} | |
| .pr-leg.off{opacity:.4} | |
| .pr-leg.off .pr-leg-sw{opacity:.3} | |
| /* ββ body ββ */ | |
| .pr-body{display:grid;grid-template-columns:minmax(0,1fr) 220px} | |
| /* ββ doc pane ββ */ | |
| .pr-doc-pane{ | |
| padding:20px 24px 28px; | |
| border-right:0.5px solid var(--border-color-primary); | |
| min-width:0;max-height:calc(100vh - 260px);overflow-y:auto; | |
| } | |
| .pr-doc-meta{ | |
| font-family:var(--font-mono);font-size:11px;color:var(--body-text-color-faint); | |
| margin-bottom:16px;display:flex;gap:10px;flex-wrap:wrap; | |
| } | |
| .pr-doc-meta span + span::before{content:'Β·';margin-right:10px;color:var(--border-color-accent)} | |
| .pr-text{ | |
| font-family:var(--font-serif); | |
| font-size:15px;line-height:1.9; | |
| color:var(--body-text-color); | |
| white-space:pre-wrap;word-wrap:break-word; | |
| font-feature-settings:"liga","calt"; | |
| } | |
| .h{padding:1px 1px;border-bottom:1.5px solid;transition:background .15s,opacity .15s;cursor:pointer} | |
| .h:hover{filter:brightness(0.96) saturate(1.12)} | |
| .h.off{background:transparent !important;border-color:transparent !important;color:inherit;opacity:.9} | |
| .hp {background:color-mix(in srgb, #E24B4A var(--h-alpha), transparent); border-color:#E24B4A} | |
| .hd {background:color-mix(in srgb, #7F77DD var(--h-alpha), transparent); border-color:#7F77DD} | |
| .ha {background:color-mix(in srgb, #1D9E75 var(--h-alpha), transparent); border-color:#1D9E75} | |
| .he {background:color-mix(in srgb, #378ADD var(--h-alpha), transparent); border-color:#378ADD} | |
| .hac {background:color-mix(in srgb, #BA7517 var(--h-alpha), transparent); border-color:#BA7517} | |
| .hu {background:color-mix(in srgb, #D85A30 var(--h-alpha), transparent); border-color:#D85A30} | |
| .hs {background:color-mix(in srgb, #D4537E var(--h-alpha), transparent); border-color:#D4537E} | |
| .hph {background:color-mix(in srgb, #639922 var(--h-alpha), transparent); border-color:#639922} | |
| .m{font-family:var(--font-mono);font-size:13px} | |
| /* ββ sidebar ββ */ | |
| .pr-side{background:var(--block-background-fill-2);padding:16px 14px;display:flex;flex-direction:column;gap:20px;min-width:0} | |
| .pr-side-head{display:flex;align-items:baseline;justify-content:space-between;gap:8px;margin-bottom:8px} | |
| .pr-side-link{font-size:11px;color:var(--body-text-color-subdued);cursor:pointer;background:transparent;border:0;padding:0;font-family:inherit;font-weight:500} | |
| .pr-side-link:hover{color:var(--body-text-color);text-decoration:underline} | |
| .pr-cat{ | |
| position:relative; | |
| display:grid;grid-template-columns:9px 1fr auto; | |
| column-gap:8px;row-gap:4px;align-items:center; | |
| padding:8px 10px 7px; | |
| border-radius:var(--border-radius-sm); | |
| background:color-mix(in srgb, var(--body-text-color) 3%, transparent); | |
| border:0.5px solid transparent; | |
| cursor:pointer;user-select:none; | |
| transition:background .12s,border-color .12s,opacity .15s; | |
| margin-bottom:4px;overflow:hidden; | |
| } | |
| .pr-cat:hover{border-color:var(--border-color-accent)} | |
| .pr-cat-sw{width:9px;height:9px;border-radius:2px;flex-shrink:0;grid-row:1} | |
| .pr-cat-nm{grid-row:1;color:var(--body-text-color);font-size:12.5px;font-weight:500} | |
| .pr-cat-ct{grid-row:1;font-family:var(--font-mono);font-size:11px;color:var(--body-text-color-faint);text-align:right;font-weight:500} | |
| .pr-cat-mini{grid-column:2/4;grid-row:2;height:1.5px;width:100%;background:color-mix(in srgb, var(--body-text-color) 6%, transparent);border-radius:1px;overflow:hidden} | |
| .pr-cat-mini > span{display:block;height:100%;border-radius:1px;transition:width .2s,background .15s} | |
| .pr-cat.on{background:color-mix(in srgb, var(--cat) 9%, transparent);box-shadow:inset 3px 0 0 0 var(--cat);padding-left:13px} | |
| .pr-cat.on .pr-cat-nm{color:var(--body-text-color)} | |
| .pr-cat.off{opacity:.42;filter:saturate(.35)} | |
| .pr-cat.off .pr-cat-nm{text-decoration:line-through} | |
| .pr-cat.off .pr-cat-mini > span{background:var(--body-text-color-faint) !important} | |
| .pr-speakers .pr-cat{cursor:default;background:transparent;border-color:transparent;padding:4px 2px} | |
| .pr-speakers .pr-cat:hover{background:transparent;border-color:transparent} | |
| .pr-speakers .pr-cat-sw{background:var(--body-text-color-faint);opacity:.55} | |
| .pr-speakers .pr-cat-mini{display:none} | |
| .empty-rail{color:var(--body-text-color-faint);font-size:12px;font-style:italic} | |
| /* loading */ | |
| #loading{ | |
| position:fixed;inset:0; | |
| background:color-mix(in srgb, var(--body-background-fill) 88%, transparent); | |
| backdrop-filter:blur(8px); | |
| display:none;flex-direction:column;align-items:center;justify-content:center;gap:10px;z-index:9999; | |
| } | |
| .l-ring{width:26px;height:26px;border:1.5px solid var(--border-color-accent);border-top-color:var(--body-text-color);border-radius:50%;animation:sp .7s linear infinite} | |
| @keyframes sp{to{transform:rotate(360deg)}} | |
| .l-label{font-family:var(--font-mono);font-size:11.5px;color:var(--body-text-color-subdued)} | |
| .error-banner{ | |
| margin:14px 18px 0;padding:10px 14px; | |
| background:color-mix(in srgb, #E24B4A 10%, transparent); | |
| border:0.5px solid color-mix(in srgb, #E24B4A 45%, transparent); | |
| border-radius:var(--border-radius-md); | |
| color:#C43A39;font-size:12.5px;display:none;font-weight:500; | |
| } | |
| .tip{ | |
| position:fixed;z-index:9998; | |
| font-family:var(--font-mono);font-size:11px; | |
| color:var(--primary-fg);background:var(--primary-bg); | |
| padding:4px 8px;border-radius:4px; | |
| pointer-events:none;white-space:nowrap; | |
| max-width:420px;overflow:hidden;text-overflow:ellipsis; | |
| } | |
| @media(max-width:880px){ | |
| .u-card{grid-template-columns:1fr} | |
| .u-right{border-left:0;border-top:0.5px solid var(--border-color-primary)} | |
| .pr-body{grid-template-columns:1fr} | |
| .pr-doc-pane{border-right:none;border-bottom:0.5px solid var(--border-color-primary);max-height:none} | |
| } | |
| @media(max-width:640px){ | |
| .shell{padding:24px 12px} | |
| } | |
| </style> | |
| </head> | |
| <body> | |
| <!-- ============ UPLOAD VIEW ============ --> | |
| <div id="upload-view"> | |
| <div class="shell"> | |
| <div class="u-card"> | |
| <div class="u-left"> | |
| <div class="u-brand"> | |
| <svg width="20" height="20" viewBox="0 0 20 20" fill="none"> | |
| <rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/> | |
| <circle cx="8.5" cy="8.5" r="3.2" stroke="var(--block-background-fill)" stroke-width="1.4" fill="none"/> | |
| <line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--block-background-fill)" stroke-width="1.4" stroke-linecap="round"/> | |
| </svg> | |
| <span class="u-brand-name">PII Reveal<span class="sub">/ inspector</span></span> | |
| </div> | |
| <h1 class="u-title">See what your documents are leaking.</h1> | |
      <p class="u-sub">Find every PII span in a PDF, DOC or DOCX — names, accounts, secrets and five other entity types — then export a fully redacted copy.</p>
| <div class="u-chips"> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#E24B4A"></span>Person</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#378ADD"></span>Email</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#7F77DD"></span>Date</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#1D9E75"></span>Address</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#BA7517"></span>Account</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#D85A30"></span>URL</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#639922"></span>Phone</span> | |
| <span class="u-chip"><span class="u-chip-dot" style="background:#D4537E"></span>Secret</span> | |
| </div> | |
| <div class="u-drop" id="dropzone"> | |
| <div class="u-drop-icon"> | |
| <svg width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round"> | |
| <path d="M12 3v13"/><path d="m6 9 6-6 6 6"/><path d="M4 17v2a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2v-2"/> | |
| </svg> | |
| </div> | |
| <div class="u-drop-title">Drop a document, or click to browse</div> | |
| <div class="u-drop-sub">pdf · doc · docx · up to 128k tokens</div> | |
| <input type="file" id="file-input" accept=".pdf,.doc,.docx"> | |
| </div> | |
| <div class="u-meta"> | |
| <span>openai privacy filter</span> | |
| <span>128k ctx</span> | |
| <span>bfloat16</span> | |
| <span>apache 2.0</span> | |
| </div> | |
| </div> | |
| <div class="u-right" aria-hidden="true"> | |
| <div class="prev-h caps">Before → after</div> | |
| <div class="prev-row"> | |
| <div class="prev-card"> | |
| <span class="prev-label">detected</span> | |
| <p>Reporter: <span class="h hp">Dr. Margaret Holloway-Chen</span> called at <span class="h hd m">03:42 GMT</span>.</p> | |
| <p>Email: <span class="h he m">margaret.h@protomail.co.uk</span>.</p> | |
| <p>Token: <span class="h hs m">sk_live_T3sT4zN9pQ2v</span>.</p> | |
| </div> | |
| <div class="prev-arrow">→</div> | |
| <div class="prev-card"> | |
| <span class="prev-label">redacted</span> | |
| <p>Reporter: <span class="prev-bar" style="width:11em"></span> called at <span class="prev-bar" style="width:3.5em"></span>.</p> | |
| <p>Email: <span class="prev-bar" style="width:9em"></span>.</p> | |
| <p>Token: <span class="prev-bar" style="width:7em"></span>.</p> | |
| </div> | |
| </div> | |
| <div class="u-stat"> | |
| <b>PDF-ready</b> | |
| <span>export a redacted PDF or .txt with one click</span> | |
| </div> | |
| </div> | |
| </div> | |
| </div> | |
| </div> | |
| <!-- ============ RESULTS VIEW ============ --> | |
| <div id="results-view"> | |
| <div class="shell"> | |
| <div class="pr-app" aria-label="PII Reveal inspector"> | |
| <div class="pr-top"> | |
| <div class="pr-logo"> | |
| <svg width="20" height="20" viewBox="0 0 20 20" fill="none" style="color: var(--body-text-color);"> | |
| <rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/> | |
| <circle cx="8.5" cy="8.5" r="3.2" stroke="var(--block-background-fill)" stroke-width="1.4" fill="none"/> | |
| <line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--block-background-fill)" stroke-width="1.4" stroke-linecap="round"/> | |
| </svg> | |
| <span class="pr-name">PII Reveal<span class="pr-name-sub">/ inspector</span></span> | |
| </div> | |
| <span class="pr-file-chip" id="file-chip"></span> | |
| <span class="pr-status" id="scan-status"><span class="pr-status-dot"></span>Scan complete</span> | |
| <div class="pr-grow"></div> | |
| <div class="pr-top-actions"> | |
| <button class="pr-btn pr-btn-ghost" id="act-copy" title="Copy masked text to clipboard"><span>Copy masked</span></button> | |
| <button class="pr-btn pr-btn-ghost" id="act-report" title="Download JSON report"><span>Report</span></button> | |
| <button class="pr-btn" id="act-txt" title="Download sanitized .txt"><span>.txt</span></button> | |
| <button class="pr-btn pr-btn-prim" id="act-pdf" title="Download redacted PDF"><span>Redact PDF</span><span class="pr-btn-arr">→</span></button> | |
| <button class="pr-btn pr-btn-ghost" id="btn-new"><span>New file</span></button> | |
| </div> | |
| </div> | |
| <div class="error-banner" id="error-banner"></div> | |
| <div class="pr-stats"> | |
| <div class="pr-stats-row"> | |
| <div> | |
| <div class="pr-hero"><span id="hero-val">0</span><span class="pr-hero-pct">%</span></div> | |
| <div class="caps pr-lab">PII content</div> | |
| </div> | |
| <div> | |
| <div class="pr-num" id="num-spans">0</div> | |
| <div class="caps pr-lab">Spans detected</div> | |
| </div> | |
| <div> | |
| <div class="pr-num" id="num-cats">0 / 8</div> | |
| <div class="caps pr-lab">Categories present</div> | |
| </div> | |
| <div> | |
| <div class="pr-num" id="num-speakers">0</div> | |
| <div class="caps pr-lab">Speakers identified</div> | |
| </div> | |
| </div> | |
| <div class="pr-bar" id="dist-bar"></div> | |
| <div class="pr-legend" id="legend"></div> | |
| </div> | |
| <div class="pr-body"> | |
| <div class="pr-doc-pane"> | |
| <div class="pr-doc-meta" id="doc-meta"></div> | |
| <div class="pr-text" id="doc-text"></div> | |
| </div> | |
| <aside class="pr-side"> | |
| <div> | |
| <div class="pr-side-head"> | |
| <span class="caps">Filter categories</span> | |
| <button class="pr-side-link" id="cat-toggle-all">Clear all</button> | |
| </div> | |
| <div id="cat-list"></div> | |
| </div> | |
| <div id="speakers-block" style="display:none"> | |
| <div class="pr-side-head"><span class="caps">Speakers</span></div> | |
| <div class="pr-speakers" id="speakers-list"></div> | |
| </div> | |
| </aside> | |
| </div> | |
| </div> | |
| </div> | |
| </div> | |
| <div id="loading"> | |
| <div class="l-ring"></div> | |
| <div class="l-label" id="loading-label">scanning document…</div> | |
| </div> | |
| <div class="tip" id="tip" style="display:none"></div> | |
| <script> | |
| /* ===== state ===== */ | |
| const S = { | |
| text:'', spans:[], stats:{}, speakers:{}, catMeta:{}, filename:'', file:null, | |
| activeCats:new Set(), scanMs:0, sortedSpans:[], | |
| }; | |
| const DEFAULT_META = { | |
| private_person: {color:'#E24B4A', cls:'hp', label:'Person', mono:false}, | |
| private_date: {color:'#7F77DD', cls:'hd', label:'Date', mono:true}, | |
| private_address: {color:'#1D9E75', cls:'ha', label:'Address', mono:false}, | |
| private_email: {color:'#378ADD', cls:'he', label:'Email', mono:true}, | |
| account_number: {color:'#BA7517', cls:'hac', label:'Account', mono:true}, | |
| private_url: {color:'#D85A30', cls:'hu', label:'URL', mono:true}, | |
| secret: {color:'#D4537E', cls:'hs', label:'Secret', mono:true}, | |
| private_phone: {color:'#639922', cls:'hph', label:'Phone', mono:true}, | |
| }; | |
| const ORDER = ['private_person','private_address','private_email','private_phone', | |
| 'private_url','private_date','account_number','secret']; | |
| const metaFor = c => ({...(DEFAULT_META[c]||{color:'#999',cls:'',label:c,mono:false}), ...(S.catMeta[c]||{})}); | |
| const isPdf = () => (S.filename||'').toLowerCase().endsWith('.pdf'); | |
| /* ===== upload flow ===== */ | |
| const dz = document.getElementById('dropzone'); | |
| const fi = document.getElementById('file-input'); | |
| ['dragenter','dragover'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.add('dragover'); })); | |
| ['dragleave','drop'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.remove('dragover'); })); | |
| dz.addEventListener('drop', ev => { if (ev.dataTransfer.files[0]) uploadFile(ev.dataTransfer.files[0]); }); | |
| fi.addEventListener('change', ev => { if (ev.target.files[0]) uploadFile(ev.target.files[0]); }); | |
| async function uploadFile(file){ | |
| const ext = file.name.split('.').pop().toLowerCase(); | |
| if (!['pdf','doc','docx'].includes(ext)) { showError('Unsupported file type.'); return; } | |
| S.file = file; // keep for redact-pdf round-trip | |
  document.getElementById('loading-label').textContent = 'scanning document…';
| document.getElementById('loading').style.display='flex'; | |
| document.getElementById('upload-view').style.display='none'; | |
| const form = new FormData(); form.append('file', file); | |
| const t0 = performance.now(); | |
| try{ | |
| const r = await fetch('/api/analyze', {method:'POST', body:form}); | |
| const d = await r.json(); | |
| if (d.error) { showError(d.error); return; } | |
| S.scanMs = performance.now() - t0; | |
| S.text = d.text; S.spans = d.spans; S.stats = d.stats; | |
| S.speakers = d.speakers||{}; S.catMeta = d.categories_meta||{}; | |
| S.filename = d.filename; | |
| S.activeCats = new Set(Object.keys(d.stats.categories)); | |
| S.sortedSpans = [...S.spans].sort((a,b) => a.start - b.start); | |
| renderResults(); | |
| } catch(e){ showError('Analysis failed: '+e.message); } | |
| finally { document.getElementById('loading').style.display='none'; } | |
| } | |
| function showError(m){ | |
| document.getElementById('loading').style.display='none'; | |
| document.getElementById('upload-view').style.display='flex'; | |
| document.getElementById('results-view').style.display='none'; | |
| alert(m); | |
| } | |
| function resetView(){ | |
| document.getElementById('results-view').style.display='none'; | |
| document.getElementById('upload-view').style.display='flex'; | |
| fi.value = ''; S.file = null; | |
| } | |
| document.getElementById('btn-new').addEventListener('click', resetView); | |
| /* ===== render ===== */ | |
function renderResults(){
  // Reveal the results layout, stamp the header chips, then repaint every
  // panel from the shared state object S.
  document.getElementById('results-view').style.display = 'block';
  document.getElementById('file-chip').textContent = S.filename;
  document.getElementById('scan-status').innerHTML =
    `<span class="pr-status-dot"></span>Scan complete · ${(S.scanMs/1000).toFixed(1)}s`;
  const painters = [
    renderStats, renderBar, renderLegend, renderDocMeta,
    renderDoc, renderCats, renderSpeakers,
    updateToggleAllLabel, updatePrimaryAction,
  ];
  for (const paint of painters) paint();
}
function updatePrimaryAction(){
  // PDF inputs get "Redact PDF" as the primary action; for anything else
  // the PDF button is hidden and the .txt export takes the primary styling.
  const pdfBtn = document.getElementById('act-pdf');
  const txtBtn = document.getElementById('act-txt');
  const pdf = isPdf();
  pdfBtn.style.display = pdf ? '' : 'none';
  pdfBtn.classList.toggle('pr-btn-prim', pdf);
  txtBtn.classList.toggle('pr-btn-prim', !pdf);
}
function renderStats(){
  // Headline numbers from the server-provided stats payload.
  const stats = S.stats;
  document.getElementById('hero-val').textContent = (stats.pii_percentage ?? 0).toFixed(1);
  document.getElementById('num-spans').textContent = stats.total_spans;
  document.getElementById('num-cats').textContent = `${stats.num_categories} / 8`;
  const speakerCount = Object.keys(S.speakers).length;
  // Zero speakers shows a dash placeholder rather than "0".
  document.getElementById('num-speakers').textContent = speakerCount || 'β';
}
function renderBar(){
  // Stacked distribution bar: one segment per detected category, with width
  // proportional to its share of highlighted characters.
  const bar = document.getElementById('dist-bar');
  bar.innerHTML = '';
  const cats = S.stats.categories;
  const totalChars = Object.values(cats).reduce((sum, c) => sum + c.chars, 0) || 1;
  const present = ORDER.filter(c => cats[c]);
  if (!present.length) {
    // No detections: show a single muted filler segment.
    const filler = document.createElement('span');
    filler.style.cssText = 'flex:1;background:var(--border-color-primary);opacity:.4';
    bar.appendChild(filler);
    return;
  }
  for (const cat of present) {
    const meta = metaFor(cat);
    const seg = document.createElement('span');
    seg.style.background = meta.color;
    seg.style.flex = cats[cat].chars / totalChars;
    seg.dataset.cat = cat;
    seg.addEventListener('mouseenter', ev => showTip(ev, `${meta.label} Β· ${cats[cat].count}`));
    seg.addEventListener('mousemove', moveTip);
    seg.addEventListener('mouseleave', hideTip);
    // Dim segments whose category is currently toggled off.
    if (!S.activeCats.has(cat)) seg.style.opacity = '.25';
    bar.appendChild(seg);
  }
}
function renderLegend(){
  // Clickable legend chips mirroring the distribution-bar categories.
  const leg = document.getElementById('legend');
  leg.innerHTML = '';
  const cats = S.stats.categories;
  ORDER.filter(c => cats[c]).forEach(c => {
    const m = metaFor(c);
    const chip = document.createElement('span');
    chip.className = S.activeCats.has(c) ? 'pr-leg' : 'pr-leg off';
    chip.dataset.cat = c;
    chip.innerHTML = `<span class="pr-leg-sw" style="background:${m.color}"></span>${m.label}<span class="pr-leg-ct">${cats[c].count}</span>`;
    chip.addEventListener('click', () => toggleCat(c));
    leg.appendChild(chip);
  });
}
function renderDocMeta(){
  // Summary strip under the document header: size, length, and scan time.
  const s = S.stats;
  const items = [
    `${s.total_chars.toLocaleString()} characters`,
    `${s.total_lines.toLocaleString()} lines`,
    `scanned in ${(S.scanMs/1000).toFixed(1)}s`,
  ];
  let html = '';
  for (const item of items) html += `<span>${item}</span>`;
  document.getElementById('doc-meta').innerHTML = html;
}
function esc(s){
  // HTML-escape by round-tripping through a detached element, letting the
  // browser apply its own entity rules.
  const scratch = document.createElement('div');
  scratch.textContent = s;
  return scratch.innerHTML;
}
function renderDoc(){
  // Paint the document body: escaped plain text interleaved with highlight
  // spans. Spans are pre-sorted by start offset; any span that overlaps an
  // earlier one is dropped.
  const { text, sortedSpans, activeCats } = S;
  const el = document.getElementById('doc-text');
  const out = [];
  let cursor = 0;
  for (const sp of sortedSpans) {
    if (sp.start < cursor) continue;   // overlaps the previous span
    if (sp.start > cursor) out.push(esc(text.substring(cursor, sp.start)));
    const m = metaFor(sp.label);
    let cls = 'h ' + m.cls;
    if (m.mono) cls += ' m';
    if (!activeCats.has(sp.label)) cls += ' off';
    out.push(`<span class="${cls}" data-cat="${sp.label}">${esc(text.substring(sp.start, sp.end))}</span>`);
    cursor = sp.end;
  }
  if (cursor < text.length) out.push(esc(text.substring(cursor)));
  el.innerHTML = out.join('');
  // Tooltips on every highlight: category label plus the highlighted text.
  for (const span of el.querySelectorAll('.h')) {
    const m = metaFor(span.dataset.cat);
    span.addEventListener('mouseenter', ev => showTip(ev, `${m.label}: ${span.textContent.trim()}`));
    span.addEventListener('mousemove', moveTip);
    span.addEventListener('mouseleave', hideTip);
  }
}
function renderCats(){
  // Sidebar category rows: swatch, name, count, and a mini share bar sized
  // by the category's fraction of all detected spans.
  const box = document.getElementById('cat-list');
  box.innerHTML = '';
  const cats = S.stats.categories;
  const present = ORDER.filter(c => cats[c]);
  if (!present.length) {
    box.innerHTML = '<div class="empty-rail">No entities detected.</div>';
    return;
  }
  const totalSpans = S.stats.total_spans || 1;
  present.forEach(c => {
    const m = metaFor(c);
    const count = cats[c].count;
    const share = (count / totalSpans) * 100;
    const row = document.createElement('div');
    row.className = S.activeCats.has(c) ? 'pr-cat on' : 'pr-cat off';
    row.dataset.cat = c;
    row.style.setProperty('--cat', m.color);
    row.innerHTML = `
      <span class="pr-cat-sw" style="background:${m.color}"></span>
      <span class="pr-cat-nm">${m.label}</span>
      <span class="pr-cat-ct">${count}</span>
      <span class="pr-cat-mini"><span style="width:${share.toFixed(1)}%;background:${m.color}"></span></span>`;
    row.addEventListener('click', () => toggleCat(c));
    box.appendChild(row);
  });
}
function renderSpeakers(){
  // Speaker panel: hidden entirely when no speakers were detected.
  const names = Object.keys(S.speakers);
  const block = document.getElementById('speakers-block');
  const box = document.getElementById('speakers-list');
  if (!names.length) { block.style.display = 'none'; return; }
  block.style.display = 'block';
  box.innerHTML = '';
  names.forEach(name => {
    const row = document.createElement('div');
    row.className = 'pr-cat';
    // Speaker names come from document content, so they must be escaped.
    row.innerHTML = `<span class="pr-cat-sw"></span><span class="pr-cat-nm">${esc(name)}</span><span class="pr-cat-ct">${S.speakers[name]}</span>`;
    box.appendChild(row);
  });
}
function toggleCat(c){
  // Flip one category in state, then patch every UI surface that reflects
  // it: sidebar rows, legend chips, document highlights, and bar segments.
  const on = !S.activeCats.has(c);
  if (on) S.activeCats.add(c); else S.activeCats.delete(c);
  for (const el of document.querySelectorAll(`.pr-cat[data-cat="${c}"]`)) {
    el.classList.toggle('on', on);
    el.classList.toggle('off', !on);
  }
  for (const el of document.querySelectorAll(`.pr-leg[data-cat="${c}"]`)) el.classList.toggle('off', !on);
  for (const el of document.querySelectorAll(`.h[data-cat="${c}"]`)) el.classList.toggle('off', !on);
  for (const el of document.querySelectorAll(`.pr-bar span[data-cat="${c}"]`)) el.style.opacity = on ? '1' : '.25';
  updateToggleAllLabel();
}
function updateToggleAllLabel(){
  // The bulk button reads "Clear all" only while every detected category is
  // active; otherwise it offers "Select all".
  const btn = document.getElementById('cat-toggle-all');
  if (!btn) return;
  const cats = Object.keys(S.stats.categories || {});
  const everyOn = cats.length > 0 && cats.every(c => S.activeCats.has(c));
  btn.textContent = everyOn ? 'Clear all' : 'Select all';
}
document.getElementById('cat-toggle-all').addEventListener('click', () => {
  // Bulk toggle: if every category is on, turn them all off; otherwise turn
  // them all on. Routed through toggleCat so all UI surfaces stay in sync.
  const cats = Object.keys(S.stats.categories || {});
  const target = !cats.every(c => S.activeCats.has(c));
  for (const c of cats) {
    if (S.activeCats.has(c) !== target) toggleCat(c);
  }
});
| /* tooltip */ | |
function showTip(ev, text){
  // Populate and reveal the shared tooltip, then position it immediately.
  const tip = document.getElementById('tip');
  tip.textContent = text;
  tip.style.display = 'block';
  moveTip(ev);
}
function moveTip(ev){
  // Offset the bubble slightly right of and above the pointer.
  const tip = document.getElementById('tip');
  tip.style.left = `${ev.clientX + 12}px`;
  tip.style.top = `${ev.clientY - 26}px`;
}
function hideTip(){
  document.getElementById('tip').style.display = 'none';
}
| /* ===== actions ===== */ | |
function sanitizedText(){
  // Rebuild the document with every span of an ACTIVE category replaced by
  // its [LABEL] placeholder; inactive categories keep their original text.
  // Mirrors renderDoc's overlap rule: a span starting before the cursor
  // (i.e. overlapping an earlier span) is skipped.
  const out = [];
  let cursor = 0;
  for (const sp of S.sortedSpans) {
    if (sp.start < cursor) continue;
    if (sp.start > cursor) out.push(S.text.substring(cursor, sp.start));
    const raw = S.text.substring(sp.start, sp.end);
    out.push(S.activeCats.has(sp.label) ? `[${metaFor(sp.label).label.toUpperCase()}]` : raw);
    cursor = sp.end;
  }
  if (cursor < S.text.length) out.push(S.text.substring(cursor));
  return out.join('');
}
function download(name, content, type){
  // Trigger a client-side save via a temporary anchor; accepts either a
  // ready-made Blob or raw text (wrapped with the given MIME type).
  const blob = content instanceof Blob ? content : new Blob([content], { type: type || 'text/plain' });
  const url = URL.createObjectURL(blob);
  const link = document.createElement('a');
  link.href = url;
  link.download = name;
  document.body.appendChild(link);
  link.click();
  link.remove();
  // Revoke lazily so the browser has begun the download before the URL dies.
  setTimeout(() => URL.revokeObjectURL(url), 1000);
}
function baseName(){
  // Current filename without its final extension; names whose only dot is
  // the leading one (dotfiles) are returned whole.
  const name = S.filename || 'document';
  const dot = name.lastIndexOf('.');
  return dot > 0 ? name.slice(0, dot) : name;
}
document.getElementById('act-txt').addEventListener('click', () => {
  // Export the redacted plain-text version of the document.
  const outName = `${baseName()}.redacted.txt`;
  download(outName, sanitizedText(), 'text/plain');
  flash('act-txt', 'Exported');
});
document.getElementById('act-copy').addEventListener('click', async () => {
  // Clipboard writes can be rejected (permissions / insecure context), so
  // report failure on the button instead of throwing.
  try {
    await navigator.clipboard.writeText(sanitizedText());
    flash('act-copy', 'Copied');
  } catch {
    flash('act-copy', 'Copy failed');
  }
});
document.getElementById('act-report').addEventListener('click', () => {
  // Machine-readable scan summary: stats, speakers, active filter, raw spans.
  const report = {
    filename: S.filename,
    scanned_in_ms: Math.round(S.scanMs),
    stats: S.stats,
    speakers: S.speakers,
    active_categories: Array.from(S.activeCats),
    spans: S.spans,
  };
  const outName = `${baseName()}.report.json`;
  download(outName, JSON.stringify(report, null, 2), 'application/json');
  flash('act-report', 'Downloaded');
});
document.getElementById('act-pdf').addEventListener('click', async () => {
  // Server-side PDF redaction: POSTs the original file plus the detected
  // spans and the currently active labels to /api/redact-pdf, then saves the
  // redacted PDF the backend streams back.
  if (!isPdf()) return;
  if (!S.file) {
    alert('Original PDF reference lost β upload again to export a redacted PDF.');
    return;
  }
  if (!S.activeCats.size) {
    alert('No categories selected β enable at least one category in the sidebar before redacting.');
    return;
  }
  const btn = document.getElementById('act-pdf');
  // Guard the inner-<span> lookup (consistent with flash()): if the button
  // markup carries no <span>, fall back to the button's own text. The
  // original code dereferenced labelSpan unconditionally and would throw.
  const labelSpan = btn.querySelector('span');
  const setLabel = t => { if (labelSpan) labelSpan.textContent = t; else btn.textContent = t; };
  const original = labelSpan ? labelSpan.textContent : btn.textContent;
  setLabel('Generatingβ¦');
  btn.disabled = true;
  try {
    const form = new FormData();
    form.append('file', S.file);
    form.append('spans', JSON.stringify(S.spans));
    form.append('active', JSON.stringify([...S.activeCats]));
    const r = await fetch('/api/redact-pdf', { method:'POST', body: form });
    if (!r.ok) {
      // Prefer the server's JSON error payload; fall back to the HTTP status.
      let err = `Redaction failed (${r.status})`;
      try { const j = await r.json(); err = j.error || err; } catch {}
      throw new Error(err);
    }
    const blob = await r.blob();
    download(baseName() + '.redacted.pdf', blob, 'application/pdf');
    setLabel('Downloaded');
  } catch (e) {
    setLabel('Failed');
    alert(e.message || 'Redaction failed');
  } finally {
    btn.disabled = false;
    // Restore the resting label after the success/failure state has shown.
    setTimeout(() => setLabel(original), 1500);
  }
});
// One pending restore timer per button id, so rapid clicks don't race.
const _flashTimers = {};
function flash(id, msg){
  // Briefly swap a button's label for feedback text, restoring the previous
  // label after 1.3s. Works with or without an inner <span>.
  const btn = document.getElementById(id);
  const span = btn.querySelector('span');
  const setText = t => { if (span) span.textContent = t; else btn.textContent = t; };
  const prev = span ? span.textContent : btn.textContent;
  setText(msg);
  clearTimeout(_flashTimers[id]);
  _flashTimers[id] = setTimeout(() => setText(prev), 1300);
}
| </script> | |
| </body> | |
| </html>""" | |
| # ββ launch βββββββββββββββββββββββββββββββββββββββββββββββββββββββ | |
if __name__ == "__main__":
    # Bind to all interfaces on the conventional HF Spaces port.
    launch_opts = {"server_name": "0.0.0.0", "server_port": 7860}
    server.launch(**launch_opts)