ysharma HF Staff commited on
Commit
161740e
Β·
verified Β·
1 Parent(s): d6e2d28

Upload app_v3.py

Browse files
Files changed (1) hide show
  1. app_v3.py +1381 -0
app_v3.py ADDED
@@ -0,0 +1,1381 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ PII Reveal - Document Privacy Explorer (v3 β€” "Inspector")
3
+ ==========================================================
4
+ Redesigned frontend matching the mockup in pii_reveal_redesign.html,
5
+ addressing ui-critique-2.txt:
6
+ - Scanner/inspector aesthetic, not dashboard
7
+ - Three-way typography: serif for document body, mono for technical
8
+ values (IBAN, URLs, emails, phones, dates, secrets), sans for UI chrome
9
+ - Stats hierarchy: 22.7% is the hero, other stats step down
10
+ - Thin 4px distribution bar between numbers and legend
11
+ - Sidebar rows ARE the toggle (no checkboxes). Off = dimmed
12
+ - Speakers get neutral swatches so they don't read as a 9th category
13
+ - Actions footer: Redact and export (primary), Copy sanitized, Download report
14
+ - Harmonized category palette tuned for a privacy/security tool
15
+
16
+ Backend (model, server, endpoints) is identical to app.py.
17
+ """
18
+
19
+ # ── stdlib ───────────────────────────────────────────────────────
20
+ import dataclasses
21
+ import functools
22
+ import json
23
+ import math
24
+ import os
25
+ import re
26
+ import tempfile
27
+ from bisect import bisect_left, bisect_right
28
+ from collections.abc import Sequence
29
+ from dataclasses import dataclass
30
+ from pathlib import Path
31
+ from typing import Final
32
+
33
+ # ── third-party ──────────────────────────────────────────────────
34
+ import gradio as gr
35
+ import spaces
36
+ import tiktoken
37
+ import torch
38
+ import torch.nn.functional as F
39
+ from fastapi import UploadFile, File
40
+ from fastapi.responses import HTMLResponse, JSONResponse
41
+ from huggingface_hub import snapshot_download
42
+ from safetensors import safe_open
43
+
44
+ # ── configuration ────────────────────────────────────────────────
45
+ MODEL_REPO = os.getenv("MODEL_ID", "charles-first-org/second-model")
46
+ HF_TOKEN = os.getenv("HF_TOKEN", None)
47
+ MODEL_DIR = Path(snapshot_download(MODEL_REPO, token=HF_TOKEN))
48
+
49
+ # Harmonized palette from the mockup. `mono` flags which categories should
50
+ # render in monospace (technical values: dates, IBANs, URLs, emails, phones,
51
+ # secrets). Names and addresses stay in serif prose.
52
+ CATEGORIES_META = {
53
+ "private_person": {"color": "#E24B4A", "cls": "hp", "label": "Person", "mono": False},
54
+ "private_date": {"color": "#7F77DD", "cls": "hd", "label": "Date", "mono": True},
55
+ "private_address": {"color": "#1D9E75", "cls": "ha", "label": "Address", "mono": False},
56
+ "private_email": {"color": "#378ADD", "cls": "he", "label": "Email", "mono": True},
57
+ "account_number": {"color": "#BA7517", "cls": "hac", "label": "Account", "mono": True},
58
+ "private_url": {"color": "#D85A30", "cls": "hu", "label": "URL", "mono": True},
59
+ "secret": {"color": "#D4537E", "cls": "hs", "label": "Secret", "mono": True},
60
+ "private_phone": {"color": "#639922", "cls": "hph", "label": "Phone", "mono": True},
61
+ }
62
+
63
+ # =====================================================================
64
+ # MODEL ARCHITECTURE + INFERENCE (from reference implementation)
65
+ # =====================================================================
66
+
67
# Contract constants for the privacy-filter checkpoint format.
PRIVACY_FILTER_MODEL_TYPE: Final[str] = "privacy_filter"
# Keys every checkpoint config.json must define
# (enforced by validate_model_config_contract).
REQUIRED_MODEL_CONFIG_KEYS: Final[tuple[str, ...]] = (
    "model_type", "encoding", "num_hidden_layers", "num_experts",
    "experts_per_token", "vocab_size", "num_labels", "hidden_size",
    "intermediate_size", "head_dim", "num_attention_heads",
    "num_key_value_heads", "sliding_window", "bidirectional_context",
    "bidirectional_left_context", "bidirectional_right_context",
    "default_n_ctx", "initial_context_length", "rope_theta",
    "rope_scaling_factor", "rope_ntk_alpha", "rope_ntk_beta", "param_dtype",
)
# Label of the non-PII ("outside") class.
BACKGROUND_CLASS_LABEL: Final[str] = "O"
# BIES boundary scheme: Begin / Inside / End / Single-token span.
BOUNDARY_PREFIXES: Final[tuple[str, ...]] = ("B", "I", "E", "S")
# Span-level categories; tuple order defines the numeric span label ids.
SPAN_CLASS_NAMES: Final[tuple[str, ...]] = (
    BACKGROUND_CLASS_LABEL,
    "account_number", "private_address", "private_date", "private_email",
    "private_person", "private_phone", "private_url", "secret",
)
# Token-level classes: background plus every (prefix, category) pair.
# 1 + 4 * 8 = 33 labels, matching the num_labels contract check below.
NER_CLASS_NAMES: Final[tuple[str, ...]] = (BACKGROUND_CLASS_LABEL,) + tuple(
    f"{prefix}-{base}"
    for base in SPAN_CLASS_NAMES if base != BACKGROUND_CLASS_LABEL
    for prefix in BOUNDARY_PREFIXES
)
# Bias keys read from viterbi_calibration.json; each defaults to 0.0.
VITERBI_TRANSITION_BIAS_KEYS: Final[tuple[str, ...]] = (
    "transition_bias_background_stay", "transition_bias_background_to_start",
    "transition_bias_inside_to_continue", "transition_bias_inside_to_end",
    "transition_bias_end_to_background", "transition_bias_end_to_start",
)
# Operating-point preset selected from the calibration file, if present.
DEFAULT_VITERBI_CALIBRATION_PRESET: Final[str] = "default"
95
+
96
+
97
def validate_model_config_contract(cfg: dict, *, context: str) -> None:
    """Validate that a checkpoint config satisfies the privacy-filter contract.

    Raises ValueError (prefixed with `context`) when a required key is
    absent or a structural invariant is violated: symmetric non-negative
    bidirectional context, sliding_window == 2*context+1, exactly 33
    labels, and bfloat16 parameters.
    """
    absent = [key for key in REQUIRED_MODEL_CONFIG_KEYS if key not in cfg]
    if absent:
        raise ValueError(f"{context} missing keys: {', '.join(absent)}")
    if cfg.get("model_type") != PRIVACY_FILTER_MODEL_TYPE:
        raise ValueError(f"{context} model_type must be {PRIVACY_FILTER_MODEL_TYPE!r}")
    if cfg.get("bidirectional_context") is not True:
        raise ValueError(f"{context} must use bidirectional_context=true")
    left = cfg.get("bidirectional_left_context")
    right = cfg.get("bidirectional_right_context")
    symmetric = (
        isinstance(left, int) and isinstance(right, int)
        and left == right and left >= 0
    )
    if not symmetric:
        raise ValueError(f"{context} bidirectional context must be equal non-negative ints")
    if cfg.get("sliding_window") != 2 * left + 1:
        raise ValueError(f"{context} sliding_window must equal 2*context+1")
    if cfg["num_labels"] != 33:
        raise ValueError(f"{context} num_labels must be 33")
    if cfg["param_dtype"] != "bfloat16":
        raise ValueError(f"{context} param_dtype must be bfloat16")
115
+
116
+
117
+ # ── model helpers ────────────────────────────────────────────────
118
+
119
+ def expert_linear(x: torch.Tensor, weight: torch.Tensor, bias: torch.Tensor | None) -> torch.Tensor:
120
+ n, e, k = x.shape
121
+ _, _, _, o = weight.shape
122
+ out = torch.bmm(x.reshape(n * e, 1, k), weight.reshape(n * e, k, o)).reshape(n, e, o)
123
+ return out + bias if bias is not None else out
124
+
125
+
126
@dataclass
class ModelConfig:
    """Subset of the checkpoint config.json consumed by the Transformer.

    Field names mirror the checkpoint keys (see REQUIRED_MODEL_CONFIG_KEYS),
    except bidirectional_context_size which is derived in
    from_checkpoint_config.
    """
    num_hidden_layers: int; num_experts: int; experts_per_token: int
    vocab_size: int; num_labels: int; hidden_size: int; intermediate_size: int
    head_dim: int; num_attention_heads: int; num_key_value_heads: int
    bidirectional_context_size: int; initial_context_length: int
    rope_theta: float; rope_scaling_factor: float; rope_ntk_alpha: float; rope_ntk_beta: float

    @classmethod
    def from_checkpoint_config(cls, cfg: dict, *, context: str) -> "ModelConfig":
        """Build a ModelConfig from a raw config dict, ignoring extra keys.

        `context` is accepted for signature parity with the validator but is
        not used here.
        """
        cfg = dict(cfg)  # copy so the caller's dict is not mutated
        # Left/right contexts are validated equal; keep one canonical size.
        cfg["bidirectional_context_size"] = cfg["bidirectional_left_context"]
        fields = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in cfg.items() if k in fields})
140
+
141
+
142
class RMSNorm(torch.nn.Module):
    """Root-mean-square normalization with a learned per-channel scale.

    Statistics are computed in float32 for stability and the result is
    cast back to the input dtype.
    """

    def __init__(self, n: int, eps: float = 1e-5, device=None):
        super().__init__()
        self.eps = eps
        self.scale = torch.nn.Parameter(torch.ones(n, device=device, dtype=torch.float32))

    def forward(self, x):
        upcast = x.float()
        inv_rms = torch.rsqrt(upcast.pow(2).mean(-1, keepdim=True) + self.eps)
        normalized = upcast * inv_rms * self.scale
        return normalized.to(x.dtype)
151
+
152
+
153
def apply_rope(x, cos, sin):
    """Rotate interleaved even/odd channel pairs of x by (cos, sin).

    cos/sin get a broadcast head axis via unsqueeze(-2); the rotated pairs
    are re-interleaved so the output shape equals x.shape.
    """
    cos = cos.unsqueeze(-2).to(x.dtype)
    sin = sin.unsqueeze(-2).to(x.dtype)
    even = x[..., ::2]
    odd = x[..., 1::2]
    rotated_even = even * cos - odd * sin
    rotated_odd = odd * cos + even * sin
    return torch.stack((rotated_even, rotated_odd), dim=-1).reshape(x.shape)
157
+
158
+
159
class RotaryEmbedding(torch.nn.Module):
    """RoPE with NTK-aware context scaling and cached cos/sin tables.

    Tables are precomputed on CPU for max_position_embeddings positions and
    grown lazily in forward() if a longer sequence arrives. The blending of
    interpolated/extrapolated frequencies appears to follow the YaRN scheme
    — NOTE(review): confirm against the training-side implementation.
    """

    def __init__(self, head_dim, base, dtype, *, initial_context_length=4096,
                 scaling_factor=1.0, ntk_alpha=1.0, ntk_beta=32.0, device=None):
        super().__init__()
        self.head_dim, self.base, self.dtype = head_dim, base, dtype
        self.initial_context_length = initial_context_length
        self.scaling_factor, self.ntk_alpha, self.ntk_beta = scaling_factor, ntk_alpha, ntk_beta
        self.device = device
        # Scaled max context (never smaller than the initial context).
        mp = max(int(initial_context_length * scaling_factor), initial_context_length)
        self.max_position_embeddings = mp
        cos, sin = self._compute(mp, device=torch.device("cpu"))
        target = device or torch.device("cpu")
        # Non-persistent: tables are derived, not checkpoint weights.
        self.register_buffer("cos_cache", cos.to(target), persistent=False)
        self.register_buffer("sin_cache", sin.to(target), persistent=False)

    def _inv_freq(self, device=None):
        # Per-channel-pair inverse frequencies; when scaling_factor > 1,
        # blend interpolated and extrapolated frequencies with a linear
        # ramp between the ntk_beta (low) and ntk_alpha (high) cutoffs.
        device = device or self.device
        freq = self.base ** (torch.arange(0, self.head_dim, 2, dtype=torch.float, device=device) / self.head_dim)
        if self.scaling_factor > 1.0:
            d_half = self.head_dim / 2
            low = d_half * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi)) / math.log(self.base)
            high = d_half * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi)) / math.log(self.base)
            interp = 1.0 / (self.scaling_factor * freq)
            extrap = 1.0 / freq
            ramp = (torch.arange(d_half, dtype=torch.float32, device=device) - low) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            return interp * (1 - mask) + extrap * mask
        return 1.0 / freq

    def _compute(self, n, device=None):
        # Build (cos, sin) tables for n positions, applying a magnitude
        # correction when the context has been extended.
        inv_freq = self._inv_freq(device)
        t = torch.arange(n, dtype=torch.float32, device=device or self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        c = 0.1 * math.log(self.scaling_factor) + 1.0 if self.scaling_factor > 1.0 else 1.0
        return (freqs.cos() * c).to(self.dtype), (freqs.sin() * c).to(self.dtype)

    def forward(self, q, k):
        """Rotate q and k by position; input shapes are preserved."""
        n = q.shape[0]
        # Grow the cache on CPU if the sequence outruns what we precomputed.
        if n > self.cos_cache.shape[0]:
            cos, sin = self._compute(n, torch.device("cpu"))
            self.cos_cache, self.sin_cache = cos.to(q.device), sin.to(q.device)
        # Move tables to the query's device only when they differ.
        cc = self.cos_cache.to(q.device) if self.cos_cache.device != q.device else self.cos_cache
        sc = self.sin_cache.to(q.device) if self.sin_cache.device != q.device else self.sin_cache
        cos, sin = cc[:n], sc[:n]
        q = apply_rope(q.view(n, -1, self.head_dim), cos, sin).reshape(q.shape)
        k = apply_rope(k.view(n, -1, self.head_dim), cos, sin).reshape(k.shape)
        return q, k
206
+
207
+
208
def sdpa(Q, K, V, S, sm_scale, ctx):
    """Bidirectional sliding-window attention with per-head sink logits.

    Q is (n, heads, queries, head_dim); K and V are (n, heads, head_dim).
    Each position attends to the 2*ctx+1 window centred on itself. `S`
    supplies per-head sink logits that are appended to each score row so
    softmax mass can drain without contributing to the output. Returns the
    attention output flattened to (n, heads*queries*head_dim).
    """
    n, nh, qm, hd = Q.shape
    w = 2 * ctx + 1  # full window width
    # Zero-pad the position axis so every window is full-width.
    Kp = F.pad(K, (0, 0, 0, 0, ctx, ctx)); Vp = F.pad(V, (0, 0, 0, 0, ctx, ctx))
    # unfold -> (n, w, heads, head_dim): keys/values visible from each position.
    Kw = Kp.unfold(0, w, 1).permute(0, 3, 1, 2); Vw = Vp.unfold(0, w, 1).permute(0, 3, 1, 2)
    idx = torch.arange(w, device=Q.device) - ctx
    pos = torch.arange(n, device=Q.device)[:, None] + idx[None, :]
    valid = (pos >= 0) & (pos < n)  # mask slots that fall outside the sequence
    scores = torch.einsum("nhqd,nwhd->nhqw", Q, Kw).float() * sm_scale
    scores = scores.masked_fill(~valid[:, None, None, :], -float("inf"))
    # Sink logits scaled by ln(2) — presumably stored base-2; TODO confirm.
    sink = (S * math.log(2.0)).reshape(nh, qm)[None, :, :, None].expand(n, -1, -1, 1)
    scores = torch.cat([scores, sink], dim=-1)
    # Drop the sink column after softmax: it absorbs mass, emits nothing.
    wt = torch.softmax(scores, dim=-1)[..., :-1].to(V.dtype)
    return torch.einsum("nhqw,nwhd->nhqd", wt, Vw).reshape(n, -1)
222
+
223
+
224
class AttentionBlock(torch.nn.Module):
    """Pre-norm residual attention block: RMSNorm -> fused QKV -> RoPE ->
    sliding-window attention with sinks -> output projection."""

    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.head_dim, self.nah, self.nkv = cfg.head_dim, cfg.num_attention_heads, cfg.num_key_value_heads
        self.ctx = int(cfg.bidirectional_context_size)
        # Per-head attention-sink logits consumed by sdpa().
        self.sinks = torch.nn.Parameter(torch.empty(cfg.num_attention_heads, device=device, dtype=torch.float32))
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Fused projection: query heads plus key and value heads.
        qkv_d = cfg.head_dim * (cfg.num_attention_heads + 2 * cfg.num_key_value_heads)
        self.qkv = torch.nn.Linear(cfg.hidden_size, qkv_d, device=device, dtype=dt)
        self.out = torch.nn.Linear(cfg.head_dim * cfg.num_attention_heads, cfg.hidden_size, device=device, dtype=dt)
        # sqrt applied twice: the 1/sqrt(d) scale is split between q and k.
        self.qk_scale = 1 / math.sqrt(math.sqrt(cfg.head_dim))
        self.rope = RotaryEmbedding(cfg.head_dim, int(cfg.rope_theta), torch.float32,
                                    initial_context_length=cfg.initial_context_length,
                                    scaling_factor=cfg.rope_scaling_factor,
                                    ntk_alpha=cfg.rope_ntk_alpha, ntk_beta=cfg.rope_ntk_beta, device=device)

    def forward(self, x):
        # x: (n_tokens, hidden_size).
        t = self.norm(x).to(self.qkv.weight.dtype)
        qkv = F.linear(t, self.qkv.weight, self.qkv.bias)
        hd, nah, nkv = self.head_dim, self.nah, self.nkv
        # Slice the fused projection into q, k, v segments.
        q = qkv[:, :nah * hd].contiguous()
        k = qkv[:, nah * hd:(nah + nkv) * hd].contiguous()
        v = qkv[:, (nah + nkv) * hd:(nah + 2 * nkv) * hd].contiguous()
        q, k = self.rope(q, k)
        # Both sides carry half the attention scale (see qk_scale).
        q, k = q * self.qk_scale, k * self.qk_scale
        n = q.shape[0]
        # Group query heads under their shared kv head: (n, nkv, groups, hd).
        q = q.view(n, nkv, nah // nkv, hd); k = k.view(n, nkv, hd); v = v.view(n, nkv, hd)
        ao = sdpa(q, k, v, self.sinks, 1.0, self.ctx).to(self.out.weight.dtype)
        # Residual connection around the whole block.
        return x + F.linear(ao, self.out.weight, self.out.bias).to(x.dtype)
254
+
255
+
256
def swiglu(x, alpha=1.702, limit=7.0):
    """Clamped SwiGLU activation.

    Splits the last dimension into a gate half and a linear half; the gate
    is clamped from above, the linear half symmetrically, and the output is
    sigmoid-gated with a +1 shift on the linear half. Output has half the
    input's last dimension.
    """
    gate, linear = x.chunk(2, dim=-1)
    gate = gate.clamp(max=limit)
    linear = linear.clamp(-limit, limit)
    return gate * torch.sigmoid(alpha * gate) * (linear + 1)
260
+
261
+
262
class MLPBlock(torch.nn.Module):
    """Pre-norm residual mixture-of-experts feed-forward block.

    A softmax-topk gate routes each token to `experts_per_token` experts;
    their SwiGLU MLP outputs are combined with the gate weights.
    """

    def __init__(self, cfg: ModelConfig, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.ne, self.ept = cfg.num_experts, cfg.experts_per_token
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        self.gate = torch.nn.Linear(cfg.hidden_size, cfg.num_experts, device=device, dtype=dt)
        # mlp1 projects to 2*intermediate (gate + linear halves for swiglu);
        # mlp2 projects back to hidden. One weight/bias set per expert.
        self.mlp1_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp1_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp2_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size, cfg.hidden_size, device=device, dtype=dt))
        self.mlp2_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, device=device, dtype=dt))

    def forward(self, x):
        t = self.norm(x)
        # Router scores computed in float32 for stable softmax/topk.
        gs = F.linear(t.float(), self.gate.weight.float(), self.gate.bias.float())
        top = torch.topk(gs, k=self.ept, dim=-1, sorted=True)
        # The /ept here cancels against the *ept inside _chunk.
        ew = torch.softmax(top.values, dim=-1) / self.ept
        ei = top.indices
        ept = self.ept

        def _chunk(tc, eic, ewc):
            # Run one chunk of tokens through their selected experts and
            # combine with the gate weights.
            o = expert_linear(tc.float().unsqueeze(1).expand(-1, eic.shape[1], -1),
                              self.mlp1_weight[eic].float(), self.mlp1_bias[eic].float())
            o = swiglu(o)
            o = expert_linear(o.float(), self.mlp2_weight[eic].float(), self.mlp2_bias[eic].float())
            return (torch.einsum("bec,be->bc", o.to(ewc.dtype), ewc) * ept).to(x.dtype)

        # Chunking bounds peak memory from gathering per-token expert weights.
        cs = 32
        if t.shape[0] > cs:
            parts = [_chunk(t[s:s+cs], ei[s:s+cs], ew[s:s+cs]) for s in range(0, t.shape[0], cs)]
            return x + torch.cat(parts, 0)
        return x + _chunk(t, ei, ew)
294
+
295
+
296
class TransformerBlock(torch.nn.Module):
    """One transformer layer: attention then expert MLP. Each sub-block
    applies its own residual connection internally."""

    def __init__(self, cfg, device=None):
        super().__init__()
        self.attn = AttentionBlock(cfg, device=device)
        self.mlp = MLPBlock(cfg, device=device)

    def forward(self, x):
        hidden = self.attn(x)
        return self.mlp(hidden)
303
+
304
+
305
class Checkpoint:
    """Lazy tensor loader over a directory of safetensors shards, with a
    rename map for parameters whose checkpoint names differ from the
    module attribute names."""

    @staticmethod
    def build_param_name_map(n):
        # Module attribute name -> name used inside the checkpoint files
        # (the checkpoint stores the MoE MLP under swiglu/out).
        return ({f"block.{i}.mlp.mlp1_bias": f"block.{i}.mlp.swiglu.bias" for i in range(n)}
                | {f"block.{i}.mlp.mlp1_weight": f"block.{i}.mlp.swiglu.weight" for i in range(n)}
                | {f"block.{i}.mlp.mlp2_bias": f"block.{i}.mlp.out.bias" for i in range(n)}
                | {f"block.{i}.mlp.mlp2_weight": f"block.{i}.mlp.out.weight" for i in range(n)})

    def __init__(self, path, device, num_hidden_layers):
        self.pnm = self.build_param_name_map(num_hidden_layers)
        # Device string for safe_open ("cuda" or "cuda:0" style).
        self.ds = device.type if device.index is None else f"{device.type}:{device.index}"
        files = [os.path.join(path, f) for f in os.listdir(path) if f.endswith(".safetensors")]
        # Index: tensor name -> shard file that contains it.
        self.map = {}
        for sf in files:
            with safe_open(sf, framework="pt", device=self.ds) as h:
                for k in h.keys():
                    self.map[k] = sf

    def get(self, name):
        """Load one tensor by module parameter name (remapped if needed)."""
        mapped = self.pnm.get(name, name)
        with safe_open(self.map[mapped], framework="pt", device=self.ds) as h:
            return h.get_tensor(mapped)
327
+
328
+
329
class Transformer(torch.nn.Module):
    """Token-classification transformer: embedding -> transformer blocks ->
    RMSNorm -> per-token label logits (num_labels wide)."""

    def __init__(self, cfg, device):
        super().__init__()
        dt = torch.bfloat16
        self.embedding = torch.nn.Embedding(cfg.vocab_size, cfg.hidden_size, device=device, dtype=dt)
        self.block = torch.nn.ModuleList([TransformerBlock(cfg, device=device) for _ in range(cfg.num_hidden_layers)])
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Classification head over token labels (bias-free).
        self.unembedding = torch.nn.Linear(cfg.hidden_size, cfg.num_labels, bias=False, device=device, dtype=dt)

    def forward(self, token_ids):
        # token_ids: 1-D int tensor; returns (n_tokens, num_labels) logits.
        x = self.embedding(token_ids)
        for blk in self.block:
            x = blk(x)
        return F.linear(self.norm(x), self.unembedding.weight, None)

    @classmethod
    def from_checkpoint(cls, checkpoint_dir, *, device):
        """Build the model and copy weights from a safetensors checkpoint.

        TF32 is disabled and matmul precision pinned so float32 reductions
        are not silently downgraded on Ampere+ GPUs.
        Raises ValueError if a checkpoint tensor's shape does not match the
        module parameter.
        """
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        torch.set_float32_matmul_precision("highest")
        cp = json.loads((Path(checkpoint_dir) / "config.json").read_text())
        validate_model_config_contract(cp, context=str(checkpoint_dir))
        cfg = ModelConfig.from_checkpoint_config(cp, context=str(checkpoint_dir))
        ckpt = Checkpoint(checkpoint_dir, device, cfg.num_hidden_layers)
        m = cls(cfg, device); m.eval()
        for name, param in m.named_parameters():
            loaded = ckpt.get(name)
            if param.shape != loaded.shape:
                raise ValueError(f"Shape mismatch {name}: {param.shape} vs {loaded.shape}")
            param.data.copy_(loaded)
        return m
360
+
361
+
362
+ # ── label info + span decoding ───────────────────────────────────
363
+
364
@dataclass(frozen=True)
class LabelInfo:
    """Mappings between token-level (BIES-tagged) labels and span categories."""
    # span base name -> {boundary tag ("B"/"I"/"E"/"S") -> token label id}
    boundary_label_lookup: dict[str, dict[str, int]]
    # token label id -> span class id
    token_to_span_label: dict[int, int]
    # token label id -> boundary tag, or None for the background label
    token_boundary_tags: dict[int, str | None]
    # span class id -> name (index 0 is background)
    span_class_names: tuple[str, ...]
    # span name -> span class id
    span_label_lookup: dict[str, int]
    # token-level id of the background class
    background_token_label: int
    # span-level id of the background class
    background_span_label: int
373
+
374
+
375
def labels_to_spans(labels_by_index, label_info):
    """Collapse per-token BIES labels into (span_label, start, end) tuples.

    `labels_by_index` maps token index -> token label id; indices may be
    sparse. `end` is exclusive. Malformed sequences (e.g. "I" without a
    matching "B", gaps in the index range) flush the current span rather
    than raising.
    """
    spans, cur_label, start_idx, prev_idx = [], None, None, None
    bg = label_info.background_span_label
    for ti in sorted(labels_by_index):
        lid = labels_by_index[ti]
        sl = label_info.token_to_span_label.get(lid)
        bt = label_info.token_boundary_tags.get(lid)
        # A gap in token indices terminates any open span.
        if prev_idx is not None and ti != prev_idx + 1:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        # Unknown label id: skip the token entirely.
        if sl is None:
            prev_idx = ti; continue
        # Background closes an open span at this token.
        if sl == bg:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, ti))
            cur_label = start_idx = None; prev_idx = ti; continue
        if bt == "S":
            # Single-token span; flush anything that was open first.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
        elif bt == "B":
            # Begin: flush the open span, then start a new one here.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label, start_idx = sl, ti
        elif bt == "I":
            # Inside: continue only if it matches the open span's label;
            # otherwise treat it as an implicit begin.
            if cur_label is None or cur_label != sl:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                cur_label, start_idx = sl, ti
        elif bt == "E":
            # End: close the matching open span; a mismatched "E" becomes a
            # single-token span after flushing whatever was open.
            if cur_label is None or cur_label != sl or start_idx is None:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
            else:
                spans.append((cur_label, start_idx, ti + 1)); cur_label = start_idx = None
        else:
            # Unrecognized boundary tag: flush and reset.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        prev_idx = ti
    # Flush a span still open at end-of-sequence.
    if cur_label is not None and start_idx is not None and prev_idx is not None:
        spans.append((cur_label, start_idx, prev_idx + 1))
    return spans
420
+
421
+
422
def token_spans_to_char_spans(spans, cs, ce):
    """Map (label, token_start, token_end) spans to character offsets.

    `cs`/`ce` hold each token's first/last character index. Spans whose
    token range is out of bounds, or that collapse to zero characters, are
    dropped.
    """
    result = []
    for label, tok_start, tok_end in spans:
        in_bounds = 0 <= tok_start < tok_end <= len(cs)
        if not in_bounds:
            continue
        char_start = cs[tok_start]
        char_end = ce[tok_end - 1]
        if char_end > char_start:
            result.append((label, char_start, char_end))
    return result
431
+
432
+
433
def trim_char_spans_whitespace(spans, text):
    """Shrink each (label, start, end) span to exclude leading and trailing
    whitespace; invalid spans and spans that become empty are dropped."""
    trimmed = []
    for label, start, end in spans:
        if start < 0 or end > len(text) or start >= end:
            continue
        while start < end and text[start].isspace():
            start += 1
        while end > start and text[end - 1].isspace():
            end -= 1
        if end > start:
            trimmed.append((label, start, end))
    return trimmed
443
+
444
+
445
+ # ── viterbi decoder ──────────────────────────────────────────────
446
+
447
@functools.lru_cache(maxsize=1)
def get_viterbi_transition_biases():
    """Load Viterbi transition biases from the checkpoint, if present.

    Accepts either a flat {bias_key: value} mapping or the nested
    {"operating_points": {preset: {"biases": {...}}}} layout. Missing file,
    malformed payload, or missing keys all fall back to 0.0 biases.
    """
    cp = MODEL_DIR / "viterbi_calibration.json"
    default = {k: 0.0 for k in VITERBI_TRANSITION_BIAS_KEYS}
    if not cp.is_file():
        return default
    payload = json.loads(cp.read_text())
    raw = payload
    # Prefer the default operating-point preset when the nested layout is used.
    ops = payload.get("operating_points")
    if isinstance(ops, dict):
        preset = ops.get(DEFAULT_VITERBI_CALIBRATION_PRESET)
        if isinstance(preset, dict):
            raw = preset.get("biases", raw)
    if not isinstance(raw, dict):
        return default
    return {k: float(raw.get(k, 0.0)) for k in VITERBI_TRANSITION_BIAS_KEYS}
463
+
464
+
465
class Decoder:
    """Viterbi decoder over BIES token labels.

    __init__ builds start/end/transition score tables once from LabelInfo:
    illegal entries get -1e9, legal transitions get a calibrated bias.
    decode() then finds the max-scoring label path for a sequence of
    per-token log-probabilities.
    """

    def __init__(self, label_info):
        nc = len(label_info.token_to_span_label)
        self._start = torch.full((nc,), -1e9, dtype=torch.float32)
        self._end = torch.full((nc,), -1e9, dtype=torch.float32)
        self._trans = torch.full((nc, nc), -1e9, dtype=torch.float32)
        biases = get_viterbi_transition_biases()
        bg_tok, bg_sp = label_info.background_token_label, label_info.background_span_label
        ttsl, tbt = label_info.token_to_span_label, label_info.token_boundary_tags
        for i in range(nc):
            tag, sl = tbt.get(i), ttsl.get(i)
            # A path may start at background or a span-opening tag, and end
            # at background or a span-closing tag.
            if tag in {"B", "S"} or i == bg_tok: self._start[i] = 0.0
            if tag in {"E", "S"} or i == bg_tok: self._end[i] = 0.0
            for j in range(nc):
                nt, ns = tbt.get(j), ttsl.get(j)
                if self._valid(tag, sl, nt, ns, bg_tok, bg_sp, j):
                    self._trans[i, j] = self._bias(tag, sl, nt, ns, bg_sp, biases)

    @staticmethod
    def _valid(pt, ps, nt, ns, bti, bsi, ni):
        # Is (prev tag pt, prev span ps) -> (next tag nt, next span ns)
        # a legal BIES transition?  ni is the next token label id.
        nb = ns == bsi or ni == bti  # next state is background
        if (ns is None or nt is None) and not nb: return False
        if pt is None or ps is None: return nb or nt in {"B", "S"}
        if ps == bsi or pt in {"E", "S"}: return nb or nt in {"B", "S"}
        # From B or I we must continue the SAME span with I or E.
        if pt in {"B", "I"}: return ps == ns and nt in {"I", "E"}
        return False

    @staticmethod
    def _bias(pt, ps, nt, ns, bsi, b):
        # Calibrated additive score for a legal transition (see
        # VITERBI_TRANSITION_BIAS_KEYS for the bias names).
        nb, pb = ns == bsi, ps == bsi
        if pb: return b["transition_bias_background_stay"] if nb else b["transition_bias_background_to_start"]
        if pt in {"B", "I"}: return b["transition_bias_inside_to_continue"] if nt == "I" else b["transition_bias_inside_to_end"]
        return b["transition_bias_end_to_background"] if nb else b["transition_bias_end_to_start"]

    def decode(self, lp):
        """Viterbi-decode a (seq_len, num_classes) log-prob matrix into a
        list of label ids; falls back to per-token argmax when no
        finite-scoring path exists."""
        sl, nc = lp.shape
        if sl == 0: return []
        st = self._start.to(lp.device, lp.dtype)
        en = self._end.to(lp.device, lp.dtype)
        tr = self._trans.to(lp.device, lp.dtype)
        scores = lp[0] + st
        # bp[i] holds the best predecessor for each state at step i+1.
        bp = torch.empty((sl - 1, nc), device=lp.device, dtype=torch.int64)
        for i in range(1, sl):
            t = scores.unsqueeze(1) + tr
            bs, bi = t.max(dim=0)
            scores = bs + lp[i]; bp[i - 1] = bi
        if not torch.isfinite(scores).any(): return lp.argmax(dim=1).tolist()
        scores += en
        # Backtrace from the best final state.
        path = torch.empty(sl, device=lp.device, dtype=torch.int64)
        path[-1] = scores.argmax()
        for i in range(sl - 2, -1, -1): path[i] = bp[i, path[i + 1]]
        return path.tolist()
517
+
518
+
519
+ # ── runtime singleton ────────────────────────────────────────────
520
+
521
@dataclass(frozen=True)
class InferenceRuntime:
    """Immutable bundle of everything needed to run inference once loaded:
    the model, tokenizer encoding, label mappings, target device, and the
    window size (default_n_ctx) used to chunk long inputs."""
    model: Transformer; encoding: tiktoken.Encoding; label_info: LabelInfo
    device: torch.device; n_ctx: int
525
+
526
+
527
@functools.lru_cache(maxsize=1)
def get_runtime():
    """Load and cache the model, tokenizer, and label mappings (singleton).

    Derives LabelInfo from NER_CLASS_NAMES: the background class gets span
    id 0, each new base category the next id, and every BIES-tagged token
    label points back to its base span id.
    """
    cp = MODEL_DIR
    cfg = json.loads((cp / "config.json").read_text())
    validate_model_config_contract(cfg, context=str(cp))
    device = torch.device("cuda")  # GPU assumed (runs under @spaces.GPU)
    encoding = tiktoken.get_encoding(str(cfg["encoding"]).strip())
    scn = [BACKGROUND_CLASS_LABEL]; sll = {BACKGROUND_CLASS_LABEL: 0}
    bll, ttsl, tbt = {}, {}, {}
    bg_idx = None
    for idx, name in enumerate(NER_CLASS_NAMES):
        if name == BACKGROUND_CLASS_LABEL:
            bg_idx = idx; ttsl[idx] = 0; tbt[idx] = None; continue
        # Token label names look like "B-private_person".
        bnd, base = name.split("-", 1)
        si = sll.get(base)
        if si is None:
            si = len(scn); scn.append(base); sll[base] = si
        ttsl[idx] = si; tbt[idx] = bnd
        bll.setdefault(base, {})[bnd] = idx
    li = LabelInfo(bll, ttsl, tbt, tuple(scn), sll, bg_idx, 0)
    m = Transformer.from_checkpoint(str(cp), device=device)
    return InferenceRuntime(m, encoding, li, device, int(cfg["default_n_ctx"]))
549
+
550
+
551
@torch.inference_mode()
def predict_text(runtime, text, decoder):
    """Run token classification over `text`; return (source_text, spans).

    Each span is {"label", "start", "end", "text"} with character offsets
    into the returned source text — the tokenizer round-trip of the input,
    which may differ from `text` when it contains undecodable bytes
    (decoded with errors="replace").
    """
    tids = tuple(int(t) for t in runtime.encoding.encode(text, allowed_special="all"))
    if not tids: return text, []
    # Per-token class log-probs, computed in n_ctx-sized windows.
    scores = []
    for s in range(0, len(tids), runtime.n_ctx):
        e = min(s + runtime.n_ctx, len(tids))
        wt = torch.tensor(tids[s:e], device=runtime.device, dtype=torch.int32)
        lp = F.log_softmax(runtime.model(wt).float(), dim=-1)
        scores.extend(lp.unbind(0))
    stacked = torch.stack(scores, 0)
    dl = decoder.decode(stacked)
    # Safety net: fall back to argmax if the decoded path length is wrong.
    if len(dl) != len(tids): dl = stacked.argmax(dim=1).tolist()
    pli = {i: int(l) for i, l in enumerate(dl)}
    pts = labels_to_spans(pli, runtime.label_info)
    # Rebuild the text from the raw token bytes so character offsets line
    # up with what the model actually saw.
    tb = [runtime.encoding.decode_single_token_bytes(t) for t in tids]
    dt = b"".join(tb).decode("utf-8", errors="replace")
    # cbs/cbe: byte offset where each character starts/ends.
    cbs, cbe = [], []
    bc = 0
    for ch in dt: cbs.append(bc); bc += len(ch.encode("utf-8")); cbe.append(bc)
    # cs/ce: first/last character index of each token, via byte bisection.
    cs, ce = [], []
    tbc = 0
    for rb in tb:
        tbs = tbc; tbe = tbs + len(rb); tbc = tbe
        cs.append(bisect_right(cbe, tbs)); ce.append(bisect_left(cbs, tbe))
    pcs = token_spans_to_char_spans(pts, cs, ce)
    # NOTE(review): `dt if dt != text else text` always evaluates to dt in
    # both branches; kept as-is to preserve behavior byte-for-byte.
    pcs = trim_char_spans_whitespace(pcs, dt if dt != text else text)
    src = dt if dt != text else text
    detected = []
    for li, s, e in pcs:
        # Guard against span ids outside the known category list.
        if 0 <= li < len(runtime.label_info.span_class_names):
            lbl = runtime.label_info.span_class_names[li]
        else:
            lbl = f"label_{li}"
        detected.append({"label": lbl, "start": s, "end": e, "text": src[s:e]})
    return src, detected
587
+
588
+
589
+ # =====================================================================
590
+ # APPLICATION LAYER
591
+ # =====================================================================
592
+
593
def extract_text(file_path: str) -> str:
    """Extract plain text from a PDF or Word document.

    Pages/paragraphs are joined with blank lines; empty DOCX paragraphs
    are skipped. Raises ValueError for any other file extension.

    Imports are local so PyMuPDF / python-docx are only required when the
    corresponding file type is actually processed.
    """
    suffix = Path(file_path).suffix.lower()
    if suffix == ".pdf":
        import fitz  # PyMuPDF
        # Context manager guarantees the document handle is closed even if
        # page extraction raises (the previous version leaked it on error).
        with fitz.open(file_path) as doc:
            pages = [page.get_text() for page in doc]
        return "\n\n".join(pages)
    if suffix in (".docx", ".doc"):
        from docx import Document
        doc = Document(file_path)
        return "\n\n".join(p.text for p in doc.paragraphs if p.text.strip())
    raise ValueError(f"Unsupported file type: {suffix}")
606
+
607
+
608
def compute_stats(text, spans):
    """Aggregate detection statistics for the analyzed document.

    Returns totals for characters/lines/spans, the percentage of text
    covered by PII, and per-category {"count", "chars"} buckets.
    """
    total_chars = len(text)
    covered = 0
    categories = {}
    for span in spans:
        length = span["end"] - span["start"]
        covered += length
        bucket = categories.setdefault(span["label"], {"count": 0, "chars": 0})
        bucket["count"] += 1
        bucket["chars"] += length
    percentage = round(covered / total_chars * 100, 1) if total_chars else 0
    return {
        "total_chars": total_chars,
        "pii_chars": covered,
        "pii_percentage": percentage,
        "total_spans": len(spans),
        "categories": categories,
        "num_categories": len(categories),
        "total_lines": text.count("\n") + 1 if total_chars else 0,
    }
622
+
623
+
624
def detect_speakers(text, spans):
    """Attribute PII spans to transcript speakers.

    Speaker labels come from line prefixes ("Name:", "[Name]",
    "Speaker N:"); a label carries forward onto following unlabeled lines.
    A span belongs to the line containing its midpoint. Returns
    {speaker: span_count}, or {} when no span lands on a labeled line.
    """
    patterns = [r"^([A-Z][a-zA-Z ]{1,30}):\s", r"^\[([^\]]{1,30})\]\s", r"^(Speaker\s*\d+):\s"]
    line_ranges = []
    offset = 0
    active = None
    for line in text.split("\n"):
        for pattern in patterns:
            match = re.match(pattern, line)
            if match:
                active = match.group(1).strip()
                break
        line_ranges.append((offset, offset + len(line), active))
        offset += len(line) + 1  # account for the newline split out above
    counts = {}
    for span in spans:
        midpoint = (span["start"] + span["end"]) // 2
        owner = "Document"
        for line_start, line_end, speaker in line_ranges:
            if line_start <= midpoint <= line_end and speaker:
                owner = speaker
                break
        counts[owner] = counts.get(owner, 0) + 1
    return {} if list(counts.keys()) == ["Document"] else counts
640
+
641
+
642
@spaces.GPU
def run_pii_analysis(text: str):
    """GPU-accelerated PII detection.

    Runs the shared model runtime over `text` and returns the
    (source_text, detected_spans) pair produced by predict_text.
    """
    runtime = get_runtime()
    return predict_text(runtime, text, Decoder(label_info=runtime.label_info))
649
+
650
+
651
# ── Gradio Server ────────────────────────────────────────────────
# NOTE(review): `gr.Server` (with the @server.get / @server.post /
# @server.api decorators used below) is not part of the documented stable
# Gradio API — confirm the pinned Gradio version exposes this interface.
server = gr.Server()
653
+
654
+
655
@server.get("/", response_class=HTMLResponse)
async def homepage():
    """Serve the single-page inspector UI.

    FRONTEND_HTML is assigned later in this module; the name is resolved
    at request time, so the forward reference is intentional and safe.
    """
    return FRONTEND_HTML
658
+
659
+
660
@server.post("/api/analyze")
async def analyze_document(file: UploadFile = File(...)):
    """Upload endpoint: extract text from a PDF/DOC/DOCX and scan it for PII.

    On success returns JSON with the source text, detected spans, summary
    stats, speaker attribution, and per-category display metadata; failures
    are reported as {"error": ...} with a 400 or 500 status code.
    """
    suffix = Path(file.filename).suffix.lower()
    if suffix not in (".pdf", ".doc", ".docx"):
        return JSONResponse({"error": f"Unsupported: {suffix}. Use PDF, DOC, or DOCX."}, 400)
    # Persist the upload to disk because the extraction libraries want a
    # file path. delete=False keeps the file past the `with` block; it is
    # removed in the `finally` clause below on every exit path.
    with tempfile.NamedTemporaryFile(delete=False, suffix=suffix) as tmp:
        tmp.write(await file.read()); tmp_path = tmp.name
    try:
        text = extract_text(tmp_path)
        if not text.strip():
            return JSONResponse({"error": "No text content found."}, 400)
        source_text, spans = run_pii_analysis(text)
        stats = compute_stats(source_text, spans)
        speakers = detect_speakers(source_text, spans)
        return JSONResponse({
            "filename": file.filename, "text": source_text, "spans": spans,
            "stats": stats, "speakers": speakers,
            # Ship only the display-relevant fields of CATEGORIES_META
            # (defined earlier in this module) to the frontend.
            "categories_meta": {k: {"color": v["color"], "cls": v["cls"],
                                    "label": v["label"], "mono": v["mono"]}
                                for k, v in CATEGORIES_META.items()},
        })
    except Exception as e:
        # Any extraction/inference failure surfaces as a 500 with the message.
        return JSONResponse({"error": str(e)}, 500)
    finally:
        # Best-effort cleanup of the temp file, whichever branch returned.
        if os.path.exists(tmp_path): os.unlink(tmp_path)
685
+
686
+
687
@server.api(name="analyze_text")
def analyze_text_api(text: str) -> str:
    """Gradio API: analyze raw text for PII.

    Returns a JSON string containing the source text, the detected spans,
    and the summary statistics.
    """
    source_text, spans = run_pii_analysis(text)
    payload = {
        "text": source_text,
        "spans": spans,
        "stats": compute_stats(source_text, spans),
    }
    return json.dumps(payload, ensure_ascii=False)
693
+
694
+
695
+ # ── Frontend HTML (v3 β€” Inspector) ───────────────────────────────
696
+ FRONTEND_HTML = r"""<!DOCTYPE html>
697
+ <html lang="en">
698
+ <head>
699
+ <meta charset="UTF-8">
700
+ <meta name="viewport" content="width=device-width,initial-scale=1">
701
+ <title>PII Reveal β€” Inspector</title>
702
+ <link rel="preconnect" href="https://fonts.googleapis.com">
703
+ <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
704
+ <link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600&family=JetBrains+Mono:wght@400;500&family=Source+Serif+4:opsz,wght@8..60,400;8..60,500&display=swap" rel="stylesheet">
705
+ <style>
706
+ *,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
707
+
708
+ :root{
709
+ /* neutral, paper-leaning palette */
710
+ --color-background-primary: #faf9f6;
711
+ --color-background-secondary: #f3f2ed;
712
+ --color-text-primary: #17171a;
713
+ --color-text-secondary: #555560;
714
+ --color-text-tertiary: #9a9aa2;
715
+ --color-border-tertiary: rgba(23,23,26,0.08);
716
+ --color-border-secondary: rgba(23,23,26,0.16);
717
+ --border-radius-lg: 10px;
718
+ --border-radius-md: 6px;
719
+ --border-radius-sm: 4px;
720
+
721
+ --font-sans: 'Inter', system-ui, -apple-system, Segoe UI, sans-serif;
722
+ --font-mono: 'JetBrains Mono', ui-monospace, SFMono-Regular, Menlo, Consolas, monospace;
723
+ --font-serif: 'Source Serif 4', 'Source Serif Pro', 'Iowan Old Style', Georgia, serif;
724
+ }
725
+
726
+ html,body{height:100%}
727
+ body{
728
+ font-family:var(--font-sans);
729
+ background:var(--color-background-secondary);
730
+ color:var(--color-text-primary);
731
+ font-size:13px;line-height:1.5;
732
+ -webkit-font-smoothing:antialiased;
733
+ font-feature-settings:"cv11","ss01";
734
+ }
735
+ button{font:inherit;color:inherit}
736
+ .sr-only{position:absolute;width:1px;height:1px;padding:0;margin:-1px;overflow:hidden;clip:rect(0,0,0,0);white-space:nowrap;border:0}
737
+
738
+ /* ============ UPLOAD VIEW ============ */
739
+ #upload-view{min-height:100vh;display:flex;align-items:center;justify-content:center;padding:32px}
740
+ .u-card{
741
+ width:100%;max-width:520px;
742
+ background:var(--color-background-primary);
743
+ border:0.5px solid var(--color-border-tertiary);
744
+ border-radius:var(--border-radius-lg);
745
+ padding:40px 36px;
746
+ }
747
+ .u-brand{display:flex;align-items:center;gap:10px;margin-bottom:28px}
748
+ .u-brand svg{color:var(--color-text-primary)}
749
+ .u-brand-name{font-size:13px;font-weight:500}
750
+ .u-brand-name .sub{color:var(--color-text-tertiary);font-weight:400;margin-left:4px}
751
+ .u-title{
752
+ font-family:var(--font-serif);
753
+ font-size:28px;font-weight:400;letter-spacing:-0.015em;
754
+ line-height:1.15;margin-bottom:8px;
755
+ }
756
+ .u-sub{color:var(--color-text-secondary);font-size:13px;margin-bottom:24px}
757
+ .u-drop{
758
+ border:1px dashed var(--color-border-secondary);
759
+ border-radius:var(--border-radius-md);
760
+ padding:32px 20px;
761
+ cursor:pointer;text-align:center;
762
+ background:var(--color-background-primary);
763
+ transition:all .15s;
764
+ position:relative;
765
+ }
766
+ .u-drop:hover,.u-drop.dragover{
767
+ border-color:var(--color-text-primary);
768
+ background:var(--color-background-secondary);
769
+ }
770
+ .u-drop-icon{margin:0 auto 10px;color:var(--color-text-tertiary)}
771
+ .u-drop-title{font-size:13px;font-weight:500;margin-bottom:3px}
772
+ .u-drop-sub{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary)}
773
+ .u-drop input{position:absolute;inset:0;opacity:0;cursor:pointer}
774
+ .u-meta{
775
+ display:flex;gap:10px;margin-top:20px;
776
+ font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);
777
+ }
778
+ .u-meta span + span::before{content:'Β·';margin-right:10px;color:var(--color-border-secondary)}
779
+
780
+ /* ============ RESULTS VIEW ============ */
781
+ #results-view{display:none;min-height:100vh;padding:14px}
782
+ .pr-app{
783
+ font-family:var(--font-sans);
784
+ border:0.5px solid var(--color-border-tertiary);
785
+ border-radius:var(--border-radius-lg);
786
+ overflow:hidden;
787
+ background:var(--color-background-primary);
788
+ color:var(--color-text-primary);
789
+ max-width:1240px;margin:0 auto;
790
+ }
791
+
792
+ /* ── top bar ── */
793
+ .pr-top{
794
+ display:flex;align-items:center;gap:10px;
795
+ padding:11px 14px;
796
+ border-bottom:0.5px solid var(--color-border-tertiary);
797
+ }
798
+ .pr-logo{display:flex;align-items:center;gap:8px}
799
+ .pr-name{font-size:13px;font-weight:500}
800
+ .pr-name-sub{color:var(--color-text-tertiary);font-weight:400;margin-left:4px}
801
+ .pr-file-chip{
802
+ font-family:var(--font-mono);font-size:11.5px;
803
+ color:var(--color-text-secondary);
804
+ padding:4px 8px;
805
+ background:var(--color-background-secondary);
806
+ border-radius:5px;margin-left:4px;
807
+ max-width:280px;overflow:hidden;text-overflow:ellipsis;white-space:nowrap;
808
+ }
809
+ .pr-grow{flex:1}
810
+ .pr-status{font-size:11.5px;color:var(--color-text-secondary);display:flex;align-items:center;gap:6px}
811
+ .pr-status-dot{width:6px;height:6px;border-radius:50%;background:#1D9E75;box-shadow:0 0 0 3px rgba(29,158,117,.14)}
812
+ .pr-new{
813
+ font-family:var(--font-mono);font-size:11px;
814
+ color:var(--color-text-secondary);
815
+ background:transparent;border:0.5px solid var(--color-border-secondary);
816
+ padding:4px 8px;border-radius:5px;cursor:pointer;margin-left:4px;
817
+ }
818
+ .pr-new:hover{background:var(--color-background-secondary)}
819
+
820
+ /* ── stats ── */
821
+ .pr-stats{padding:18px 18px 16px;border-bottom:0.5px solid var(--color-border-tertiary)}
822
+ .pr-stats-row{display:flex;align-items:flex-end;gap:26px;margin-bottom:14px;flex-wrap:wrap}
823
+ .pr-hero{
824
+ font-size:32px;font-weight:500;line-height:1;letter-spacing:-0.025em;
825
+ font-variant-numeric:tabular-nums;
826
+ }
827
+ .pr-hero-pct{font-size:17px;opacity:0.55;margin-left:1px;font-weight:400}
828
+ .pr-num{font-size:20px;font-weight:500;line-height:1;letter-spacing:-0.01em;font-variant-numeric:tabular-nums}
829
+ .pr-lab{font-size:11px;color:var(--color-text-tertiary);margin-top:7px}
830
+
831
+ .pr-bar{display:flex;height:4px;gap:2px;margin-bottom:12px;border-radius:2px;overflow:hidden}
832
+ .pr-bar > span{display:block;height:100%;border-radius:1px;min-width:4px;transition:opacity .15s}
833
+ .pr-bar > span:hover{opacity:.82}
834
+
835
+ .pr-legend{display:flex;flex-wrap:wrap;gap:8px 14px;font-size:12px}
836
+ .pr-leg{display:flex;align-items:center;gap:6px;color:var(--color-text-secondary);cursor:pointer;user-select:none}
837
+ .pr-leg-sw{width:8px;height:8px;border-radius:2px}
838
+ .pr-leg-ct{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);margin-left:1px}
839
+ .pr-leg.off{opacity:.45}
840
+ .pr-leg.off .pr-leg-sw{opacity:.35}
841
+
842
+ /* ── body ── */
843
+ .pr-body{display:grid;grid-template-columns:minmax(0,1fr) 188px}
844
+
845
+ /* ── doc pane ── */
846
+ .pr-doc-pane{
847
+ padding:18px 22px 26px;
848
+ border-right:0.5px solid var(--color-border-tertiary);
849
+ min-width:0;max-height:calc(100vh - 280px);overflow-y:auto;
850
+ }
851
+ .pr-doc-meta{
852
+ font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary);
853
+ margin-bottom:14px;display:flex;gap:10px;flex-wrap:wrap;
854
+ }
855
+ .pr-doc-meta span + span::before{content:'Β·';margin-right:10px;color:var(--color-border-secondary)}
856
+
857
+ .pr-text{
858
+ font-family:var(--font-serif);
859
+ font-size:14.5px;line-height:1.85;
860
+ color:var(--color-text-primary);
861
+ white-space:pre-wrap;word-wrap:break-word;
862
+ font-feature-settings:"liga","calt";
863
+ }
864
+
865
+ /* highlights β€” tinted bg + 1.5px underline, like Notion/Linear inline annotations */
866
+ .h{
867
+ padding:1px 1px;
868
+ border-bottom:1.5px solid;
869
+ transition:background .15s,opacity .15s;
870
+ cursor:pointer;
871
+ }
872
+ .h:hover{filter:brightness(0.97)}
873
+ .h.off{
874
+ background:transparent !important;
875
+ border-color:transparent !important;
876
+ color:inherit;opacity:.9;
877
+ }
878
+ .hp{background:rgba(226,75,74,.09); border-color:#E24B4A}
879
+ .hd{background:rgba(127,119,221,.10);border-color:#7F77DD}
880
+ .ha{background:rgba(29,158,117,.09); border-color:#1D9E75}
881
+ .he{background:rgba(55,138,221,.09); border-color:#378ADD}
882
+ .hac{background:rgba(186,117,23,.11);border-color:#BA7517}
883
+ .hu{background:rgba(216,90,48,.10); border-color:#D85A30}
884
+ .hs{background:rgba(212,83,126,.11); border-color:#D4537E}
885
+ .hph{background:rgba(99,153,34,.11); border-color:#639922}
886
+ .m{font-family:var(--font-mono);font-size:12.5px}
887
+
888
+ /* ── sidebar ── */
889
+ .pr-side{
890
+ background:var(--color-background-secondary);
891
+ padding:16px 14px;
892
+ display:flex;flex-direction:column;gap:18px;
893
+ min-width:0;
894
+ max-height:calc(100vh - 280px);
895
+ }
896
+ .pr-side-h{font-size:11px;color:var(--color-text-tertiary);font-weight:500;margin:0 0 10px 0;letter-spacing:.02em}
897
+ .pr-cat{
898
+ display:flex;align-items:center;gap:8px;
899
+ padding:5px 2px;font-size:12.5px;
900
+ cursor:pointer;user-select:none;
901
+ transition:opacity .15s;
902
+ }
903
+ .pr-cat:hover{opacity:.8}
904
+ .pr-cat-sw{width:9px;height:9px;border-radius:2px;flex-shrink:0}
905
+ .pr-cat-nm{flex:1;color:var(--color-text-primary)}
906
+ .pr-cat-ct{font-family:var(--font-mono);font-size:11px;color:var(--color-text-tertiary)}
907
+ .pr-cat.off .pr-cat-nm,
908
+ .pr-cat.off .pr-cat-ct{opacity:.45}
909
+ .pr-cat.off .pr-cat-sw{opacity:.3}
910
+
911
+ .pr-speakers .pr-cat-sw{background:var(--color-text-tertiary);opacity:.35;cursor:default}
912
+ .pr-speakers .pr-cat{cursor:default}
913
+ .pr-speakers .pr-cat:hover{opacity:1}
914
+
915
+ .pr-acts{
916
+ display:flex;flex-direction:column;gap:6px;
917
+ margin-top:auto;padding-top:14px;
918
+ border-top:0.5px solid var(--color-border-tertiary);
919
+ }
920
+ .pr-btn{
921
+ font-size:12px;padding:8px 10px;
922
+ border:0.5px solid var(--color-border-secondary);
923
+ border-radius:5px;
924
+ background:transparent;color:var(--color-text-primary);
925
+ cursor:pointer;text-align:left;
926
+ font-family:inherit;
927
+ display:flex;align-items:center;justify-content:space-between;
928
+ transition:all .12s;
929
+ }
930
+ .pr-btn:hover{background:var(--color-background-primary)}
931
+ .pr-btn-prim{
932
+ background:var(--color-text-primary);
933
+ color:var(--color-background-primary);
934
+ border-color:var(--color-text-primary);
935
+ }
936
+ .pr-btn-prim:hover{background:#000;border-color:#000}
937
+ .pr-btn-arr{font-family:var(--font-mono);font-size:11px;opacity:0.55}
938
+
939
+ /* empty state */
940
+ .empty-rail{color:var(--color-text-tertiary);font-size:12px;font-style:italic}
941
+
942
+ /* loading */
943
+ #loading{
944
+ position:fixed;inset:0;
945
+ background:rgba(250,249,246,.88);
946
+ backdrop-filter:blur(8px);
947
+ display:none;flex-direction:column;align-items:center;justify-content:center;
948
+ gap:10px;z-index:9999;
949
+ }
950
+ .l-ring{
951
+ width:26px;height:26px;
952
+ border:1.5px solid var(--color-border-secondary);
953
+ border-top-color:var(--color-text-primary);
954
+ border-radius:50%;
955
+ animation:sp .7s linear infinite;
956
+ }
957
+ @keyframes sp{to{transform:rotate(360deg)}}
958
+ .l-label{font-family:var(--font-mono);font-size:11.5px;color:var(--color-text-secondary)}
959
+
960
+ .error-banner{
961
+ margin:14px 18px 0;padding:10px 14px;
962
+ background:rgba(226,75,74,.08);border:0.5px solid rgba(226,75,74,.35);
963
+ border-radius:var(--border-radius-md);
964
+ color:#8a2423;font-size:12.5px;display:none;
965
+ }
966
+
967
+ /* tooltip */
968
+ .tip{
969
+ position:fixed;z-index:9998;
970
+ font-family:var(--font-mono);font-size:11px;
971
+ color:var(--color-background-primary);
972
+ background:var(--color-text-primary);
973
+ padding:4px 8px;border-radius:4px;
974
+ pointer-events:none;white-space:nowrap;
975
+ max-width:420px;overflow:hidden;text-overflow:ellipsis;
976
+ }
977
+
978
+ @media(max-width:840px){
979
+ .pr-body{grid-template-columns:1fr}
980
+ .pr-doc-pane{border-right:none;border-bottom:0.5px solid var(--color-border-tertiary);max-height:none}
981
+ .pr-side{max-height:none}
982
+ }
983
+ </style>
984
+ </head>
985
+ <body>
986
+
987
+ <!-- ============ UPLOAD VIEW ============ -->
988
+ <div id="upload-view">
989
+ <div class="u-card">
990
+ <div class="u-brand">
991
+ <svg width="20" height="20" viewBox="0 0 20 20" fill="none">
992
+ <rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/>
993
+ <circle cx="8.5" cy="8.5" r="3.2" stroke="var(--color-background-primary)" stroke-width="1.4" fill="none"/>
994
+ <line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--color-background-primary)" stroke-width="1.4" stroke-linecap="round"/>
995
+ </svg>
996
+ <span class="u-brand-name">PII Reveal<span class="sub">/ inspector</span></span>
997
+ </div>
998
+ <h1 class="u-title">Reveal what&rsquo;s hidden in your documents.</h1>
999
 + <p class="u-sub">Scan PDFs, DOC and DOCX files for names, accounts, secrets and five other entity types.</p>
1000
+
1001
+ <div class="u-drop" id="dropzone">
1002
+ <div class="u-drop-icon">
1003
+ <svg width="22" height="22" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5" stroke-linecap="round" stroke-linejoin="round">
1004
+ <path d="M12 3v13"/><path d="m6 9 6-6 6 6"/><path d="M4 17v2a2 2 0 0 0 2 2h12a2 2 0 0 0 2-2v-2"/>
1005
+ </svg>
1006
+ </div>
1007
+ <div class="u-drop-title">Drop a document, or click to browse</div>
1008
+ <div class="u-drop-sub">pdf &middot; doc &middot; docx &middot; up to 128k tokens</div>
1009
+ <input type="file" id="file-input" accept=".pdf,.doc,.docx">
1010
+ </div>
1011
+
1012
+ <div class="u-meta">
1013
+ <span>openai privacy filter</span>
1014
+ <span>128k ctx</span>
1015
+ <span>bfloat16</span>
1016
+ <span>apache 2.0</span>
1017
+ </div>
1018
+ </div>
1019
+ </div>
1020
+
1021
+ <!-- ============ RESULTS VIEW ============ -->
1022
+ <div id="results-view">
1023
+ <div class="pr-app" aria-label="PII Reveal inspector">
1024
+
1025
+ <div class="pr-top">
1026
+ <div class="pr-logo">
1027
+ <svg width="20" height="20" viewBox="0 0 20 20" fill="none" style="color: var(--color-text-primary);">
1028
+ <rect x="0" y="0" width="20" height="20" rx="5" fill="currentColor"/>
1029
+ <circle cx="8.5" cy="8.5" r="3.2" stroke="var(--color-background-primary)" stroke-width="1.4" fill="none"/>
1030
+ <line x1="11.2" y1="11.2" x2="14.2" y2="14.2" stroke="var(--color-background-primary)" stroke-width="1.4" stroke-linecap="round"/>
1031
+ </svg>
1032
+ <span class="pr-name">PII Reveal<span class="pr-name-sub">/ inspector</span></span>
1033
+ </div>
1034
+ <span class="pr-file-chip" id="file-chip"></span>
1035
+ <div class="pr-grow"></div>
1036
+ <div class="pr-status" id="scan-status"><span class="pr-status-dot"></span>Scan complete</div>
1037
+ <button class="pr-new" id="btn-new">new file</button>
1038
+ </div>
1039
+
1040
+ <div class="error-banner" id="error-banner"></div>
1041
+
1042
+ <div class="pr-stats">
1043
+ <div class="pr-stats-row">
1044
+ <div>
1045
+ <div class="pr-hero"><span id="hero-val">0</span><span class="pr-hero-pct">%</span></div>
1046
+ <div class="pr-lab">PII content</div>
1047
+ </div>
1048
+ <div>
1049
+ <div class="pr-num" id="num-spans">0</div>
1050
+ <div class="pr-lab">Spans detected</div>
1051
+ </div>
1052
+ <div>
1053
+ <div class="pr-num" id="num-cats">0 / 8</div>
1054
+ <div class="pr-lab">Categories present</div>
1055
+ </div>
1056
+ <div>
1057
+ <div class="pr-num" id="num-speakers">0</div>
1058
+ <div class="pr-lab">Speakers identified</div>
1059
+ </div>
1060
+ </div>
1061
+
1062
+ <div class="pr-bar" id="dist-bar"></div>
1063
+ <div class="pr-legend" id="legend"></div>
1064
+ </div>
1065
+
1066
+ <div class="pr-body">
1067
+ <div class="pr-doc-pane">
1068
+ <div class="pr-doc-meta" id="doc-meta"></div>
1069
+ <div class="pr-text" id="doc-text"></div>
1070
+ </div>
1071
+
1072
+ <aside class="pr-side">
1073
+ <div>
1074
+ <div class="pr-side-h">Filter categories</div>
1075
+ <div id="cat-list"></div>
1076
+ </div>
1077
+ <div id="speakers-block" style="display:none">
1078
+ <div class="pr-side-h">Speakers</div>
1079
+ <div class="pr-speakers" id="speakers-list"></div>
1080
+ </div>
1081
+ <div class="pr-acts">
1082
+ <button class="pr-btn pr-btn-prim" id="act-redact">Redact and export <span class="pr-btn-arr">&rarr;</span></button>
1083
+ <button class="pr-btn" id="act-copy">Copy sanitized</button>
1084
+ <button class="pr-btn" id="act-report">Download report</button>
1085
+ </div>
1086
+ </aside>
1087
+ </div>
1088
+ </div>
1089
+ </div>
1090
+
1091
+ <div id="loading">
1092
+ <div class="l-ring"></div>
1093
+ <div class="l-label">scanning document&hellip;</div>
1094
+ </div>
1095
+
1096
+ <div class="tip" id="tip" style="display:none"></div>
1097
+
1098
+ <script>
1099
+ /* ===== state ===== */
1100
+ const S = {
1101
+ text:'', spans:[], stats:{}, speakers:{}, catMeta:{}, filename:'',
1102
+ activeCats:new Set(), scanMs:0, sortedSpans:[],
1103
+ };
1104
+
1105
+ /* defaults (fallback when backend meta missing) */
1106
+ const DEFAULT_META = {
1107
+ private_person: {color:'#E24B4A', cls:'hp', label:'Person', mono:false},
1108
+ private_date: {color:'#7F77DD', cls:'hd', label:'Date', mono:true},
1109
+ private_address: {color:'#1D9E75', cls:'ha', label:'Address', mono:false},
1110
+ private_email: {color:'#378ADD', cls:'he', label:'Email', mono:true},
1111
+ account_number: {color:'#BA7517', cls:'hac', label:'Account', mono:true},
1112
+ private_url: {color:'#D85A30', cls:'hu', label:'URL', mono:true},
1113
+ secret: {color:'#D4537E', cls:'hs', label:'Secret', mono:true},
1114
+ private_phone: {color:'#639922', cls:'hph', label:'Phone', mono:true},
1115
+ };
1116
+ const ORDER = ['private_person','private_address','private_email','private_phone',
1117
+ 'private_url','private_date','account_number','secret'];
1118
+
1119
+ const metaFor = c => ({...(DEFAULT_META[c]||{color:'#999',cls:'',label:c,mono:false}), ...(S.catMeta[c]||{})});
1120
+
1121
+ /* ===== upload flow ===== */
1122
+ const dz = document.getElementById('dropzone');
1123
+ const fi = document.getElementById('file-input');
1124
+ ['dragenter','dragover'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.add('dragover'); }));
1125
+ ['dragleave','drop'].forEach(e => dz.addEventListener(e, ev => { ev.preventDefault(); dz.classList.remove('dragover'); }));
1126
+ dz.addEventListener('drop', ev => { if (ev.dataTransfer.files[0]) uploadFile(ev.dataTransfer.files[0]); });
1127
+ fi.addEventListener('change', ev => { if (ev.target.files[0]) uploadFile(ev.target.files[0]); });
1128
+
1129
+ async function uploadFile(file){
1130
+ const ext = file.name.split('.').pop().toLowerCase();
1131
+ if (!['pdf','doc','docx'].includes(ext)) { showError('Unsupported file type.'); return; }
1132
+ document.getElementById('loading').style.display='flex';
1133
+ document.getElementById('upload-view').style.display='none';
1134
+ const form = new FormData(); form.append('file', file);
1135
+ const t0 = performance.now();
1136
+ try{
1137
+ const r = await fetch('/api/analyze', {method:'POST', body:form});
1138
+ const d = await r.json();
1139
+ if (d.error) { showError(d.error); return; }
1140
+ S.scanMs = performance.now() - t0;
1141
+ S.text = d.text; S.spans = d.spans; S.stats = d.stats;
1142
+ S.speakers = d.speakers||{}; S.catMeta = d.categories_meta||{};
1143
+ S.filename = d.filename;
1144
+ S.activeCats = new Set(Object.keys(d.stats.categories));
1145
+ S.sortedSpans = [...S.spans].sort((a,b) => a.start - b.start);
1146
+ renderResults();
1147
+ } catch(e){ showError('Analysis failed: '+e.message); }
1148
+ finally { document.getElementById('loading').style.display='none'; }
1149
+ }
1150
+
1151
+ function showError(m){
1152
+ document.getElementById('loading').style.display='none';
1153
+ document.getElementById('upload-view').style.display='flex';
1154
+ document.getElementById('results-view').style.display='none';
1155
+ alert(m);
1156
+ }
1157
+
1158
+ function resetView(){
1159
+ document.getElementById('results-view').style.display='none';
1160
+ document.getElementById('upload-view').style.display='flex';
1161
+ fi.value = '';
1162
+ }
1163
+ document.getElementById('btn-new').addEventListener('click', resetView);
1164
+
1165
+ /* ===== render ===== */
1166
+ function renderResults(){
1167
+ document.getElementById('results-view').style.display='block';
1168
+ document.getElementById('file-chip').textContent = S.filename;
1169
+ document.getElementById('scan-status').innerHTML =
1170
+ `<span class="pr-status-dot"></span>Scan complete &middot; ${(S.scanMs/1000).toFixed(1)}s`;
1171
+ renderStats();
1172
+ renderBar();
1173
+ renderLegend();
1174
+ renderDocMeta();
1175
+ renderDoc();
1176
+ renderCats();
1177
+ renderSpeakers();
1178
+ }
1179
+
1180
+ function renderStats(){
1181
+ const s = S.stats;
1182
+ document.getElementById('hero-val').textContent = (s.pii_percentage ?? 0).toFixed(1);
1183
+ document.getElementById('num-spans').textContent = s.total_spans;
1184
+ document.getElementById('num-cats').textContent = `${s.num_categories} / 8`;
1185
+ const n = Object.keys(S.speakers).length;
1186
+ document.getElementById('num-speakers').textContent = n || 'β€”';
1187
+ }
1188
+
1189
+ function renderBar(){
1190
+ const bar = document.getElementById('dist-bar');
1191
+ bar.innerHTML = '';
1192
+ const cats = S.stats.categories;
1193
+ const total = Object.values(cats).reduce((a,b) => a + b.chars, 0) || 1;
1194
+ const ordered = ORDER.filter(c => cats[c]);
1195
+ if (!ordered.length) {
1196
+ const span = document.createElement('span');
1197
+ span.style.cssText = 'flex:1;background:var(--color-border-tertiary);opacity:.4';
1198
+ bar.appendChild(span); return;
1199
+ }
1200
+ for (const c of ordered) {
1201
+ const m = metaFor(c);
1202
+ const span = document.createElement('span');
1203
+ span.style.background = m.color;
1204
+ span.style.flex = cats[c].chars / total;
1205
+ span.dataset.cat = c;
1206
+ span.title = `${m.label} β€” ${cats[c].count} span${cats[c].count===1?'':'s'}`;
1207
+ span.addEventListener('mouseenter', ev => showTip(ev, `${m.label} Β· ${cats[c].count}`));
1208
+ span.addEventListener('mousemove', moveTip);
1209
+ span.addEventListener('mouseleave', hideTip);
1210
+ if (!S.activeCats.has(c)) span.style.opacity = '.25';
1211
+ bar.appendChild(span);
1212
+ }
1213
+ }
1214
+
1215
+ function renderLegend(){
1216
+ const leg = document.getElementById('legend');
1217
+ leg.innerHTML = '';
1218
+ const cats = S.stats.categories;
1219
+ const ordered = ORDER.filter(c => cats[c]);
1220
+ for (const c of ordered) {
1221
+ const m = metaFor(c);
1222
+ const el = document.createElement('span');
1223
+ el.className = 'pr-leg' + (S.activeCats.has(c) ? '' : ' off');
1224
+ el.dataset.cat = c;
1225
+ el.innerHTML = `<span class="pr-leg-sw" style="background:${m.color}"></span>${m.label}<span class="pr-leg-ct">${cats[c].count}</span>`;
1226
+ el.addEventListener('click', () => toggleCat(c));
1227
+ leg.appendChild(el);
1228
+ }
1229
+ }
1230
+
1231
+ function renderDocMeta(){
1232
+ const s = S.stats;
1233
+ const meta = document.getElementById('doc-meta');
1234
+ const parts = [
1235
+ `${s.total_chars.toLocaleString()} characters`,
1236
+ `${s.total_lines.toLocaleString()} lines`,
1237
+ `scanned in ${(S.scanMs/1000).toFixed(1)}s`,
1238
+ ];
1239
+ meta.innerHTML = parts.map(p => `<span>${p}</span>`).join('');
1240
+ }
1241
+
1242
+ function esc(s){ const d=document.createElement('div'); d.textContent=s; return d.innerHTML; }
1243
+
1244
+ function renderDoc(){
1245
+ const { text, sortedSpans, activeCats } = S;
1246
+ const el = document.getElementById('doc-text');
1247
+ let html = '', pos = 0;
1248
+ for (const sp of sortedSpans) {
1249
+ if (sp.start < pos) continue;
1250
+ if (sp.start > pos) html += esc(text.substring(pos, sp.start));
1251
+ const m = metaFor(sp.label);
1252
+ const cls = ['h', m.cls];
1253
+ if (m.mono) cls.push('m');
1254
+ if (!activeCats.has(sp.label)) cls.push('off');
1255
+ html += `<span class="${cls.join(' ')}" data-cat="${sp.label}">${esc(text.substring(sp.start, sp.end))}</span>`;
1256
+ pos = sp.end;
1257
+ }
1258
+ if (pos < text.length) html += esc(text.substring(pos));
1259
+ // preserve paragraph feel β€” serif font + white-space:pre-wrap handles this naturally
1260
+ el.innerHTML = html;
1261
+
1262
+ // span tooltips
1263
+ el.querySelectorAll('.h').forEach(span => {
1264
+ const cat = span.dataset.cat, m = metaFor(cat);
1265
+ span.addEventListener('mouseenter', ev => showTip(ev, `${m.label}: ${span.textContent.trim()}`));
1266
+ span.addEventListener('mousemove', moveTip);
1267
+ span.addEventListener('mouseleave', hideTip);
1268
+ });
1269
+ }
1270
+
1271
+ function renderCats(){
1272
+ const box = document.getElementById('cat-list');
1273
+ box.innerHTML = '';
1274
+ const cats = S.stats.categories;
1275
+ const ordered = ORDER.filter(c => cats[c]);
1276
+ if (!ordered.length) { box.innerHTML = '<div class="empty-rail">No entities detected.</div>'; return; }
1277
+ for (const c of ordered) {
1278
+ const m = metaFor(c);
1279
+ const el = document.createElement('div');
1280
+ el.className = 'pr-cat' + (S.activeCats.has(c) ? '' : ' off');
1281
+ el.dataset.cat = c;
1282
+ el.innerHTML = `<span class="pr-cat-sw" style="background:${m.color}"></span><span class="pr-cat-nm">${m.label}</span><span class="pr-cat-ct">${cats[c].count}</span>`;
1283
+ el.addEventListener('click', () => toggleCat(c));
1284
+ box.appendChild(el);
1285
+ }
1286
+ }
1287
+
1288
+ function renderSpeakers(){
1289
+ const names = Object.keys(S.speakers);
1290
+ const block = document.getElementById('speakers-block');
1291
+ const box = document.getElementById('speakers-list');
1292
+ if (!names.length) { block.style.display = 'none'; return; }
1293
+ block.style.display = 'block';
1294
+ box.innerHTML = '';
1295
+ for (const n of names) {
1296
+ const el = document.createElement('div');
1297
+ el.className = 'pr-cat';
1298
+ el.innerHTML = `<span class="pr-cat-sw"></span><span class="pr-cat-nm">${esc(n)}</span><span class="pr-cat-ct">${S.speakers[n]}</span>`;
1299
+ box.appendChild(el);
1300
+ }
1301
+ }
1302
+
1303
+ function toggleCat(c){
1304
+ if (S.activeCats.has(c)) S.activeCats.delete(c);
1305
+ else S.activeCats.add(c);
1306
+ // targeted toggles β€” avoid full re-render to keep scroll position
1307
+ document.querySelectorAll(`.pr-cat[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
1308
+ document.querySelectorAll(`.pr-leg[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
1309
+ document.querySelectorAll(`.h[data-cat="${c}"]`).forEach(el => el.classList.toggle('off', !S.activeCats.has(c)));
1310
+ document.querySelectorAll(`.pr-bar span[data-cat="${c}"]`).forEach(el => el.style.opacity = S.activeCats.has(c) ? '1' : '.25');
1311
+ }
1312
+
1313
+ /* tooltip */
1314
+ function showTip(ev, text){ const t = document.getElementById('tip'); t.textContent = text; t.style.display = 'block'; moveTip(ev); }
1315
+ function moveTip(ev){ const t = document.getElementById('tip'); t.style.left = (ev.clientX + 12) + 'px'; t.style.top = (ev.clientY - 26) + 'px'; }
1316
+ function hideTip(){ document.getElementById('tip').style.display = 'none'; }
1317
+
1318
+ /* ===== actions ===== */
1319
+ function sanitizedText(){
1320
+ const parts = []; let pos = 0;
1321
+ for (const sp of S.sortedSpans) {
1322
+ if (sp.start < pos) continue;
1323
+ if (sp.start > pos) parts.push(S.text.substring(pos, sp.start));
1324
+ const m = metaFor(sp.label);
1325
+ parts.push(S.activeCats.has(sp.label) ? `[${m.label.toUpperCase()}]` : S.text.substring(sp.start, sp.end));
1326
+ pos = sp.end;
1327
+ }
1328
+ if (pos < S.text.length) parts.push(S.text.substring(pos));
1329
+ return parts.join('');
1330
+ }
1331
+
1332
+ function download(name, content, type){
1333
+ const blob = new Blob([content], { type: type || 'text/plain' });
1334
+ const a = document.createElement('a');
1335
+ a.href = URL.createObjectURL(blob); a.download = name;
1336
+ document.body.appendChild(a); a.click(); a.remove();
1337
+ setTimeout(() => URL.revokeObjectURL(a.href), 1000);
1338
+ }
1339
+
1340
+ function baseName(){
1341
+ const f = S.filename || 'document';
1342
+ const i = f.lastIndexOf('.');
1343
+ return i > 0 ? f.slice(0, i) : f;
1344
+ }
1345
+
1346
+ document.getElementById('act-redact').addEventListener('click', () => {
1347
+ download(baseName() + '.redacted.txt', sanitizedText(), 'text/plain');
1348
+ flash('act-redact', 'Exported &rarr;');
1349
+ });
1350
+ document.getElementById('act-copy').addEventListener('click', async () => {
1351
+ try { await navigator.clipboard.writeText(sanitizedText()); flash('act-copy', 'Copied'); }
1352
+ catch { flash('act-copy', 'Copy failed'); }
1353
+ });
1354
+ document.getElementById('act-report').addEventListener('click', () => {
1355
+ const report = {
1356
+ filename: S.filename,
1357
+ scanned_in_ms: Math.round(S.scanMs),
1358
+ stats: S.stats,
1359
+ speakers: S.speakers,
1360
+ active_categories: [...S.activeCats],
1361
+ spans: S.spans,
1362
+ };
1363
+ download(baseName() + '.report.json', JSON.stringify(report, null, 2), 'application/json');
1364
+ flash('act-report', 'Downloaded');
1365
+ });
1366
+
1367
+ const _flashTimers = {};
1368
+ function flash(id, msg){
1369
+ const btn = document.getElementById(id);
1370
+ const prev = btn.innerHTML;
1371
+ btn.innerHTML = msg;
1372
+ clearTimeout(_flashTimers[id]);
1373
+ _flashTimers[id] = setTimeout(() => { btn.innerHTML = prev; }, 1300);
1374
+ }
1375
+ </script>
1376
+ </body>
1377
+ </html>"""
1378
+
1379
# ── launch ───────────────────────────────────────────────────────
if __name__ == "__main__":
    # Bind on all interfaces, port 7860 (the Hugging Face Spaces convention).
    server.launch(server_name="0.0.0.0", server_port=7860)