Spaces:
Running on Zero
Running on Zero
Upload 3 files
Browse files- app.py +1283 -0
- packages.txt +1 -0
- requirements.txt +11 -0
app.py
ADDED
|
@@ -0,0 +1,1283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Screenshot Anonymizer
|
| 3 |
+
=====================
|
| 4 |
+
Backend : gr.Server (Gradio + FastAPI)
|
| 5 |
+
Frontend: Canvas-based image editor
|
| 6 |
+
Model : charles-first-org/second-model (OpenAI Privacy Filter)
|
| 7 |
+
OCR : pytesseract (Tesseract 5)
|
| 8 |
+
|
| 9 |
+
Drag in a screenshot of a chat / email / document. OCR extracts words with
|
| 10 |
+
pixel positions, the privacy filter finds PII character spans, we map the
|
| 11 |
+
spans back to pixel rectangles and render black bars on top. The canvas
|
| 12 |
+
editor lets the user toggle, move, add, or delete bars, then export the
|
| 13 |
+
redacted screenshot as a PNG.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
# ββ stdlib βββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 17 |
+
import base64
|
| 18 |
+
import dataclasses
|
| 19 |
+
import functools
|
| 20 |
+
import io
|
| 21 |
+
import json
|
| 22 |
+
import math
|
| 23 |
+
import os
|
| 24 |
+
from bisect import bisect_left, bisect_right
|
| 25 |
+
from dataclasses import dataclass
|
| 26 |
+
from pathlib import Path
|
| 27 |
+
from typing import Final
|
| 28 |
+
|
| 29 |
+
# ββ third-party ββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 30 |
+
import gradio as gr
|
| 31 |
+
import pytesseract
|
| 32 |
+
import spaces
|
| 33 |
+
import tiktoken
|
| 34 |
+
import torch
|
| 35 |
+
import torch.nn.functional as F
|
| 36 |
+
from fastapi import File, UploadFile
|
| 37 |
+
from fastapi.responses import HTMLResponse, JSONResponse
|
| 38 |
+
from huggingface_hub import snapshot_download
|
| 39 |
+
from PIL import Image
|
| 40 |
+
from safetensors import safe_open
|
| 41 |
+
|
| 42 |
+
# ββ configuration ββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 43 |
+
# Checkpoint location. The repo id can be overridden via the MODEL_ID env var;
# weights are fetched once at import time (snapshot_download caches locally,
# so repeated startups reuse the same download).
MODEL_REPO = os.getenv("MODEL_ID", "charles-first-org/second-model")
HF_TOKEN = os.getenv("HF_TOKEN", None)  # optional token for private repos
MODEL_DIR = Path(snapshot_download(MODEL_REPO, token=HF_TOKEN))

# UI metadata per PII category: swatch color (hex) and human-readable label.
# Keys match the span class names emitted by the model (see SPAN_CLASS_NAMES).
CATEGORIES_META = {
    "private_person": {"color": "#ef4444", "label": "Person"},
    "private_address": {"color": "#06b6d4", "label": "Address"},
    "private_email": {"color": "#3b82f6", "label": "Email"},
    "private_phone": {"color": "#22c55e", "label": "Phone"},
    "private_url": {"color": "#eab308", "label": "URL"},
    "private_date": {"color": "#a855f7", "label": "Date"},
    "account_number": {"color": "#f97316", "label": "Account"},
    "secret": {"color": "#dc2626", "label": "Secret"},
}
|
| 57 |
+
|
| 58 |
+
# =====================================================================
|
| 59 |
+
# MODEL ARCHITECTURE + INFERENCE
|
| 60 |
+
# =====================================================================
|
| 61 |
+
|
| 62 |
+
# Expected "model_type" value in a compatible checkpoint's config.json.
PRIVACY_FILTER_MODEL_TYPE: Final[str] = "privacy_filter"
# Keys a checkpoint config.json must contain; enforced by
# validate_model_config_contract before any weights are loaded.
REQUIRED_MODEL_CONFIG_KEYS: Final[tuple[str, ...]] = (
    "model_type", "encoding", "num_hidden_layers", "num_experts",
    "experts_per_token", "vocab_size", "num_labels", "hidden_size",
    "intermediate_size", "head_dim", "num_attention_heads",
    "num_key_value_heads", "sliding_window", "bidirectional_context",
    "bidirectional_left_context", "bidirectional_right_context",
    "default_n_ctx", "initial_context_length", "rope_theta",
    "rope_scaling_factor", "rope_ntk_alpha", "rope_ntk_beta", "param_dtype",
)
# BIOES tagging scheme: "O" is the background (non-PII) class; every other
# span class gets one tag per boundary prefix (Begin/Inside/End/Single).
BACKGROUND_CLASS_LABEL: Final[str] = "O"
BOUNDARY_PREFIXES: Final[tuple[str, ...]] = ("B", "I", "E", "S")
SPAN_CLASS_NAMES: Final[tuple[str, ...]] = (
    BACKGROUND_CLASS_LABEL,
    "account_number", "private_address", "private_date", "private_email",
    "private_person", "private_phone", "private_url", "secret",
)
# 1 background label + 8 span classes x 4 prefixes = 33 token labels,
# matching the num_labels == 33 contract checked at load time.
NER_CLASS_NAMES: Final[tuple[str, ...]] = (BACKGROUND_CLASS_LABEL,) + tuple(
    f"{prefix}-{base}"
    for base in SPAN_CLASS_NAMES if base != BACKGROUND_CLASS_LABEL
    for prefix in BOUNDARY_PREFIXES
)
# Bias names recognized in viterbi_calibration.json; any key missing from the
# calibration file falls back to 0.0 (neutral).
VITERBI_TRANSITION_BIAS_KEYS: Final[tuple[str, ...]] = (
    "transition_bias_background_stay", "transition_bias_background_to_start",
    "transition_bias_inside_to_continue", "transition_bias_inside_to_end",
    "transition_bias_end_to_background", "transition_bias_end_to_start",
)
# Operating point selected from the calibration file's "operating_points".
DEFAULT_VITERBI_CALIBRATION_PRESET: Final[str] = "default"
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def validate_model_config_contract(cfg: dict, *, context: str) -> None:
    """Reject checkpoint configs that do not match the privacy-filter contract.

    Args:
        cfg: parsed config.json contents.
        context: prefix for error messages (typically the checkpoint path).

    Raises:
        ValueError: on any missing key or contract violation.
    """
    def fail(detail: str) -> None:
        raise ValueError(f"{context} {detail}")

    absent = [key for key in REQUIRED_MODEL_CONFIG_KEYS if key not in cfg]
    if absent:
        fail(f"missing keys: {', '.join(absent)}")
    if cfg.get("model_type") != PRIVACY_FILTER_MODEL_TYPE:
        fail(f"model_type must be {PRIVACY_FILTER_MODEL_TYPE!r}")
    if cfg.get("bidirectional_context") is not True:
        fail("must use bidirectional_context=true")
    left = cfg.get("bidirectional_left_context")
    right = cfg.get("bidirectional_right_context")
    context_ok = isinstance(left, int) and isinstance(right, int) and left == right and left >= 0
    if not context_ok:
        fail("bidirectional context must be equal non-negative ints")
    # The attention window must cover `left` tokens on each side plus self.
    if cfg.get("sliding_window") != 2 * left + 1:
        fail("sliding_window must equal 2*context+1")
    if cfg["num_labels"] != 33:
        fail("num_labels must be 33")
    if cfg["param_dtype"] != "bfloat16":
        fail("param_dtype must be bfloat16")
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def expert_linear(x, weight, bias):
    """Apply an independent linear map per (token, expert) pair.

    Args:
        x: activations of shape (n, e, k) — n tokens, e experts each, k features.
        weight: per-pair weight matrices of shape (n, e, k, o).
        bias: optional per-pair bias of shape broadcastable to (n, e, o), or None.

    Returns:
        Tensor of shape (n, e, o).
    """
    tokens, experts, in_dim = x.shape
    out_dim = weight.shape[-1]
    # Flatten (token, expert) into the batch axis so one bmm does all pairs.
    flat_x = x.reshape(tokens * experts, 1, in_dim)
    flat_w = weight.reshape(tokens * experts, in_dim, out_dim)
    product = torch.bmm(flat_x, flat_w).reshape(tokens, experts, out_dim)
    if bias is None:
        return product
    return product + bias
|
| 117 |
+
|
| 118 |
+
|
| 119 |
+
@dataclass
class ModelConfig:
    """Typed view of the checkpoint config fields the model code consumes."""

    num_hidden_layers: int
    num_experts: int
    experts_per_token: int
    vocab_size: int
    num_labels: int
    hidden_size: int
    intermediate_size: int
    head_dim: int
    num_attention_heads: int
    num_key_value_heads: int
    bidirectional_context_size: int
    initial_context_length: int
    rope_theta: float
    rope_scaling_factor: float
    rope_ntk_alpha: float
    rope_ntk_beta: float

    @classmethod
    def from_checkpoint_config(cls, cfg, *, context):
        """Build a ModelConfig from a raw config dict, ignoring unknown keys.

        The symmetric context size is derived from bidirectional_left_context
        (the config contract requires left == right). `context` names the
        checkpoint for error reporting and is kept for interface parity.
        """
        raw = dict(cfg)
        raw["bidirectional_context_size"] = raw["bidirectional_left_context"]
        known = {field.name for field in dataclasses.fields(cls)}
        kwargs = {key: value for key, value in raw.items() if key in known}
        return cls(**kwargs)
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
class RMSNorm(torch.nn.Module):
    """Root-mean-square normalization with a learnable per-channel scale.

    The statistics are computed in float32 for stability and the result is
    cast back to the input dtype.
    """

    def __init__(self, n, eps=1e-5, device=None):
        super().__init__()
        self.eps = eps
        self.scale = torch.nn.Parameter(
            torch.ones(n, device=device, dtype=torch.float32)
        )

    def forward(self, x):
        as_f32 = x.float()
        mean_sq = as_f32.pow(2).mean(dim=-1, keepdim=True)
        normalized = as_f32 * torch.rsqrt(mean_sq + self.eps)
        return (normalized * self.scale).to(x.dtype)
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
def apply_rope(x, cos, sin):
    """Rotate interleaved (even, odd) feature pairs of x by the given angles.

    `cos`/`sin` carry one angle per position and per feature pair; a head axis
    is broadcast in via unsqueeze(-2). The output has the same shape as x,
    with rotated values re-interleaved into the original layout.
    """
    c = cos.unsqueeze(-2).to(x.dtype)
    s = sin.unsqueeze(-2).to(x.dtype)
    even = x[..., 0::2]
    odd = x[..., 1::2]
    rotated_even = even * c - odd * s
    rotated_odd = odd * c + even * s
    paired = torch.stack((rotated_even, rotated_odd), dim=-1)
    return paired.reshape(x.shape)
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
class RotaryEmbedding(torch.nn.Module):
    """Rotary position embedding with optional NTK-style long-context scaling.

    cos/sin tables are precomputed on CPU up to max_position_embeddings and
    cached as non-persistent buffers; forward() regrows the cache on demand
    when a longer sequence arrives.
    """

    def __init__(self, head_dim, base, dtype, *, initial_context_length=4096,
                 scaling_factor=1.0, ntk_alpha=1.0, ntk_beta=32.0, device=None):
        super().__init__()
        self.head_dim, self.base, self.dtype = head_dim, base, dtype
        self.initial_context_length = initial_context_length
        self.scaling_factor, self.ntk_alpha, self.ntk_beta = scaling_factor, ntk_alpha, ntk_beta
        self.device = device
        # Table length: scaled context when scaling is enabled, never shorter
        # than the initial context.
        mp = max(int(initial_context_length * scaling_factor), initial_context_length)
        self.max_position_embeddings = mp
        cos, sin = self._compute(mp, device=torch.device("cpu"))
        target = device or torch.device("cpu")
        # Non-persistent: the tables are derived data, not checkpoint weights.
        self.register_buffer("cos_cache", cos.to(target), persistent=False)
        self.register_buffer("sin_cache", sin.to(target), persistent=False)

    def _inv_freq(self, device=None):
        # Per-pair inverse frequencies. With scaling_factor > 1 this blends
        # interpolated and extrapolated frequencies with a linear ramp between
        # the ntk_beta (low) and ntk_alpha (high) wavelength cutoffs
        # (YaRN-style NTK-by-parts interpolation).
        device = device or self.device
        freq = self.base ** (torch.arange(0, self.head_dim, 2, dtype=torch.float, device=device) / self.head_dim)
        if self.scaling_factor > 1.0:
            d_half = self.head_dim / 2
            low = d_half * math.log(self.initial_context_length / (self.ntk_beta * 2 * math.pi)) / math.log(self.base)
            high = d_half * math.log(self.initial_context_length / (self.ntk_alpha * 2 * math.pi)) / math.log(self.base)
            interp = 1.0 / (self.scaling_factor * freq)
            extrap = 1.0 / freq
            ramp = (torch.arange(d_half, dtype=torch.float32, device=device) - low) / (high - low)
            mask = 1 - ramp.clamp(0, 1)
            return interp * (1 - mask) + extrap * mask
        return 1.0 / freq

    def _compute(self, n, device=None):
        # Build cos/sin tables for positions [0, n). The magnitude correction
        # `c` (attention-temperature tweak) only applies when scaling is on.
        inv_freq = self._inv_freq(device)
        t = torch.arange(n, dtype=torch.float32, device=device or self.device)
        freqs = torch.einsum("i,j->ij", t, inv_freq)
        c = 0.1 * math.log(self.scaling_factor) + 1.0 if self.scaling_factor > 1.0 else 1.0
        return (freqs.cos() * c).to(self.dtype), (freqs.sin() * c).to(self.dtype)

    def forward(self, q, k):
        """Apply RoPE to q and k (token axis first); returns rotated (q, k)."""
        n = q.shape[0]
        # Grow the cached tables if this sequence is longer than anything seen.
        if n > self.cos_cache.shape[0]:
            cos, sin = self._compute(n, torch.device("cpu"))
            self.cos_cache, self.sin_cache = cos.to(q.device), sin.to(q.device)
        # Move caches lazily to the inputs' device on first mismatch.
        cc = self.cos_cache.to(q.device) if self.cos_cache.device != q.device else self.cos_cache
        sc = self.sin_cache.to(q.device) if self.sin_cache.device != q.device else self.sin_cache
        cos, sin = cc[:n], sc[:n]
        q = apply_rope(q.view(n, -1, self.head_dim), cos, sin).reshape(q.shape)
        k = apply_rope(k.view(n, -1, self.head_dim), cos, sin).reshape(k.shape)
        return q, k
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
def sdpa(Q, K, V, S, sm_scale, ctx):
    """Bidirectional sliding-window attention over a flat token axis.

    Q : (n, nh, qm, hd) queries — n tokens, nh KV heads, qm query heads per
        KV head (grouped-query layout, see AttentionBlock.forward), hd head dim.
    K, V : (n, nh, hd) keys / values.
    S : learned attention-sink logits, reshaped to (nh, qm).
    sm_scale : extra scale on the raw scores (queries/keys arrive pre-scaled).
    ctx : each token attends to positions within +/- ctx of itself.
    Returns (n, nh * qm * hd) with heads flattened into one feature axis.
    """
    n, nh, qm, hd = Q.shape
    w = 2 * ctx + 1  # window width in tokens
    # Zero-pad the token axis so every position has a full w-wide window.
    Kp = F.pad(K, (0, 0, 0, 0, ctx, ctx)); Vp = F.pad(V, (0, 0, 0, 0, ctx, ctx))
    # (n, w, nh, hd): for each query position, its window of keys / values.
    Kw = Kp.unfold(0, w, 1).permute(0, 3, 1, 2); Vw = Vp.unfold(0, w, 1).permute(0, 3, 1, 2)
    # Mask window slots that fall before token 0 or past token n-1 (padding).
    idx = torch.arange(w, device=Q.device) - ctx
    pos = torch.arange(n, device=Q.device)[:, None] + idx[None, :]
    valid = (pos >= 0) & (pos < n)
    scores = torch.einsum("nhqd,nwhd->nhqw", Q, Kw).float() * sm_scale
    scores = scores.masked_fill(~valid[:, None, None, :], -float("inf"))
    # Attention sink: one extra learned logit per head competes in the softmax
    # to absorb probability mass, then is dropped before the value mix.
    # NOTE(review): the log(2) factor presumably matches how the sinks were
    # parameterized at training time — confirm against the training code.
    sink = (S * math.log(2.0)).reshape(nh, qm)[None, :, :, None].expand(n, -1, -1, 1)
    scores = torch.cat([scores, sink], dim=-1)
    wt = torch.softmax(scores, dim=-1)[..., :-1].to(V.dtype)
    return torch.einsum("nhqw,nwhd->nhqd", wt, Vw).reshape(n, -1)
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
class AttentionBlock(torch.nn.Module):
    """Pre-norm grouped-query sliding-window attention with a residual add.

    Projections are bfloat16; the RMSNorm and sink logits stay in float32.
    """

    def __init__(self, cfg, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.head_dim, self.nah, self.nkv = cfg.head_dim, cfg.num_attention_heads, cfg.num_key_value_heads
        self.ctx = int(cfg.bidirectional_context_size)
        # One learned attention-sink logit per attention head (see sdpa).
        self.sinks = torch.nn.Parameter(torch.empty(cfg.num_attention_heads, device=device, dtype=torch.float32))
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Fused QKV projection: nah query heads + nkv key heads + nkv value heads.
        qkv_d = cfg.head_dim * (cfg.num_attention_heads + 2 * cfg.num_key_value_heads)
        self.qkv = torch.nn.Linear(cfg.hidden_size, qkv_d, device=device, dtype=dt)
        self.out = torch.nn.Linear(cfg.head_dim * cfg.num_attention_heads, cfg.hidden_size, device=device, dtype=dt)
        # 1/d^(1/4): applied to q AND k, so scores carry the usual 1/sqrt(d).
        self.qk_scale = 1 / math.sqrt(math.sqrt(cfg.head_dim))
        self.rope = RotaryEmbedding(cfg.head_dim, int(cfg.rope_theta), torch.float32,
                                    initial_context_length=cfg.initial_context_length,
                                    scaling_factor=cfg.rope_scaling_factor,
                                    ntk_alpha=cfg.rope_ntk_alpha, ntk_beta=cfg.rope_ntk_beta, device=device)

    def forward(self, x):
        """x: (n_tokens, hidden_size) -> same shape, with residual applied."""
        t = self.norm(x).to(self.qkv.weight.dtype)
        qkv = F.linear(t, self.qkv.weight, self.qkv.bias)
        hd, nah, nkv = self.head_dim, self.nah, self.nkv
        # Slice the fused projection into q / k / v segments.
        q = qkv[:, :nah * hd].contiguous()
        k = qkv[:, nah * hd:(nah + nkv) * hd].contiguous()
        v = qkv[:, (nah + nkv) * hd:(nah + 2 * nkv) * hd].contiguous()
        q, k = self.rope(q, k)
        q, k = q * self.qk_scale, k * self.qk_scale
        n = q.shape[0]
        # Grouped-query layout: nah//nkv query heads share each KV head.
        q = q.view(n, nkv, nah // nkv, hd); k = k.view(n, nkv, hd); v = v.view(n, nkv, hd)
        ao = sdpa(q, k, v, self.sinks, 1.0, self.ctx).to(self.out.weight.dtype)
        return x + F.linear(ao, self.out.weight, self.out.bias).to(x.dtype)
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
def swiglu(x, alpha=1.702, limit=7.0):
    """Clipped SwiGLU activation.

    The last dimension of x holds concatenated (gate, linear) halves. The gate
    half is clipped from above at `limit`, the linear half to [-limit, limit];
    the gate then passes through a sigmoid-weighted identity with temperature
    `alpha`, and multiplies (linear + 1).
    """
    gate, linear = x.chunk(2, dim=-1)
    gate = gate.clamp(max=limit)
    linear = linear.clamp(min=-limit, max=limit)
    return gate * torch.sigmoid(alpha * gate) * (linear + 1)
|
| 253 |
+
|
| 254 |
+
|
| 255 |
+
class MLPBlock(torch.nn.Module):
    """Pre-norm mixture-of-experts MLP with a residual add.

    Top-k routing over `num_experts` experts; the selected experts' outputs
    are mixed by softmax weights over the top-k gate scores. Expert math runs
    in float32 (weights are stored in bfloat16 and upcast per chunk).
    """

    def __init__(self, cfg, device=None):
        super().__init__()
        dt = torch.bfloat16
        self.ne, self.ept = cfg.num_experts, cfg.experts_per_token
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        self.gate = torch.nn.Linear(cfg.hidden_size, cfg.num_experts, device=device, dtype=dt)
        # Expert weights: mlp1 expands to 2*intermediate (gate+linear for
        # swiglu), mlp2 projects back to hidden_size.
        self.mlp1_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp1_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size * 2, device=device, dtype=dt))
        self.mlp2_weight = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.intermediate_size, cfg.hidden_size, device=device, dtype=dt))
        self.mlp2_bias = torch.nn.Parameter(torch.empty(cfg.num_experts, cfg.hidden_size, device=device, dtype=dt))

    def forward(self, x):
        """x: (n_tokens, hidden_size) -> same shape, with residual applied."""
        t = self.norm(x)
        gs = F.linear(t.float(), self.gate.weight.float(), self.gate.bias.float())
        top = torch.topk(gs, k=self.ept, dim=-1, sorted=True)
        # Divide by ept here, multiply back inside _chunk — net effect is
        # plain softmax weights over the top-k experts.
        ew = torch.softmax(top.values, dim=-1) / self.ept
        ei = top.indices
        ept = self.ept

        def _chunk(tc, eic, ewc):
            # Run one slice of tokens through their selected experts and mix.
            o = expert_linear(tc.float().unsqueeze(1).expand(-1, eic.shape[1], -1),
                              self.mlp1_weight[eic].float(), self.mlp1_bias[eic].float())
            o = swiglu(o)
            o = expert_linear(o.float(), self.mlp2_weight[eic].float(), self.mlp2_bias[eic].float())
            return (torch.einsum("bec,be->bc", o.to(ewc.dtype), ewc) * ept).to(x.dtype)

        # Chunk long sequences to bound peak memory of the gathered expert
        # weights (self.mlp1_weight[eic] materializes per-token copies).
        cs = 32
        if t.shape[0] > cs:
            parts = [_chunk(t[s:s+cs], ei[s:s+cs], ew[s:s+cs]) for s in range(0, t.shape[0], cs)]
            return x + torch.cat(parts, 0)
        return x + _chunk(t, ei, ew)
|
| 287 |
+
|
| 288 |
+
|
| 289 |
+
class TransformerBlock(torch.nn.Module):
    """One transformer layer: sliding-window attention, then the MoE MLP.

    Each sub-block applies its own pre-norm and residual connection, so this
    wrapper is pure composition.
    """

    def __init__(self, cfg, device=None):
        super().__init__()
        self.attn = AttentionBlock(cfg, device=device)
        self.mlp = MLPBlock(cfg, device=device)

    def forward(self, x):
        hidden = self.attn(x)
        return self.mlp(hidden)
|
| 296 |
+
|
| 297 |
+
|
| 298 |
+
class Checkpoint:
    """Lazy tensor reader over a directory of .safetensors shards.

    Model parameter names differ from checkpoint tensor names for the MoE
    sub-module; build_param_name_map translates model names to checkpoint
    names. An index of tensor-name -> shard-file is built once so get()
    opens exactly one shard per lookup.
    """

    @staticmethod
    def build_param_name_map(n):
        """Return the model->checkpoint rename map for n transformer layers."""
        renames = {
            "mlp1_bias": "swiglu.bias",
            "mlp1_weight": "swiglu.weight",
            "mlp2_bias": "out.bias",
            "mlp2_weight": "out.weight",
        }
        mapping = {}
        for i in range(n):
            for src, dst in renames.items():
                mapping[f"block.{i}.mlp.{src}"] = f"block.{i}.mlp.{dst}"
        return mapping

    def __init__(self, path, device, num_hidden_layers):
        self.pnm = self.build_param_name_map(num_hidden_layers)
        # safetensors expects a device string such as "cpu" or "cuda:0".
        if device.index is None:
            self.ds = device.type
        else:
            self.ds = f"{device.type}:{device.index}"
        shards = (
            os.path.join(path, name)
            for name in os.listdir(path)
            if name.endswith(".safetensors")
        )
        self.map = {}
        for shard in shards:
            with safe_open(shard, framework="pt", device=self.ds) as handle:
                for key in handle.keys():
                    self.map[key] = shard

    def get(self, name):
        """Load a single tensor by model parameter name."""
        mapped = self.pnm.get(name, name)
        with safe_open(self.map[mapped], framework="pt", device=self.ds) as handle:
            return handle.get_tensor(mapped)
|
| 320 |
+
|
| 321 |
+
|
| 322 |
+
class Transformer(torch.nn.Module):
    """Token classifier: embedding -> N transformer blocks -> label logits."""

    def __init__(self, cfg, device):
        super().__init__()
        dt = torch.bfloat16
        self.embedding = torch.nn.Embedding(cfg.vocab_size, cfg.hidden_size, device=device, dtype=dt)
        self.block = torch.nn.ModuleList([TransformerBlock(cfg, device=device) for _ in range(cfg.num_hidden_layers)])
        self.norm = RMSNorm(cfg.hidden_size, device=device)
        # Per-token classification head over num_labels BIOES labels.
        self.unembedding = torch.nn.Linear(cfg.hidden_size, cfg.num_labels, bias=False, device=device, dtype=dt)

    def forward(self, token_ids):
        """token_ids: (n_tokens,) int tensor -> (n_tokens, num_labels) logits."""
        x = self.embedding(token_ids)
        for blk in self.block:
            x = blk(x)
        return F.linear(self.norm(x), self.unembedding.weight, None)

    @classmethod
    def from_checkpoint(cls, checkpoint_dir, *, device):
        """Build the model and stream weights in from a checkpoint directory.

        Validates the config contract before allocating anything, then copies
        each named parameter from the safetensors shards with a shape check.
        NOTE: mutates process-global precision settings (TF32 off, "highest"
        matmul precision) as a side effect.
        """
        torch.backends.cuda.matmul.allow_tf32 = False
        torch.backends.cudnn.allow_tf32 = False
        torch.set_float32_matmul_precision("highest")
        cp = json.loads((Path(checkpoint_dir) / "config.json").read_text())
        validate_model_config_contract(cp, context=str(checkpoint_dir))
        cfg = ModelConfig.from_checkpoint_config(cp, context=str(checkpoint_dir))
        ckpt = Checkpoint(checkpoint_dir, device, cfg.num_hidden_layers)
        m = cls(cfg, device); m.eval()
        for name, param in m.named_parameters():
            loaded = ckpt.get(name)
            if param.shape != loaded.shape:
                raise ValueError(f"Shape mismatch {name}: {param.shape} vs {loaded.shape}")
            param.data.copy_(loaded)
        return m
|
| 353 |
+
|
| 354 |
+
|
| 355 |
+
@dataclass(frozen=True)
class LabelInfo:
    """Immutable lookup tables linking token-level BIOES label ids to spans."""

    # token label id -> boundary-tagged label name (or parts thereof)
    boundary_label_lookup: dict
    # token label id -> span class id
    token_to_span_label: dict
    # token label id -> boundary tag string ("B" / "I" / "E" / "S")
    token_boundary_tags: dict
    # ordered span class names; index == span class id
    span_class_names: tuple
    # span class name -> span class id
    span_label_lookup: dict
    # token label id of the background ("O") token class
    background_token_label: int
    # span class id of the background ("O") span class
    background_span_label: int
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def labels_to_spans(labels_by_index, label_info):
    """Decode per-token BIOES labels into (span_label, start, end) spans.

    labels_by_index maps token index -> token label id; indices may be
    sparse. A gap in the indices, a background label, or an invalid tag
    sequence closes whatever span is open. `end` is exclusive.
    """
    spans, cur_label, start_idx, prev_idx = [], None, None, None
    bg = label_info.background_span_label
    for ti in sorted(labels_by_index):
        lid = labels_by_index[ti]
        sl = label_info.token_to_span_label.get(lid)
        bt = label_info.token_boundary_tags.get(lid)
        # A gap in token indices ends any span that was in progress.
        if prev_idx is not None and ti != prev_idx + 1:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        # Unknown label id: drop the token, keep scanning.
        if sl is None:
            prev_idx = ti; continue
        # Background label: close the open span (ending before this token).
        if sl == bg:
            if cur_label is not None and start_idx is not None:
                spans.append((cur_label, start_idx, ti))
            cur_label = start_idx = None; prev_idx = ti; continue
        if bt == "S":
            # Single-token span; flush any open span first.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
        elif bt == "B":
            # Begin: flush any open span, then open a new one here.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label, start_idx = sl, ti
        elif bt == "I":
            # Inside: only valid continuing a span of the same class;
            # otherwise treat it as an implicit begin (recovering from a
            # malformed sequence).
            if cur_label is None or cur_label != sl:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                cur_label, start_idx = sl, ti
        elif bt == "E":
            # End: close the open same-class span including this token; a
            # mismatched end degrades to a single-token span.
            if cur_label is None or cur_label != sl or start_idx is None:
                if cur_label is not None and start_idx is not None and prev_idx is not None:
                    spans.append((cur_label, start_idx, prev_idx + 1))
                spans.append((sl, ti, ti + 1)); cur_label = start_idx = None
            else:
                spans.append((cur_label, start_idx, ti + 1)); cur_label = start_idx = None
        else:
            # Unrecognized boundary tag: flush and reset.
            if cur_label is not None and start_idx is not None and prev_idx is not None:
                spans.append((cur_label, start_idx, prev_idx + 1))
            cur_label = start_idx = None
        prev_idx = ti
    # Flush a span left open at the end of the sequence.
    if cur_label is not None and start_idx is not None and prev_idx is not None:
        spans.append((cur_label, start_idx, prev_idx + 1))
    return spans
|
| 411 |
+
|
| 412 |
+
|
| 413 |
+
def token_spans_to_char_spans(spans, cs, ce):
    """Convert token-index spans into character-offset spans.

    Args:
        spans: iterable of (label, token_start, token_end), end exclusive.
        cs: per-token character start offsets.
        ce: per-token character end offsets (assumed to parallel cs —
            TODO confirm against the code that builds these offset lists).

    Spans whose token indices fall outside cs, or that collapse to an empty
    character range, are dropped.
    """
    result = []
    for label, tok_start, tok_end in spans:
        in_bounds = 0 <= tok_start < tok_end <= len(cs)
        if not in_bounds:
            continue
        char_start = cs[tok_start]
        char_end = ce[tok_end - 1]
        if char_end > char_start:
            result.append((label, char_start, char_end))
    return result
|
| 422 |
+
|
| 423 |
+
|
| 424 |
+
def trim_char_spans_whitespace(spans, text):
    """Shrink each (label, start, end) span so it carries no leading or
    trailing whitespace in `text`; drop spans that are out of bounds or
    become empty after trimming."""
    trimmed = []
    for label, start, end in spans:
        if start < 0 or end > len(text) or start >= end:
            continue
        while start < end and text[start].isspace():
            start += 1
        while end > start and text[end - 1].isspace():
            end -= 1
        if end > start:
            trimmed.append((label, start, end))
    return trimmed
|
| 434 |
+
|
| 435 |
+
|
| 436 |
+
@functools.lru_cache(maxsize=1)
def get_viterbi_transition_biases():
    """Load Viterbi transition biases from the checkpoint's calibration file.

    A missing file, malformed payload, or absent bias keys all fall back to
    0.0 (neutral) biases. Cached so the file is read at most once per process.
    """
    zeros = {key: 0.0 for key in VITERBI_TRANSITION_BIAS_KEYS}
    calibration_path = MODEL_DIR / "viterbi_calibration.json"
    if not calibration_path.is_file():
        return zeros
    payload = json.loads(calibration_path.read_text())
    # Prefer the default operating point's "biases" mapping when present;
    # otherwise treat the top-level payload itself as the bias mapping.
    candidate = payload
    operating_points = payload.get("operating_points")
    if isinstance(operating_points, dict):
        preset = operating_points.get(DEFAULT_VITERBI_CALIBRATION_PRESET)
        if isinstance(preset, dict):
            candidate = preset.get("biases", candidate)
    if not isinstance(candidate, dict):
        return zeros
    return {key: float(candidate.get(key, 0.0)) for key in VITERBI_TRANSITION_BIAS_KEYS}
|
| 452 |
+
|
| 453 |
+
|
| 454 |
+
class Decoder:
    """Viterbi decoder enforcing valid BIOES transitions over token labels.

    Builds start/end/transition score tables once from a LabelInfo (invalid
    moves get -1e9, valid moves get a calibrated bias), then decode() runs a
    standard max-sum Viterbi pass over per-token label log-probabilities.
    """

    def __init__(self, label_info):
        nc = len(label_info.token_to_span_label)
        # -1e9 acts as "forbidden"; allowed entries are overwritten below.
        self._start = torch.full((nc,), -1e9, dtype=torch.float32)
        self._end = torch.full((nc,), -1e9, dtype=torch.float32)
        self._trans = torch.full((nc, nc), -1e9, dtype=torch.float32)
        biases = get_viterbi_transition_biases()
        bg_tok, bg_sp = label_info.background_token_label, label_info.background_span_label
        ttsl, tbt = label_info.token_to_span_label, label_info.token_boundary_tags
        for i in range(nc):
            tag, sl = tbt.get(i), ttsl.get(i)
            # A sequence may start on B/S or background, and end on E/S or
            # background.
            if tag in {"B", "S"} or i == bg_tok: self._start[i] = 0.0
            if tag in {"E", "S"} or i == bg_tok: self._end[i] = 0.0
            for j in range(nc):
                nt, ns = tbt.get(j), ttsl.get(j)
                if self._valid(tag, sl, nt, ns, bg_tok, bg_sp, j):
                    self._trans[i, j] = self._bias(tag, sl, nt, ns, bg_sp, biases)

    @staticmethod
    def _valid(pt, ps, nt, ns, bti, bsi, ni):
        # Is (prev tag pt / prev span ps) -> (next tag nt / next span ns) a
        # legal BIOES move? nb: the next label is background.
        nb = ns == bsi or ni == bti
        if (ns is None or nt is None) and not nb: return False
        # From an unknown or closed state, only background or a new span (B/S).
        if pt is None or ps is None: return nb or nt in {"B", "S"}
        if ps == bsi or pt in {"E", "S"}: return nb or nt in {"B", "S"}
        # From inside a span (B/I), only continue (I) or close (E) the SAME class.
        if pt in {"B", "I"}: return ps == ns and nt in {"I", "E"}
        return False

    @staticmethod
    def _bias(pt, ps, nt, ns, bsi, b):
        # Calibrated score for a legal transition, keyed by which of the six
        # transition families it falls into.
        nb, pb = ns == bsi, ps == bsi
        if pb: return b["transition_bias_background_stay"] if nb else b["transition_bias_background_to_start"]
        if pt in {"B", "I"}: return b["transition_bias_inside_to_continue"] if nt == "I" else b["transition_bias_inside_to_end"]
        return b["transition_bias_end_to_background"] if nb else b["transition_bias_end_to_start"]

    def decode(self, lp):
        """Viterbi-decode lp of shape (seq_len, num_classes) into a label-id list."""
        sl, nc = lp.shape
        if sl == 0: return []
        st = self._start.to(lp.device, lp.dtype)
        en = self._end.to(lp.device, lp.dtype)
        tr = self._trans.to(lp.device, lp.dtype)
        scores = lp[0] + st
        # bp[i-1, j] = best predecessor class for class j at position i.
        bp = torch.empty((sl - 1, nc), device=lp.device, dtype=torch.int64)
        for i in range(1, sl):
            t = scores.unsqueeze(1) + tr
            bs, bi = t.max(dim=0)
            scores = bs + lp[i]; bp[i - 1] = bi
        # No finite path (constraints unsatisfiable): fall back to greedy argmax.
        if not torch.isfinite(scores).any(): return lp.argmax(dim=1).tolist()
        scores += en
        # Backtrack from the best final class.
        path = torch.empty(sl, device=lp.device, dtype=torch.int64)
        path[-1] = scores.argmax()
        for i in range(sl - 2, -1, -1): path[i] = bp[i, path[i + 1]]
        return path.tolist()
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
@dataclass(frozen=True)
class InferenceRuntime:
    """Everything needed to run inference, bundled so it can be cached once."""
    model: Transformer            # loaded checkpoint
    encoding: tiktoken.Encoding   # tokenizer matching the checkpoint
    label_info: LabelInfo         # label layout derived from NER_CLASS_NAMES
    device: torch.device
    n_ctx: int                    # max tokens per forward pass
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
@functools.lru_cache(maxsize=1)
def get_runtime():
    """Load the model checkpoint and derive label bookkeeping; cached once.

    Builds a LabelInfo from NER_CLASS_NAMES: BIES-tagged class names like
    "B-private_email" are split into (boundary, span class); the background
    class maps to span index 0.
    """
    checkpoint = MODEL_DIR
    cfg = json.loads((checkpoint / "config.json").read_text())
    validate_model_config_contract(cfg, context=str(checkpoint))
    device = torch.device("cuda")
    encoding = tiktoken.get_encoding(str(cfg["encoding"]).strip())
    span_class_names = [BACKGROUND_CLASS_LABEL]
    span_label_lookup = {BACKGROUND_CLASS_LABEL: 0}
    boundary_label_lookup = {}
    token_to_span = {}
    boundary_tags = {}
    background_idx = None
    for idx, name in enumerate(NER_CLASS_NAMES):
        if name == BACKGROUND_CLASS_LABEL:
            background_idx = idx
            token_to_span[idx] = 0
            boundary_tags[idx] = None
            continue
        boundary, base = name.split("-", 1)
        span_idx = span_label_lookup.get(base)
        if span_idx is None:
            # First time we see this span class: assign the next index.
            span_idx = len(span_class_names)
            span_class_names.append(base)
            span_label_lookup[base] = span_idx
        token_to_span[idx] = span_idx
        boundary_tags[idx] = boundary
        boundary_label_lookup.setdefault(base, {})[boundary] = idx
    label_info = LabelInfo(boundary_label_lookup, token_to_span, boundary_tags,
                           tuple(span_class_names), span_label_lookup,
                           background_idx, 0)
    model = Transformer.from_checkpoint(str(checkpoint), device=device)
    return InferenceRuntime(model, encoding, label_info, device,
                            int(cfg["default_n_ctx"]))
|
| 536 |
+
|
| 537 |
+
|
| 538 |
+
@torch.inference_mode()
def predict_text(runtime, text, decoder):
    """Tokenize `text`, run the model chunk-by-chunk, and decode PII spans.

    Returns (source_text, detected) where `detected` is a list of
    {"label", "start", "end", "text"} dicts with character offsets into
    source_text. source_text is the decoded-token round-trip of `text`
    (equal to `text` whenever the tokenizer round-trips cleanly).
    """
    tids = tuple(int(t) for t in runtime.encoding.encode(text, allowed_special="all"))
    if not tids:
        return text, []

    # Run the model over n_ctx-sized windows, collecting per-token log-probs.
    scores = []
    for s in range(0, len(tids), runtime.n_ctx):
        e = min(s + runtime.n_ctx, len(tids))
        wt = torch.tensor(tids[s:e], device=runtime.device, dtype=torch.int32)
        lp = F.log_softmax(runtime.model(wt).float(), dim=-1)
        scores.extend(lp.unbind(0))
    stacked = torch.stack(scores, 0)

    # Viterbi decode; fall back to plain argmax if the decoder returns a
    # path of the wrong length.
    dl = decoder.decode(stacked)
    if len(dl) != len(tids):
        dl = stacked.argmax(dim=1).tolist()
    pli = {i: int(l) for i, l in enumerate(dl)}
    pts = labels_to_spans(pli, runtime.label_info)

    # Round-trip the tokens back to text and build byte->char offset tables
    # so token spans can be mapped onto character positions.
    tb = [runtime.encoding.decode_single_token_bytes(t) for t in tids]
    dt = b"".join(tb).decode("utf-8", errors="replace")
    cbs, cbe = [], []  # per-character byte start / end offsets
    bc = 0
    for ch in dt:
        cbs.append(bc)
        bc += len(ch.encode("utf-8"))
        cbe.append(bc)
    cs, ce = [], []  # per-token char start / end offsets
    tbc = 0
    for rb in tb:
        tbs = tbc
        tbe = tbs + len(rb)
        tbc = tbe
        cs.append(bisect_right(cbe, tbs))
        ce.append(bisect_left(cbs, tbe))

    pcs = token_spans_to_char_spans(pts, cs, ce)
    # FIX: the original `dt if dt != text else text` (used twice) was
    # redundant — when dt == text both branches are equal, so the result is
    # always a string equal to `dt`. Use `dt` directly.
    pcs = trim_char_spans_whitespace(pcs, dt)
    src = dt

    detected = []
    for li, s, e in pcs:
        names = runtime.label_info.span_class_names
        lbl = names[li] if 0 <= li < len(names) else f"label_{li}"
        detected.append({"label": lbl, "start": s, "end": e, "text": src[s:e]})
    return src, detected
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
@functools.lru_cache(maxsize=1)
def _get_cached_decoder():
    """Build the Viterbi decoder once per process.

    FIX: the original rebuilt `Decoder` on every request even though it is a
    pure function of the cached runtime's label_info, and its constructor runs
    an O(n_classes^2) Python loop. Caching removes that per-request cost.
    """
    return Decoder(label_info=get_runtime().label_info)


@spaces.GPU
def run_pii_analysis(text):
    """Run PII span detection on `text`; returns (source_text, spans)."""
    runtime = get_runtime()
    return predict_text(runtime, text, _get_cached_decoder())
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
# =====================================================================
|
| 582 |
+
# OCR + SPAN β BOX MAPPING
|
| 583 |
+
# =====================================================================
|
| 584 |
+
|
| 585 |
+
def ocr_image(img: Image.Image) -> dict:
    """Run Tesseract and return the concatenated text plus per-word boxes.

    The text is reconstructed with a single space between words on the same
    line and a newline between lines, matching the character offsets we emit
    in the `words` list -- so later char-span to box mapping is a pure lookup.
    """
    data = pytesseract.image_to_data(img, output_type=pytesseract.Output.DICT)
    words, parts = [], []
    pos = 0
    prev_line = None
    for i, raw in enumerate(data["text"]):
        if raw is None:
            continue
        token = raw.strip()
        if not token:
            continue
        try:
            conf = float(data["conf"][i])
        except (TypeError, ValueError):
            conf = -1.0
        if conf < 0:
            # Tesseract flags structural (non-word) entries with conf == -1.
            continue
        line = (data["block_num"][i], data["par_num"][i], data["line_num"][i])
        if prev_line is not None:
            # Newline between lines, single space between same-line words.
            parts.append("\n" if line != prev_line else " ")
            pos += 1
        prev_line = line
        start = pos
        parts.append(token)
        pos += len(token)
        words.append({
            "text": token, "start": start, "end": pos,
            "x": int(data["left"][i]), "y": int(data["top"][i]),
            "w": int(data["width"][i]), "h": int(data["height"][i]),
        })
    return {"text": "".join(parts), "words": words}
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def map_spans_to_boxes(words, spans, pad=3):
    """Map each char span to one or more pixel boxes, splitting across lines.

    `words` are OCR word dicts with char offsets (start/end) and pixel
    geometry (x/y/w/h); `spans` are {"start", "end", "label"} dicts. Words
    overlapping a span are clustered into visual lines by y-center, and one
    padded bounding box is emitted per line. Spans matching no word are
    skipped. Box x/y are clamped to >= 0 and w/h to >= 1.
    """
    boxes = []
    for span in spans:
        span_start, span_end, label = span["start"], span["end"], span["label"]
        overlapping = [w for w in words
                       if w["start"] < span_end and w["end"] > span_start]
        if not overlapping:
            continue
        # Cluster hit words into lines keyed by the y-center of the first
        # word seen on that line; tolerance scales with word height.
        lines = {}
        for word in overlapping:
            center_y = word["y"] + word["h"] // 2
            assigned = next(
                (key for key in lines
                 if abs(key - center_y) < max(word["h"] * 0.6, 10)),
                None,
            )
            lines.setdefault(center_y if assigned is None else assigned, []).append(word)
        for line_words in lines.values():
            left = min(w["x"] for w in line_words) - pad
            top = min(w["y"] for w in line_words) - pad
            right = max(w["x"] + w["w"] for w in line_words) + pad
            bottom = max(w["y"] + w["h"] for w in line_words) + pad
            boxes.append({
                "x": max(0, left),
                "y": max(0, top),
                "w": max(1, right - left),
                "h": max(1, bottom - top),
                "label": label,
                "text": " ".join(w["text"] for w in line_words),
            })
    return boxes
|
| 656 |
+
|
| 657 |
+
|
| 658 |
+
# =====================================================================
|
| 659 |
+
# SERVER
|
| 660 |
+
# =====================================================================
|
| 661 |
+
|
| 662 |
+
# NOTE(review): `gr.Server` is not a documented public Gradio API — confirm the
# pinned gradio version actually exposes it (the routes below also assume
# FastAPI-style `.get`/`.post` decorators and a custom `.api` decorator).
server = gr.Server()
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
@server.get("/", response_class=HTMLResponse)
async def homepage():
    """Serve the single-page frontend (FRONTEND_HTML, defined later in this module)."""
    return FRONTEND_HTML
|
| 668 |
+
|
| 669 |
+
|
| 670 |
+
@server.post("/api/detect")
async def detect(file: UploadFile = File(...)):
    """Analyze an uploaded screenshot: OCR -> PII model -> pixel boxes.

    Responds with the image (as a PNG data URL), the OCR text, the detected
    spans, their pixel boxes, and category display metadata. Errors are
    returned as JSON with an appropriate status code.
    """
    suffix = Path(file.filename or "").suffix.lower()
    if suffix not in (".png", ".jpg", ".jpeg", ".webp", ".bmp", ".tif", ".tiff"):
        return JSONResponse({"error": f"Unsupported image type: {suffix or '(none)'}"}, 400)
    try:
        payload = await file.read()
        img = Image.open(io.BytesIO(payload)).convert("RGB")
    except Exception as e:
        return JSONResponse({"error": f"Could not read image: {e}"}, 400)

    ocr = ocr_image(img)
    if not ocr["text"].strip():
        return JSONResponse({"error": "No text detected in the image."}, 400)

    try:
        source_text, spans = run_pii_analysis(ocr["text"])
    except Exception as e:
        return JSONResponse({"error": f"PII analysis failed: {e}"}, 500)

    # If the model round-tripped the text differently, fall back to original
    # so the word offsets still line up. In practice this only matters for
    # exotic unicode that tesseract won't produce anyway.
    if source_text != ocr["text"]:
        spans = [s for s in spans if s["end"] <= len(ocr["text"])]

    boxes = map_spans_to_boxes(ocr["words"], spans)

    png_buffer = io.BytesIO()
    img.save(png_buffer, format="PNG")
    data_url = "data:image/png;base64," + base64.b64encode(png_buffer.getvalue()).decode()

    return JSONResponse({
        "filename": file.filename,
        "image": data_url,
        "width": img.width,
        "height": img.height,
        "boxes": boxes,
        "text": ocr["text"],
        "spans": spans,
        "categories_meta": {
            key: {"color": meta["color"], "label": meta["label"]}
            for key, meta in CATEGORIES_META.items()
        },
    })
|
| 711 |
+
|
| 712 |
+
|
| 713 |
+
@server.api(name="anonymize_screenshot")
def anonymize_screenshot_api(image_path: str) -> str:
    """Gradio API: takes a path to an image, returns JSON with detected boxes."""
    img = Image.open(image_path).convert("RGB")
    ocr = ocr_image(img)
    if not ocr["text"].strip():
        # Nothing to analyze: empty result, same JSON shape as below.
        return json.dumps({"boxes": [], "text": "", "spans": []})
    _, spans = run_pii_analysis(ocr["text"])
    boxes = map_spans_to_boxes(ocr["words"], spans)
    result = {
        "width": img.width,
        "height": img.height,
        "boxes": boxes,
        "text": ocr["text"],
        "spans": spans,
    }
    return json.dumps(result, ensure_ascii=False)
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
# =====================================================================
|
| 729 |
+
# FRONTEND
|
| 730 |
+
# =====================================================================
|
| 731 |
+
|
| 732 |
+
FRONTEND_HTML = r"""<!DOCTYPE html>
|
| 733 |
+
<html lang="en">
|
| 734 |
+
<head>
|
| 735 |
+
<meta charset="UTF-8">
|
| 736 |
+
<meta name="viewport" content="width=device-width,initial-scale=1">
|
| 737 |
+
<title>Screenshot Anonymizer</title>
|
| 738 |
+
<link rel="preconnect" href="https://fonts.googleapis.com">
|
| 739 |
+
<link href="https://fonts.googleapis.com/css2?family=Inter:wght@400;500;600;700;800&display=swap" rel="stylesheet">
|
| 740 |
+
<style>
|
| 741 |
+
*,*::before,*::after{box-sizing:border-box;margin:0;padding:0}
|
| 742 |
+
:root{
|
| 743 |
+
--bg:#0f172a;--surface:#1e293b;--surface2:#334155;--border:#334155;--border2:#475569;
|
| 744 |
+
--text:#f1f5f9;--text2:#cbd5e1;--text3:#94a3b8;
|
| 745 |
+
--primary:#6366f1;--primary-light:#818cf8;--danger:#ef4444;--success:#22c55e;
|
| 746 |
+
--radius:12px;--radius-sm:8px;--shadow:0 4px 12px rgba(0,0,0,.3);--shadow-lg:0 12px 40px rgba(0,0,0,.4);
|
| 747 |
+
}
|
| 748 |
+
html,body{height:100%}
|
| 749 |
+
body{font-family:'Inter',system-ui,sans-serif;background:var(--bg);color:var(--text);min-height:100vh;line-height:1.5;overflow:hidden}
|
| 750 |
+
|
| 751 |
+
/* ββ upload view ββ */
|
| 752 |
+
#upload-view{display:flex;flex-direction:column;align-items:center;justify-content:center;min-height:100vh;padding:2rem;background:radial-gradient(circle at 30% 20%,#312e81 0%,var(--bg) 50%)}
|
| 753 |
+
.upload-card{background:var(--surface);border:1px solid var(--border);border-radius:20px;padding:3rem;max-width:640px;width:100%;text-align:center;box-shadow:var(--shadow-lg)}
|
| 754 |
+
.brand{display:flex;align-items:center;justify-content:center;gap:.75rem;margin-bottom:.5rem}
|
| 755 |
+
.brand h1{font-size:1.75rem;font-weight:800;background:linear-gradient(135deg,#818cf8,#ec4899);-webkit-background-clip:text;-webkit-text-fill-color:transparent}
|
| 756 |
+
.brand-icon{width:40px;height:40px;background:linear-gradient(135deg,var(--primary),#ec4899);border-radius:10px;display:flex;align-items:center;justify-content:center;color:#fff;font-size:1.3rem}
|
| 757 |
+
.subtitle{color:var(--text2);margin-bottom:2rem;font-size:1rem}
|
| 758 |
+
.dropzone{border:2px dashed var(--border2);border-radius:var(--radius);padding:3rem 2rem;cursor:pointer;transition:all .2s;position:relative;background:rgba(15,23,42,.4)}
|
| 759 |
+
.dropzone:hover,.dropzone.dragover{border-color:var(--primary-light);background:rgba(99,102,241,.08)}
|
| 760 |
+
.dropzone-icon{font-size:2.5rem;margin-bottom:.75rem}
|
| 761 |
+
.dropzone-text{font-weight:600;font-size:1.05rem;margin-bottom:.25rem}
|
| 762 |
+
.dropzone-hint{color:var(--text3);font-size:.85rem}
|
| 763 |
+
.dropzone input{position:absolute;inset:0;opacity:0;cursor:pointer}
|
| 764 |
+
.features{display:grid;grid-template-columns:repeat(3,1fr);gap:.75rem;margin-top:1.75rem;text-align:left}
|
| 765 |
+
.feature{background:rgba(15,23,42,.5);border:1px solid var(--border);padding:.9rem;border-radius:var(--radius-sm)}
|
| 766 |
+
.feature-title{font-weight:600;font-size:.8rem;margin-bottom:.2rem;color:var(--text)}
|
| 767 |
+
.feature-desc{color:var(--text3);font-size:.72rem;line-height:1.45}
|
| 768 |
+
.powered-by{margin-top:1.5rem;font-size:.78rem;color:var(--text3)}
|
| 769 |
+
.powered-by strong{color:var(--text2)}
|
| 770 |
+
|
| 771 |
+
/* ββ editor view ββ */
|
| 772 |
+
#editor-view{display:none;flex-direction:column;height:100vh}
|
| 773 |
+
.top-bar{background:var(--surface);border-bottom:1px solid var(--border);padding:.65rem 1.25rem;display:flex;align-items:center;gap:1rem;flex-shrink:0}
|
| 774 |
+
.top-bar .brand{margin:0}
|
| 775 |
+
.top-bar .brand h1{font-size:1.15rem}
|
| 776 |
+
.top-bar .brand-icon{width:30px;height:30px;font-size:.95rem}
|
| 777 |
+
.file-info{font-size:.85rem;color:var(--text3);margin-left:.25rem;flex:1;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}
|
| 778 |
+
.btn{padding:.5rem 1rem;border-radius:var(--radius-sm);border:1px solid var(--border2);cursor:pointer;font-weight:600;font-size:.82rem;transition:all .15s;background:var(--surface2);color:var(--text);font-family:inherit}
|
| 779 |
+
.btn:hover{background:var(--border2);border-color:var(--text3)}
|
| 780 |
+
.btn-primary{background:var(--primary);border-color:var(--primary);color:#fff}
|
| 781 |
+
.btn-primary:hover{background:var(--primary-light);border-color:var(--primary-light)}
|
| 782 |
+
.btn-ghost{background:transparent}
|
| 783 |
+
.btn-ghost:hover{background:var(--surface2)}
|
| 784 |
+
|
| 785 |
+
.editor-layout{flex:1;display:flex;min-height:0}
|
| 786 |
+
.canvas-area{flex:1;background:#020617;background-image:linear-gradient(45deg,#0f172a 25%,transparent 25%),linear-gradient(-45deg,#0f172a 25%,transparent 25%),linear-gradient(45deg,transparent 75%,#0f172a 75%),linear-gradient(-45deg,transparent 75%,#0f172a 75%);background-size:20px 20px;background-position:0 0,0 10px,10px -10px,10px 0px;overflow:auto;position:relative;display:flex;align-items:center;justify-content:center;padding:2rem}
|
| 787 |
+
.canvas-wrap{position:relative;box-shadow:0 20px 60px rgba(0,0,0,.5);cursor:crosshair;flex-shrink:0}
|
| 788 |
+
.canvas-wrap.mode-move{cursor:grab}
|
| 789 |
+
.canvas-wrap.mode-move.dragging{cursor:grabbing}
|
| 790 |
+
.canvas-wrap canvas{display:block;border-radius:4px}
|
| 791 |
+
|
| 792 |
+
.sidebar{width:300px;background:var(--surface);border-left:1px solid var(--border);overflow-y:auto;flex-shrink:0;display:flex;flex-direction:column}
|
| 793 |
+
.sidebar section{padding:1rem 1.25rem;border-bottom:1px solid var(--border)}
|
| 794 |
+
.sidebar section:last-child{border-bottom:none}
|
| 795 |
+
.sidebar h3{font-size:.68rem;text-transform:uppercase;letter-spacing:.8px;color:var(--text3);margin-bottom:.65rem;font-weight:700}
|
| 796 |
+
|
| 797 |
+
.tool-row{display:grid;grid-template-columns:1fr 1fr;gap:.4rem;margin-bottom:.5rem}
|
| 798 |
+
.tool-btn{padding:.55rem .3rem;border-radius:var(--radius-sm);border:1px solid var(--border);background:transparent;color:var(--text2);cursor:pointer;font-size:.78rem;font-weight:600;display:flex;flex-direction:column;align-items:center;gap:.25rem;transition:all .15s;font-family:inherit}
|
| 799 |
+
.tool-btn:hover{background:var(--surface2);color:var(--text)}
|
| 800 |
+
.tool-btn.active{background:rgba(99,102,241,.15);border-color:var(--primary);color:var(--primary-light)}
|
| 801 |
+
.tool-btn .ico{font-size:1.1rem}
|
| 802 |
+
.tool-hint{font-size:.72rem;color:var(--text3);line-height:1.4;margin-top:.35rem}
|
| 803 |
+
|
| 804 |
+
.stat-grid{display:grid;grid-template-columns:repeat(2,1fr);gap:.5rem;margin-bottom:.65rem}
|
| 805 |
+
.stat{background:rgba(15,23,42,.5);border:1px solid var(--border);padding:.6rem .75rem;border-radius:var(--radius-sm)}
|
| 806 |
+
.stat .num{font-size:1.4rem;font-weight:800;color:var(--primary-light);line-height:1}
|
| 807 |
+
.stat .lbl{font-size:.65rem;color:var(--text3);text-transform:uppercase;letter-spacing:.4px;margin-top:.2rem}
|
| 808 |
+
|
| 809 |
+
.filter-item{display:flex;align-items:center;gap:.55rem;padding:.4rem .45rem;border-radius:var(--radius-sm);cursor:pointer;transition:background .15s;user-select:none}
|
| 810 |
+
.filter-item:hover{background:rgba(15,23,42,.5)}
|
| 811 |
+
.filter-item input{display:none}
|
| 812 |
+
.filter-check{width:16px;height:16px;border-radius:4px;border:2px solid var(--border2);display:flex;align-items:center;justify-content:center;transition:all .15s;flex-shrink:0}
|
| 813 |
+
.filter-item input:checked~.filter-check{border-color:currentColor;background:currentColor}
|
| 814 |
+
.filter-item input:checked~.filter-check::after{content:'';display:block;width:4px;height:8px;border:solid #fff;border-width:0 2px 2px 0;transform:rotate(45deg) translateY(-1px)}
|
| 815 |
+
.filter-dot{width:9px;height:9px;border-radius:50%;flex-shrink:0}
|
| 816 |
+
.filter-label{flex:1;font-size:.82rem;font-weight:500;color:var(--text)}
|
| 817 |
+
.filter-count{font-size:.72rem;color:var(--text3);font-weight:600;background:var(--surface2);padding:.1rem .4rem;border-radius:10px}
|
| 818 |
+
|
| 819 |
+
.action-row{display:flex;flex-direction:column;gap:.45rem}
|
| 820 |
+
.action-row .btn{width:100%;justify-content:center;text-align:center}
|
| 821 |
+
|
| 822 |
+
.empty-state{color:var(--text3);font-size:.8rem;font-style:italic;padding:.25rem 0}
|
| 823 |
+
|
| 824 |
+
/* ββ loading ββ */
|
| 825 |
+
#loading{position:fixed;inset:0;background:rgba(15,23,42,.8);backdrop-filter:blur(8px);display:none;flex-direction:column;align-items:center;justify-content:center;z-index:9999}
|
| 826 |
+
.spinner{width:44px;height:44px;border:3px solid var(--border2);border-top-color:var(--primary-light);border-radius:50%;animation:spin .8s linear infinite}
|
| 827 |
+
@keyframes spin{to{transform:rotate(360deg)}}
|
| 828 |
+
#loading p{margin-top:1rem;font-weight:600;color:var(--text)}
|
| 829 |
+
.progress-text{font-size:.82rem;color:var(--text3);margin-top:.25rem}
|
| 830 |
+
.error-banner{background:rgba(239,68,68,.1);border:1px solid rgba(239,68,68,.4);color:#fca5a5;padding:.75rem 1rem;border-radius:var(--radius-sm);margin:.75rem 1.25rem;font-size:.85rem;display:none}
|
| 831 |
+
|
| 832 |
+
.toast{position:fixed;bottom:1.5rem;left:50%;transform:translateX(-50%) translateY(100px);background:var(--surface);border:1px solid var(--border2);color:var(--text);padding:.7rem 1.25rem;border-radius:var(--radius-sm);font-size:.85rem;font-weight:500;box-shadow:var(--shadow-lg);transition:transform .25s ease;z-index:9998}
|
| 833 |
+
.toast.show{transform:translateX(-50%) translateY(0)}
|
| 834 |
+
.toast.success{border-color:rgba(34,197,94,.5)}
|
| 835 |
+
|
| 836 |
+
@media(max-width:900px){
|
| 837 |
+
.editor-layout{flex-direction:column}
|
| 838 |
+
.sidebar{width:100%;border-left:none;border-top:1px solid var(--border);max-height:40vh}
|
| 839 |
+
.features{grid-template-columns:1fr}
|
| 840 |
+
}
|
| 841 |
+
</style>
|
| 842 |
+
</head>
|
| 843 |
+
<body>
|
| 844 |
+
|
| 845 |
+
<!-- Upload view -->
|
| 846 |
+
<div id="upload-view">
|
| 847 |
+
<div class="upload-card">
|
| 848 |
+
<div class="brand">
|
| 849 |
+
<div class="brand-icon">🕶</div>
|
| 850 |
+
<h1>Screenshot Anonymizer</h1>
|
| 851 |
+
</div>
|
| 852 |
+
<p class="subtitle">Redact PII in images · OCR + OpenAI Privacy Filter</p>
|
| 853 |
+
<div class="dropzone" id="dropzone">
|
| 854 |
+
<div class="dropzone-icon">🖼</div>
|
| 855 |
+
<div class="dropzone-text">Drop a screenshot here</div>
|
| 856 |
+
<div class="dropzone-hint">PNG, JPG, WebP · chat, email, document</div>
|
| 857 |
+
<input type="file" id="file-input" accept="image/png,image/jpeg,image/webp,image/bmp,image/tiff">
|
| 858 |
+
</div>
|
| 859 |
+
<div class="features">
|
| 860 |
+
<div class="feature"><div class="feature-title">Auto-detect</div><div class="feature-desc">OCR reads the text, the model finds names, emails, phones, secrets.</div></div>
|
| 861 |
+
<div class="feature"><div class="feature-title">Edit freely</div><div class="feature-desc">Drag to add bars, click to toggle, Delete to remove. Nothing is sent back.</div></div>
|
| 862 |
+
<div class="feature"><div class="feature-title">Export</div><div class="feature-desc">Save as PNG or copy to clipboard — ready for Twitter.</div></div>
|
| 863 |
+
</div>
|
| 864 |
+
<div class="powered-by">Powered by <strong>OpenAI Privacy Filter</strong> + <strong>Tesseract OCR</strong></div>
|
| 865 |
+
</div>
|
| 866 |
+
</div>
|
| 867 |
+
|
| 868 |
+
<!-- Editor view -->
|
| 869 |
+
<div id="editor-view">
|
| 870 |
+
<div class="top-bar">
|
| 871 |
+
<div class="brand"><div class="brand-icon">🕶</div><h1>Screenshot Anonymizer</h1></div>
|
| 872 |
+
<div class="file-info" id="file-info"></div>
|
| 873 |
+
<button class="btn btn-ghost" onclick="resetView()">New Screenshot</button>
|
| 874 |
+
</div>
|
| 875 |
+
<div class="error-banner" id="error-banner"></div>
|
| 876 |
+
<div class="editor-layout">
|
| 877 |
+
<div class="canvas-area" id="canvas-area">
|
| 878 |
+
<div class="canvas-wrap" id="canvas-wrap">
|
| 879 |
+
<canvas id="canvas"></canvas>
|
| 880 |
+
</div>
|
| 881 |
+
</div>
|
| 882 |
+
<div class="sidebar">
|
| 883 |
+
|
| 884 |
+
<section>
|
| 885 |
+
<h3>Tool</h3>
|
| 886 |
+
<div class="tool-row">
|
| 887 |
+
<button class="tool-btn active" data-mode="add" id="tool-add"><span class="ico">✏</span><span>Draw</span></button>
|
| 888 |
+
<button class="tool-btn" data-mode="move" id="tool-move"><span class="ico">✥</span><span>Select</span></button>
|
| 889 |
+
</div>
|
| 890 |
+
<div class="tool-hint" id="tool-hint">Drag anywhere to draw a black bar. Click a bar to select it.</div>
|
| 891 |
+
</section>
|
| 892 |
+
|
| 893 |
+
<section>
|
| 894 |
+
<h3>Detected</h3>
|
| 895 |
+
<div class="stat-grid">
|
| 896 |
+
<div class="stat"><div class="num" id="stat-boxes">0</div><div class="lbl">Total Bars</div></div>
|
| 897 |
+
<div class="stat"><div class="num" id="stat-cats">0</div><div class="lbl">Categories</div></div>
|
| 898 |
+
</div>
|
| 899 |
+
</section>
|
| 900 |
+
|
| 901 |
+
<section>
|
| 902 |
+
<h3>Categories</h3>
|
| 903 |
+
<div id="category-filters">
|
| 904 |
+
<div class="empty-state" id="cat-empty">No PII detected</div>
|
| 905 |
+
</div>
|
| 906 |
+
</section>
|
| 907 |
+
|
| 908 |
+
<section>
|
| 909 |
+
<h3>Export</h3>
|
| 910 |
+
<div class="action-row">
|
| 911 |
+
<button class="btn btn-primary" onclick="downloadImage()">⬇ Download PNG</button>
|
| 912 |
+
<button class="btn" onclick="copyToClipboard()">📋 Copy to Clipboard</button>
|
| 913 |
+
</div>
|
| 914 |
+
</section>
|
| 915 |
+
|
| 916 |
+
</div>
|
| 917 |
+
</div>
|
| 918 |
+
</div>
|
| 919 |
+
|
| 920 |
+
<div id="loading"><div class="spinner"></div><p>Analyzing screenshot…</p><div class="progress-text">OCR → Privacy Filter → Map to pixels</div></div>
|
| 921 |
+
<div class="toast" id="toast"></div>
|
| 922 |
+
|
| 923 |
+
<script>
|
| 924 |
+
const State = {
|
| 925 |
+
img: null, // HTMLImageElement of the screenshot
|
| 926 |
+
width: 0, height: 0,
|
| 927 |
+
boxes: [], // [{id, x, y, w, h, label, enabled, custom}]
|
| 928 |
+
nextId: 1,
|
| 929 |
+
activeCats: new Set(),
|
| 930 |
+
catMeta: {},
|
| 931 |
+
mode: 'add', // 'add' | 'move'
|
| 932 |
+
selected: null, // box id
|
| 933 |
+
drag: null, // {type:'draw'|'move', startX, startY, origBox?, newBox?}
|
| 934 |
+
scale: 1, // display scale (natural px -> screen px)
|
| 935 |
+
filename: '',
|
| 936 |
+
catCounts: {},
|
| 937 |
+
};
|
| 938 |
+
|
| 939 |
+
const LABEL_NAMES = {private_person:'Person',private_address:'Address',private_email:'Email',private_phone:'Phone',private_url:'URL',private_date:'Date',account_number:'Account',secret:'Secret'};
|
| 940 |
+
|
| 941 |
+
// ββ upload flow βββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 942 |
+
const dz = document.getElementById('dropzone');
|
| 943 |
+
const fi = document.getElementById('file-input');
|
| 944 |
+
['dragenter','dragover'].forEach(e=>dz.addEventListener(e,ev=>{ev.preventDefault();dz.classList.add('dragover')}));
|
| 945 |
+
['dragleave','drop'].forEach(e=>dz.addEventListener(e,ev=>{ev.preventDefault();dz.classList.remove('dragover')}));
|
| 946 |
+
dz.addEventListener('drop',ev=>{if(ev.dataTransfer.files[0]) uploadFile(ev.dataTransfer.files[0])});
|
| 947 |
+
fi.addEventListener('change',ev=>{if(ev.target.files[0]) uploadFile(ev.target.files[0])});
|
| 948 |
+
|
| 949 |
+
// Also accept a paste (ctrl+v) when on upload view
|
| 950 |
+
document.addEventListener('paste',ev=>{
|
| 951 |
+
if(document.getElementById('upload-view').style.display==='none') return;
|
| 952 |
+
const items = ev.clipboardData && ev.clipboardData.items;
|
| 953 |
+
if(!items) return;
|
| 954 |
+
for(const it of items){
|
| 955 |
+
if(it.type && it.type.startsWith('image/')){
|
| 956 |
+
const f = it.getAsFile(); if(f) uploadFile(f); ev.preventDefault(); return;
|
| 957 |
+
}
|
| 958 |
+
}
|
| 959 |
+
});
|
| 960 |
+
|
| 961 |
+
async function uploadFile(file){
|
| 962 |
+
if(!file.type || !file.type.startsWith('image/')){showError('Please drop an image file.'); return;}
|
| 963 |
+
document.getElementById('loading').style.display='flex';
|
| 964 |
+
document.getElementById('upload-view').style.display='none';
|
| 965 |
+
const form = new FormData(); form.append('file', file);
|
| 966 |
+
try{
|
| 967 |
+
const r = await fetch('/api/detect', {method:'POST', body:form});
|
| 968 |
+
const d = await r.json();
|
| 969 |
+
if(d.error){showError(d.error); return;}
|
| 970 |
+
await initEditor(d);
|
| 971 |
+
}catch(e){showError('Analysis failed: '+e.message);}
|
| 972 |
+
finally{document.getElementById('loading').style.display='none';}
|
| 973 |
+
}
|
| 974 |
+
|
| 975 |
+
function showError(m){
|
| 976 |
+
document.getElementById('loading').style.display='none';
|
| 977 |
+
document.getElementById('editor-view').style.display='flex';
|
| 978 |
+
const b = document.getElementById('error-banner');
|
| 979 |
+
b.textContent = m; b.style.display = 'block';
|
| 980 |
+
}
|
| 981 |
+
|
| 982 |
+
function resetView(){
|
| 983 |
+
document.getElementById('editor-view').style.display='none';
|
| 984 |
+
document.getElementById('upload-view').style.display='flex';
|
| 985 |
+
document.getElementById('error-banner').style.display='none';
|
| 986 |
+
fi.value='';
|
| 987 |
+
State.boxes = []; State.selected = null; State.img = null;
|
| 988 |
+
}
|
| 989 |
+
|
| 990 |
+
// ββ init editor βββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 991 |
+
async function initEditor(data){
|
| 992 |
+
document.getElementById('editor-view').style.display='flex';
|
| 993 |
+
document.getElementById('error-banner').style.display='none';
|
| 994 |
+
document.getElementById('file-info').textContent = data.filename || '';
|
| 995 |
+
State.filename = data.filename || 'redacted.png';
|
| 996 |
+
State.width = data.width; State.height = data.height;
|
| 997 |
+
State.catMeta = data.categories_meta || {};
|
| 998 |
+
State.catCounts = {};
|
| 999 |
+
State.boxes = (data.boxes || []).map(b => {
|
| 1000 |
+
State.catCounts[b.label] = (State.catCounts[b.label]||0) + 1;
|
| 1001 |
+
return {id: State.nextId++, x:b.x, y:b.y, w:b.w, h:b.h, label:b.label, text:b.text, enabled:true, custom:false};
|
| 1002 |
+
});
|
| 1003 |
+
State.activeCats = new Set(Object.keys(State.catCounts));
|
| 1004 |
+
|
| 1005 |
+
const img = new Image();
|
| 1006 |
+
await new Promise((res, rej) => { img.onload = res; img.onerror = rej; img.src = data.image; });
|
| 1007 |
+
State.img = img;
|
| 1008 |
+
|
| 1009 |
+
setupCanvas();
|
| 1010 |
+
renderCategoryFilters();
|
| 1011 |
+
updateStats();
|
| 1012 |
+
draw();
|
| 1013 |
+
}
|
| 1014 |
+
|
| 1015 |
+
function setupCanvas(){
|
| 1016 |
+
const cv = document.getElementById('canvas');
|
| 1017 |
+
cv.width = State.width;
|
| 1018 |
+
cv.height = State.height;
|
| 1019 |
+
// scale to fit view area
|
| 1020 |
+
const area = document.getElementById('canvas-area');
|
| 1021 |
+
const maxW = area.clientWidth - 64, maxH = area.clientHeight - 64;
|
| 1022 |
+
const scale = Math.min(1, maxW/State.width, maxH/State.height);
|
| 1023 |
+
State.scale = scale;
|
| 1024 |
+
cv.style.width = (State.width * scale) + 'px';
|
| 1025 |
+
cv.style.height = (State.height * scale) + 'px';
|
| 1026 |
+
}
|
| 1027 |
+
|
| 1028 |
+
window.addEventListener('resize', ()=>{ if(State.img){ setupCanvas(); draw(); } });
|
| 1029 |
+
|
// ── drawing ────────────────────────────────────────────────────────
// Repaint the editor canvas: base image, every visible redaction bar,
// a dashed outline around the selected bar, and (while dragging in
// "add" mode) the rectangle currently being drawn.
function draw(){
  const canvasEl = document.getElementById('canvas');
  const ctx = canvasEl.getContext('2d');
  ctx.clearRect(0, 0, canvasEl.width, canvasEl.height);
  ctx.drawImage(State.img, 0, 0);

  // Solid black bars for every enabled, filter-visible box.
  State.boxes.filter(isVisible).forEach(box => {
    ctx.fillStyle = '#000';
    ctx.fillRect(box.x, box.y, box.w, box.h);
  });

  // Dashed indigo outline around the current selection, if any.
  const selection = selectedBox();
  if(selection){
    ctx.save();
    ctx.strokeStyle = '#818cf8';
    ctx.lineWidth = Math.max(2, 2/State.scale);
    ctx.setLineDash([6/State.scale, 4/State.scale]);
    ctx.strokeRect(selection.x, selection.y, selection.w, selection.h);
    ctx.restore();
  }

  // Preview of a bar still being rubber-banded out in "add" mode.
  const pending = (State.drag && State.drag.type === 'draw') ? State.drag.newBox : null;
  if(pending){
    ctx.save();
    ctx.fillStyle = 'rgba(0,0,0,.75)';
    ctx.fillRect(pending.x, pending.y, pending.w, pending.h);
    ctx.strokeStyle = '#818cf8';
    ctx.lineWidth = Math.max(1.5, 1.5/State.scale);
    ctx.strokeRect(pending.x, pending.y, pending.w, pending.h);
    ctx.restore();
  }
}
| 1065 |
+
|
// True when a box should be painted: it must be individually enabled, and
// non-custom (detector-produced) boxes additionally need their category
// filter toggled on. Hand-drawn boxes bypass the category filters.
function isVisible(b){
  if(!b.enabled) return false;
  return b.custom ? true : State.activeCats.has(b.label);
}
| 1071 |
+
|
// Resolve State.selected (a box id, or null) to its box object.
// Returns null when nothing is selected or the id no longer exists
// (e.g. the box was deleted).
function selectedBox(){
  const id = State.selected;
  if(id == null) return null;
  for(const box of State.boxes){
    if(box.id === id) return box;
  }
  return null;
}
| 1076 |
+
|
// ── mouse interaction ─────────────────────────────────────────────
// Wrapper element around the canvas; its bounding rect is the origin used
// by toCanvasXY to translate pointer events into image-pixel coordinates.
const wrap = document.getElementById('canvas-wrap');
| 1079 |
+
|
// Convert a mouse event's viewport coordinates into un-scaled image-pixel
// coordinates, using the wrapper's bounding rect as the origin and undoing
// the CSS display scale.
function toCanvasXY(ev){
  const {left, top} = wrap.getBoundingClientRect();
  return {
    x: (ev.clientX - left) / State.scale,
    y: (ev.clientY - top) / State.scale,
  };
}
| 1086 |
+
|
// Return the topmost visible box containing image point (x, y), or null.
// Boxes later in State.boxes are painted last (on top), so scan the array
// back-to-front and take the first hit.
function hitTest(x, y){
  const stack = State.boxes;
  for(let idx = stack.length; idx-- > 0; ){
    const box = stack[idx];
    if(!isVisible(box)) continue;
    const insideX = x >= box.x && x <= box.x + box.w;
    const insideY = y >= box.y && y <= box.y + box.h;
    if(insideX && insideY) return box;
  }
  return null;
}
| 1096 |
+
|
// Left-button press on the canvas wrapper. Three outcomes:
//   * a bar was clicked           -> select it and start a move drag
//   * empty space, "add" mode     -> start rubber-banding a new rectangle
//   * empty space, any other mode -> clear the selection
wrap.addEventListener('mousedown', ev => {
  if(ev.button !== 0) return;   // left button only
  ev.preventDefault();
  const pt = toCanvasXY(ev);
  const hit = hitTest(pt.x, pt.y);

  if(hit){
    State.selected = hit.id;
    State.drag = {
      type: 'move',
      startX: pt.x,
      startY: pt.y,
      origBox: { x: hit.x, y: hit.y, w: hit.w, h: hit.h },
      boxId: hit.id,
    };
    wrap.classList.add('dragging');
  } else if(State.mode === 'add'){
    State.selected = null;
    State.drag = {
      type: 'draw',
      startX: pt.x,
      startY: pt.y,
      newBox: { x: pt.x, y: pt.y, w: 0, h: 0 },
    };
  } else {
    State.selected = null;
    State.drag = null;
  }
  draw();
});
| 1117 |
+
|
// While a drag is active: grow the in-progress rectangle ("draw"), or
// translate the grabbed box, clamped so it stays fully inside the image
// ("move"). Listening on window keeps the drag alive outside the canvas.
window.addEventListener('mousemove', ev => {
  const drag = State.drag;
  if(!drag) return;
  const pt = toCanvasXY(ev);

  if(drag.type === 'draw'){
    // Normalized rectangle between the anchor point and the cursor.
    drag.newBox = {
      x: Math.min(drag.startX, pt.x),
      y: Math.min(drag.startY, pt.y),
      w: Math.abs(pt.x - drag.startX),
      h: Math.abs(pt.y - drag.startY),
    };
  } else if(drag.type === 'move'){
    const box = State.boxes.find(b => b.id === drag.boxId);
    if(box){
      const orig = drag.origBox;
      const dx = pt.x - drag.startX;
      const dy = pt.y - drag.startY;
      // Offset from the box's original position, clamped to image bounds.
      box.x = Math.max(0, Math.min(State.width - orig.w, orig.x + dx));
      box.y = Math.max(0, Math.min(State.height - orig.h, orig.y + dy));
    }
  }
  draw();
});
| 1138 |
+
|
// Drag release: commit a freshly drawn rectangle — unless it is just a
// click-sized sliver — then end the drag and repaint.
window.addEventListener('mouseup', ev => {
  if(!State.drag) return;
  wrap.classList.remove('dragging');

  if(State.drag.type === 'draw'){
    const r = State.drag.newBox;
    if(r.w > 3 && r.h > 3){   // ignore accidental tiny drags
      const created = {
        id: State.nextId++,
        x: Math.round(r.x),
        y: Math.round(r.y),
        w: Math.round(r.w),
        h: Math.round(r.h),
        label: 'custom',
        text: '',
        enabled: true,
        custom: true,
      };
      State.boxes.push(created);
      State.selected = created.id;
      updateStats();
    }
  }
  State.drag = null;
  draw();
});
| 1155 |
+
|
// Keyboard shortcuts for the editor view:
//   Delete/Backspace — remove the selected bar
//   Escape           — deselect and cancel any drag
//   D                — switch to the draw ("add") tool
//   S or V           — switch to the select/move tool
// Shortcuts are suppressed while the editor is hidden, and also while the
// user is typing in a form field — otherwise pressing Backspace or "d"/"s"
// inside a text input would delete the selected bar or flip the tool.
window.addEventListener('keydown', ev => {
  if(document.getElementById('editor-view').style.display === 'none') return;
  const t = ev.target;
  if(t && (t.tagName === 'INPUT' || t.tagName === 'TEXTAREA' || t.isContentEditable)) return;
  if(ev.key === 'Delete' || ev.key === 'Backspace'){
    if(State.selected != null){
      State.boxes = State.boxes.filter(b => b.id !== State.selected);
      State.selected = null;
      updateStats();
      draw();
      ev.preventDefault();   // keep Backspace from navigating back
    }
  } else if(ev.key === 'Escape'){
    State.selected = null; State.drag = null; draw();
  } else if(ev.key === 'd' || ev.key === 'D'){
    setMode('add');
  } else if(ev.key === 's' || ev.key === 'S' || ev.key === 'v' || ev.key === 'V'){
    setMode('move');
  }
});
| 1175 |
+
|
// ── tool mode ─────────────────────────────────────────────────────
// Each toolbar button carries its target mode in data-mode ("add"/"move");
// clicking one simply forwards that mode to setMode.
document.querySelectorAll('.tool-btn').forEach(btn => {
  btn.addEventListener('click', () => setMode(btn.dataset.mode));
});
| 1180 |
+
|
// Switch the active tool: record it on State, sync toolbar button
// highlighting and the wrapper's cursor class, and update the hint line.
function setMode(m){
  State.mode = m;
  for(const btn of document.querySelectorAll('.tool-btn')){
    btn.classList.toggle('active', btn.dataset.mode === m);
  }
  wrap.classList.toggle('mode-move', m === 'move');
  const hint = (m === 'add')
    ? 'Drag anywhere to draw a black bar. Click a bar to select it.'
    : 'Click a bar to select, drag to move, Delete to remove.';
  document.getElementById('tool-hint').textContent = hint;
}
| 1189 |
+
|
// ── category filters ──────────────────────────────────────────────
// Rebuild the category-filter checkbox list from State.catCounts.
//
// Fix: the #cat-empty placeholder is kept attached (hidden while there are
// categories) so it survives the `innerHTML = ''` reset. The previous code
// detached it permanently on the first non-empty render, so a later render
// with zero categories got getElementById('cat-empty') === null and threw.
function renderCategoryFilters(){
  const ct = document.getElementById('category-filters');
  const empty = document.getElementById('cat-empty');
  const cats = Object.keys(State.catCounts);

  ct.innerHTML = '';
  if(empty) ct.appendChild(empty);   // re-attach the placeholder after the reset

  if(!cats.length){
    if(empty) empty.style.display = 'block';
    return;
  }
  if(empty) empty.style.display = 'none';

  for(const cat of cats){
    const meta = State.catMeta[cat] || {color:'#888', label:cat};
    const count = State.catCounts[cat];
    const el = document.createElement('label');
    el.className = 'filter-item';
    el.style.color = meta.color;
    // NOTE(review): meta.label/meta.color appear to come from app-defined
    // catMeta, not user input — if that ever changes, build these spans with
    // textContent instead of innerHTML to avoid HTML injection.
    el.innerHTML = `<input type="checkbox" ${State.activeCats.has(cat)?'checked':''}>
      <span class="filter-check"></span>
      <span class="filter-dot" style="background:${meta.color}"></span>
      <span class="filter-label">${meta.label}</span>
      <span class="filter-count">${count}</span>`;
    // Toggling a checkbox enables/disables that whole category.
    el.querySelector('input').addEventListener('change', ev => {
      if(ev.target.checked) State.activeCats.add(cat);
      else State.activeCats.delete(cat);
      draw();
      updateStats();
    });
    ct.appendChild(el);
  }
}
| 1217 |
+
|
// Refresh the sidebar counters: how many bars are currently visible and how
// many distinct categories they span (hand-drawn bars all count as the
// single pseudo-category "custom").
function updateStats(){
  const shown = State.boxes.filter(isVisible);
  document.getElementById('stat-boxes').textContent = shown.length;
  const catNames = new Set(shown.map(b => b.custom ? 'custom' : b.label));
  document.getElementById('stat-cats').textContent = catNames.size;
}
| 1225 |
+
|
// ── export ────────────────────────────────────────────────────────
// Compose the final image on an off-screen canvas at full resolution: the
// original screenshot with every visible bar burned in as solid black.
function renderExportCanvas(){
  const out = document.createElement('canvas');
  out.width = State.width;
  out.height = State.height;
  const ctx = out.getContext('2d');
  ctx.drawImage(State.img, 0, 0);
  ctx.fillStyle = '#000';
  for(const box of State.boxes.filter(isVisible)){
    ctx.fillRect(box.x, box.y, box.w, box.h);
  }
  return out;
}
| 1236 |
+
|
// Export the redacted composite as "<basename>-redacted.png" by clicking a
// synthetic <a download> link, then release the object URL shortly after.
function downloadImage(){
  const composite = renderExportCanvas();
  composite.toBlob(blob => {
    const link = document.createElement('a');
    const stem = (State.filename || 'screenshot').replace(/\.[^/.]+$/, '');
    link.download = stem + '-redacted.png';
    link.href = URL.createObjectURL(blob);
    link.click();
    // Revoke after the click has had a chance to start the download.
    setTimeout(() => URL.revokeObjectURL(link.href), 1000);
    toast('Saved ' + link.download, true);
  }, 'image/png');
}
| 1249 |
+
|
// Copy the redacted composite to the system clipboard as a PNG.
// canvas.toBlob is callback-based, so it is adapted into a Promise here;
// the outer try/catch funnels both "API unsupported" and write failures
// into a single error toast.
async function copyToClipboard(){
  const ec = renderExportCanvas();
  try{
    await new Promise((res, rej) => {
      ec.toBlob(async blob => {
        try{
          // Image clipboard writes need the async clipboard API plus
          // ClipboardItem; bail out with a clear error when unavailable.
          if(!navigator.clipboard || !window.ClipboardItem){ rej(new Error('Clipboard not supported')); return; }
          await navigator.clipboard.write([new ClipboardItem({'image/png': blob})]);
          res();
        }catch(e){ rej(e); }
      }, 'image/png');
    });
    toast('Copied to clipboard', true);
  }catch(e){
    toast('Copy failed: ' + e.message);
  }
}
| 1267 |
+
|
// Transient status message in the corner. Re-invoking restarts the hide
// timer so rapid successive messages don't flicker; `ok` switches on the
// success styling.
let toastTimer = null;
function toast(msg, ok){
  const el = document.getElementById('toast');
  el.textContent = msg;
  el.classList.toggle('success', Boolean(ok));
  el.classList.add('show');
  clearTimeout(toastTimer);
  toastTimer = setTimeout(() => el.classList.remove('show'), 2200);
}
| 1277 |
+
</script>
|
| 1278 |
+
</body>
|
| 1279 |
+
</html>"""
|
| 1280 |
+
|
| 1281 |
+
|
# Entry point: launch the Gradio server, binding to all interfaces on port
# 7860 (the port Hugging Face Spaces containers expose).
if __name__ == "__main__":
    server.launch(server_name="0.0.0.0", server_port=7860)
packages.txt
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
tesseract-ocr
|
requirements.txt
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
tiktoken
|
| 2 |
+
sentencepiece
|
| 3 |
+
torch
|
| 4 |
+
safetensors
|
| 5 |
+
huggingface_hub
|
| 6 |
+
gradio[mcp]
|
| 7 |
+
pytesseract
|
| 8 |
+
Pillow
|
| 9 |
+
python-multipart
|
| 10 |
+
accelerate
|
| 11 |
+
spaces
|