Praveen-K-0503 commited on
Commit ·
3a66575
1
Parent(s): ae46d30
fix: resolve parameter bugs in frontend settings panel
Browse files- Add nmsRadius (9px default) to settings state; expose as slider (3-20px)
- Fix float precision drift on magnification/nmsRadius sliders using toFixed
- Ensure integer params (capacity, frameSkip) use Math.round on slider change
- Properly coerce all FormData values to strings for FastAPI compatibility
- Fix WebSocket settings: Boolean() wraps toggles, Math.round() wraps ints
- Capacity slider step reduced from 10 to 5 for finer control
- Add frontend/.npm-cache/ to .gitignore
- .gitignore +64 -0
- Dockerfile +29 -0
- LICENSE +1401 -0
- MANUAL_TESTING.md +19 -0
- README.md +214 -0
- alert_system.py +16 -0
- api.py +529 -0
- app.py +606 -0
- app_enhancements.py +115 -0
- benchmark.py +171 -0
- crowd_datasets/SHHA/SHHA.py +158 -0
- crowd_datasets/SHHA/__init__.py +0 -0
- crowd_datasets/SHHA/loading_data.py +27 -0
- crowd_datasets/__init__.py +8 -0
- database.py +14 -0
- download_weights.py +53 -0
- engine.py +159 -0
- evaluate.py +171 -0
- frontend/.env.example +5 -0
- frontend/.env.production +3 -0
- frontend/.gitignore +24 -0
- frontend/README.md +16 -0
- frontend/eslint.config.js +29 -0
- frontend/index.html +13 -0
- frontend/package-lock.json +0 -0
- frontend/package.json +29 -0
- frontend/public/favicon.svg +1 -0
- frontend/public/icons.svg +24 -0
- frontend/src/App.css +184 -0
- frontend/src/App.jsx +1067 -0
- frontend/src/index.css +607 -0
- frontend/src/main.jsx +10 -0
- frontend/vercel.json +6 -0
- frontend/vite.config.js +8 -0
- label_tool.py +99 -0
- models/__init__.py +6 -0
- models/backbone.py +68 -0
- models/matcher.py +83 -0
- models/p2pnet.py +342 -0
- models/vgg_.py +197 -0
- motion_estimator.py +35 -0
- optimization_notes.md +81 -0
- report_generator.py +28 -0
- requirements.txt +28 -0
- run_test.py +103 -0
- test_system.py +114 -0
- tracker.py +96 -0
- train.py +229 -0
- util/__init__.py +0 -0
- util/misc.py +506 -0
.gitignore
ADDED
|
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Python
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*.pyo
|
| 5 |
+
*.pyd
|
| 6 |
+
*.egg
|
| 7 |
+
*.egg-info/
|
| 8 |
+
dist/
|
| 9 |
+
build/
|
| 10 |
+
.eggs/
|
| 11 |
+
*.so
|
| 12 |
+
.Python
|
| 13 |
+
env/
|
| 14 |
+
.venv/
|
| 15 |
+
venv/
|
| 16 |
+
ENV/
|
| 17 |
+
pip-log.txt
|
| 18 |
+
pip-delete-this-directory.txt
|
| 19 |
+
.tox/
|
| 20 |
+
.coverage
|
| 21 |
+
.coverage.*
|
| 22 |
+
.cache
|
| 23 |
+
nosetests.xml
|
| 24 |
+
coverage.xml
|
| 25 |
+
*.cover
|
| 26 |
+
*.log
|
| 27 |
+
.mypy_cache/
|
| 28 |
+
.dmypy.json
|
| 29 |
+
dmypy.json
|
| 30 |
+
|
| 31 |
+
# SQLite databases
|
| 32 |
+
*.db
|
| 33 |
+
*.sqlite3
|
| 34 |
+
|
| 35 |
+
# Log files
|
| 36 |
+
*.log
|
| 37 |
+
*.err.log
|
| 38 |
+
logs/
|
| 39 |
+
|
| 40 |
+
# Temp uploads
|
| 41 |
+
temp_uploads/
|
| 42 |
+
|
| 43 |
+
# Node / Frontend
|
| 44 |
+
frontend/node_modules/
|
| 45 |
+
frontend/dist/
|
| 46 |
+
frontend/.env
|
| 47 |
+
frontend/.npm-cache/
|
| 48 |
+
node_modules/
|
| 49 |
+
|
| 50 |
+
# OS
|
| 51 |
+
.DS_Store
|
| 52 |
+
Thumbs.db
|
| 53 |
+
|
| 54 |
+
# IDE
|
| 55 |
+
.vscode/
|
| 56 |
+
.idea/
|
| 57 |
+
*.swp
|
| 58 |
+
*.swo
|
| 59 |
+
|
| 60 |
+
# Model weights (large binary files - too large for GitHub without LFS)
|
| 61 |
+
weights/
|
| 62 |
+
|
| 63 |
+
# Datasets (usually large)
|
| 64 |
+
# crowd_datasets/
|
Dockerfile
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.11-slim
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
|
| 5 |
+
# System deps for OpenCV headless + general runtime
|
| 6 |
+
RUN apt-get update && apt-get install -y \
|
| 7 |
+
libglib2.0-0 \
|
| 8 |
+
libsm6 \
|
| 9 |
+
libxext6 \
|
| 10 |
+
libxrender-dev \
|
| 11 |
+
libgl1 \
|
| 12 |
+
&& rm -rf /var/lib/apt/lists/*
|
| 13 |
+
|
| 14 |
+
# Copy requirements first for layer caching
|
| 15 |
+
COPY requirements.txt .
|
| 16 |
+
|
| 17 |
+
# Install Python dependencies
|
| 18 |
+
RUN pip install --no-cache-dir -r requirements.txt
|
| 19 |
+
|
| 20 |
+
# Copy the rest of the project
|
| 21 |
+
COPY . .
|
| 22 |
+
|
| 23 |
+
# Create runtime directories
|
| 24 |
+
RUN mkdir -p temp_uploads weights
|
| 25 |
+
|
| 26 |
+
# HuggingFace Spaces uses port 7860
|
| 27 |
+
EXPOSE 7860
|
| 28 |
+
|
| 29 |
+
CMD ["uvicorn", "api:app", "--host", "0.0.0.0", "--port", "7860"]
|
LICENSE
ADDED
|
@@ -0,0 +1,1401 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Tencent is pleased to support the open source community by making P2PNet available.
|
| 2 |
+
|
| 3 |
+
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. The below software in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) THL A29 Limited.
|
| 4 |
+
|
| 5 |
+
P2PNet is licensed under the following License, except for the third-party components listed below.
|
| 6 |
+
|
| 7 |
+
License for P2PNet:
|
| 8 |
+
--------------------------------------------------------------------
|
| 9 |
+
Redistribution and use in source and binary forms, with or without modification, are permitted provided
|
| 10 |
+
that the following conditions are met:
|
| 11 |
+
|
| 12 |
+
1. Use in source and binary forms shall only be for the purpose of academic research.
|
| 13 |
+
|
| 14 |
+
2. Redistributions of source code must retain the above copyright notice, this list of conditions and the
|
| 15 |
+
following disclaimer.
|
| 16 |
+
|
| 17 |
+
3. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
|
| 18 |
+
the following disclaimer in the documentation and/or other materials provided with the distribution.
|
| 19 |
+
|
| 20 |
+
4. Neither the name of the copyright holder nor the names of its contributors may be used to endorse
|
| 21 |
+
or promote products derived from this software without specific prior written permission.
|
| 22 |
+
|
| 23 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
| 24 |
+
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
| 25 |
+
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 26 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 27 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 28 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 29 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 30 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 31 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 32 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
Other dependencies and licenses:
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
Open Source Software Licensed under the BSD 3-Clause License:
|
| 39 |
+
--------------------------------------------------------------------
|
| 40 |
+
1. Pytorch
|
| 41 |
+
Copyright (c) 2016- Facebook, Inc (Adam Paszke)
|
| 42 |
+
Copyright (c) 2014- Facebook, Inc (Soumith Chintala)
|
| 43 |
+
Copyright (c) 2011-2014 Idiap Research Institute (Ronan Collobert)
|
| 44 |
+
Copyright (c) 2012-2014 Deepmind Technologies (Koray Kavukcuoglu)
|
| 45 |
+
Copyright (c) 2011-2012 NEC Laboratories America (Koray Kavukcuoglu)
|
| 46 |
+
Copyright (c) 2011-2013 NYU (Clement Farabet)
|
| 47 |
+
Copyright (c) 2006-2010 NEC Laboratories America (Ronan Collobert, Leon Bottou, Iain Melvin, Jason Weston)
|
| 48 |
+
Copyright (c) 2006 Idiap Research Institute (Samy Bengio)
|
| 49 |
+
Copyright (c) 2001-2004 Idiap Research Institute (Ronan Collobert, Samy Bengio, Johnny Mariethoz)
|
| 50 |
+
|
| 51 |
+
2. torchvision
|
| 52 |
+
Copyright (c) Soumith Chintala 2016,
|
| 53 |
+
All rights reserved.
|
| 54 |
+
|
| 55 |
+
3. opencv-python
|
| 56 |
+
Copyright (C) 2000-2020, Intel Corporation, all rights reserved.
|
| 57 |
+
Copyright (C) 2009-2011, Willow Garage Inc., all rights reserved.
|
| 58 |
+
Copyright (C) 2009-2016, NVIDIA Corporation, all rights reserved.
|
| 59 |
+
Copyright (C) 2010-2013, Advanced Micro Devices, Inc., all rights reserved.
|
| 60 |
+
Copyright (C) 2015-2016, OpenCV Foundation, all rights reserved.
|
| 61 |
+
Copyright (C) 2015-2016, Itseez Inc., all rights reserved.
|
| 62 |
+
Copyright (C) 2019-2020, Xperience AI, all rights reserved.
|
| 63 |
+
Third party copyrights are property of their respective owners.
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
Terms of the BSD 3-Clause License:
|
| 67 |
+
--------------------------------------------------------------------
|
| 68 |
+
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
|
| 69 |
+
|
| 70 |
+
1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
|
| 71 |
+
|
| 72 |
+
2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
|
| 73 |
+
|
| 74 |
+
3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
|
| 75 |
+
|
| 76 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
Open Source Software Licensed under the PIL Software License:
|
| 81 |
+
--------------------------------------------------------------------
|
| 82 |
+
1. Pillow
|
| 83 |
+
Copyright © 2010-2020 by Alex Clark and contributors
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
Terms of the PIL Software License:
|
| 87 |
+
--------------------------------------------------------------------
|
| 88 |
+
By obtaining, using, and/or copying this software and/or its associated documentation, you agree that you have read, understood, and will comply with the following terms and conditions:
|
| 89 |
+
|
| 90 |
+
Permission to use, copy, modify, and distribute this software and its associated documentation for any purpose and without fee is hereby granted, provided that the above copyright notice appears in all copies, and that both that copyright notice and this permission notice appear in supporting documentation, and that the name of Secret Labs AB or the author not be used in advertising or publicity pertaining to distribution of the software without specific, written prior permission.
|
| 91 |
+
|
| 92 |
+
SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
Open Source Software Licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
|
| 97 |
+
--------------------------------------------------------------------
|
| 98 |
+
1. numpy
|
| 99 |
+
Copyright (c) 2005-2020, NumPy Developers.
|
| 100 |
+
All rights reserved.
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
A copy of the BSD 3-Clause License is included in this file.
|
| 104 |
+
|
| 105 |
+
The NumPy repository and source distributions bundle several libraries that are
|
| 106 |
+
compatibly licensed. We list these here.
|
| 107 |
+
|
| 108 |
+
Name: Numpydoc
|
| 109 |
+
Files: doc/sphinxext/numpydoc/*
|
| 110 |
+
License: BSD-2-Clause
|
| 111 |
+
|
| 112 |
+
Name: scipy-sphinx-theme
|
| 113 |
+
Files: doc/scipy-sphinx-theme/*
|
| 114 |
+
License: BSD-3-Clause AND PSF-2.0 AND Apache-2.0
|
| 115 |
+
|
| 116 |
+
Name: lapack-lite
|
| 117 |
+
Files: numpy/linalg/lapack_lite/*
|
| 118 |
+
License: BSD-3-Clause
|
| 119 |
+
|
| 120 |
+
Name: tempita
|
| 121 |
+
Files: tools/npy_tempita/*
|
| 122 |
+
License: MIT
|
| 123 |
+
|
| 124 |
+
Name: dragon4
|
| 125 |
+
Files: numpy/core/src/multiarray/dragon4.c
|
| 126 |
+
License: MIT
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
|
| 130 |
+
Open Source Software Licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
|
| 131 |
+
--------------------------------------------------------------------
|
| 132 |
+
1. h5py
|
| 133 |
+
Copyright (c) 2008 Andrew Collette and contributors
|
| 134 |
+
All rights reserved.
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
A copy of the BSD 3-Clause License is included in this file.
|
| 138 |
+
|
| 139 |
+
For thirdparty hdf5:
|
| 140 |
+
HDF5 (Hierarchical Data Format 5) Software Library and Utilities
|
| 141 |
+
Copyright 2006-2007 by The HDF Group (THG).
|
| 142 |
+
|
| 143 |
+
NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities
|
| 144 |
+
Copyright 1998-2006 by the Board of Trustees of the University of Illinois.
|
| 145 |
+
|
| 146 |
+
All rights reserved.
|
| 147 |
+
|
| 148 |
+
Contributors: National Center for Supercomputing Applications (NCSA)
|
| 149 |
+
at the University of Illinois, Fortner Software, Unidata Program
|
| 150 |
+
Center (netCDF), The Independent JPEG Group (JPEG), Jean-loup Gailly
|
| 151 |
+
and Mark Adler (gzip), and Digital Equipment Corporation (DEC).
|
| 152 |
+
|
| 153 |
+
Redistribution and use in source and binary forms, with or without
|
| 154 |
+
modification, are permitted for any purpose (including commercial
|
| 155 |
+
purposes) provided that the following conditions are met:
|
| 156 |
+
|
| 157 |
+
1. Redistributions of source code must retain the above copyright
|
| 158 |
+
notice, this list of conditions, and the following disclaimer.
|
| 159 |
+
2. Redistributions in binary form must reproduce the above
|
| 160 |
+
copyright notice, this list of conditions, and the following
|
| 161 |
+
disclaimer in the documentation and/or materials provided with the
|
| 162 |
+
distribution.
|
| 163 |
+
3. In addition, redistributions of modified forms of the source or
|
| 164 |
+
binary code must carry prominent notices stating that the original
|
| 165 |
+
code was changed and the date of the change.
|
| 166 |
+
4. All publications or advertising materials mentioning features or
|
| 167 |
+
use of this software are asked, but not required, to acknowledge that
|
| 168 |
+
it was developed by The HDF Group and by the National Center for
|
| 169 |
+
Supercomputing Applications at the University of Illinois at
|
| 170 |
+
Urbana-Champaign and credit the contributors.
|
| 171 |
+
5. Neither the name of The HDF Group, the name of the University,
|
| 172 |
+
nor the name of any Contributor may be used to endorse or promote
|
| 173 |
+
products derived from this software without specific prior written
|
| 174 |
+
permission from THG, the University, or the Contributor, respectively.
|
| 175 |
+
|
| 176 |
+
DISCLAIMER: THIS SOFTWARE IS PROVIDED BY THE HDF GROUP (THG) AND THE
|
| 177 |
+
CONTRIBUTORS "AS IS" WITH NO WARRANTY OF ANY KIND, EITHER EXPRESSED OR
|
| 178 |
+
IMPLIED. In no event shall THG or the Contributors be liable for any
|
| 179 |
+
damages suffered by the users arising out of the use of this software,
|
| 180 |
+
even if advised of the possibility of such damage.
|
| 181 |
+
|
| 182 |
+
Portions of HDF5 were developed with support from the University of
|
| 183 |
+
California, Lawrence Livermore National Laboratory (UC LLNL). The
|
| 184 |
+
following statement applies to those portions of the product and must
|
| 185 |
+
be retained in any redistribution of source code, binaries,
|
| 186 |
+
documentation, and/or accompanying materials:
|
| 187 |
+
|
| 188 |
+
This work was partially produced at the University of California,
|
| 189 |
+
Lawrence Livermore National Laboratory (UC LLNL) under contract
|
| 190 |
+
no. W-7405-ENG-48 (Contract 48) between the U.S. Department of Energy
|
| 191 |
+
(DOE) and The Regents of the University of California (University) for
|
| 192 |
+
the operation of UC LLNL.
|
| 193 |
+
|
| 194 |
+
DISCLAIMER: This work was prepared as an account of work sponsored by
|
| 195 |
+
an agency of the United States Government. Neither the United States
|
| 196 |
+
Government nor the University of California nor any of their
|
| 197 |
+
employees, makes any warranty, express or implied, or assumes any
|
| 198 |
+
liability or responsibility for the accuracy, completeness, or
|
| 199 |
+
usefulness of any information, apparatus, product, or process
|
| 200 |
+
disclosed, or represents that its use would not infringe privately-
|
| 201 |
+
owned rights. Reference herein to any specific commercial products,
|
| 202 |
+
process, or service by trade name, trademark, manufacturer, or
|
| 203 |
+
otherwise, does not necessarily constitute or imply its endorsement,
|
| 204 |
+
recommendation, or favoring by the United States Government or the
|
| 205 |
+
University of California. The views and opinions of authors expressed
|
| 206 |
+
herein do not necessarily state or reflect those of the United States
|
| 207 |
+
Government or the University of California, and shall not be used for
|
| 208 |
+
advertising or product endorsement purposes.
|
| 209 |
+
|
| 210 |
+
For third-party pytables:
|
| 211 |
+
Copyright Notice and Statement for PyTables Software Library and Utilities:
|
| 212 |
+
|
| 213 |
+
Copyright (c) 2002, 2003, 2004 Francesc Altet
|
| 214 |
+
Copyright (c) 2005, 2006, 2007 Carabos Coop. V.
|
| 215 |
+
All rights reserved.
|
| 216 |
+
|
| 217 |
+
Redistribution and use in source and binary forms, with or without
|
| 218 |
+
modification, are permitted provided that the following conditions are
|
| 219 |
+
met:
|
| 220 |
+
|
| 221 |
+
a. Redistributions of source code must retain the above copyright
|
| 222 |
+
notice, this list of conditions and the following disclaimer.
|
| 223 |
+
|
| 224 |
+
b. Redistributions in binary form must reproduce the above copyright
|
| 225 |
+
notice, this list of conditions and the following disclaimer in the
|
| 226 |
+
documentation and/or other materials provided with the
|
| 227 |
+
distribution.
|
| 228 |
+
|
| 229 |
+
c. Neither the name of the Carabos Coop. V. nor the names of its
|
| 230 |
+
contributors may be used to endorse or promote products derived
|
| 231 |
+
from this software without specific prior written permission.
|
| 232 |
+
|
| 233 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 234 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 235 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 236 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 237 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 238 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 239 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 240 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 241 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 242 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 243 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 244 |
+
|
| 245 |
+
For third-party python:
|
| 246 |
+
Python license
|
| 247 |
+
==============
|
| 248 |
+
|
| 249 |
+
#. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
|
| 250 |
+
the Individual or Organization ("Licensee") accessing and otherwise using Python
|
| 251 |
+
Python 2.7.5 software in source or binary form and its associated documentation.
|
| 252 |
+
|
| 253 |
+
#. Subject to the terms and conditions of this License Agreement, PSF hereby
|
| 254 |
+
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
| 255 |
+
analyze, test, perform and/or display publicly, prepare derivative works,
|
| 256 |
+
distribute, and otherwise use Python Python 2.7.5 alone or in any derivative
|
| 257 |
+
version, provided, however, that PSF's License Agreement and PSF's notice of
|
| 258 |
+
copyright, i.e., "Copyright 2001-2013 Python Software Foundation; All Rights
|
| 259 |
+
Reserved" are retained in Python Python 2.7.5 alone or in any derivative version
|
| 260 |
+
prepared by Licensee.
|
| 261 |
+
|
| 262 |
+
#. In the event Licensee prepares a derivative work that is based on or
|
| 263 |
+
incorporates Python Python 2.7.5 or any part thereof, and wants to make the
|
| 264 |
+
derivative work available to others as provided herein, then Licensee hereby
|
| 265 |
+
agrees to include in any such work a brief summary of the changes made to Python
|
| 266 |
+
Python 2.7.5.
|
| 267 |
+
|
| 268 |
+
#. PSF is making Python Python 2.7.5 available to Licensee on an "AS IS" basis.
|
| 269 |
+
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
|
| 270 |
+
EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
|
| 271 |
+
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
|
| 272 |
+
USE OF PYTHON Python 2.7.5 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
|
| 273 |
+
|
| 274 |
+
#. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON Python 2.7.5
|
| 275 |
+
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
|
| 276 |
+
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON Python 2.7.5, OR ANY DERIVATIVE
|
| 277 |
+
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
| 278 |
+
|
| 279 |
+
#. This License Agreement will automatically terminate upon a material breach of
|
| 280 |
+
its terms and conditions.
|
| 281 |
+
|
| 282 |
+
#. Nothing in this License Agreement shall be deemed to create any relationship
|
| 283 |
+
of agency, partnership, or joint venture between PSF and Licensee. This License
|
| 284 |
+
Agreement does not grant permission to use PSF trademarks or trade name in a
|
| 285 |
+
trademark sense to endorse or promote products or services of Licensee, or any
|
| 286 |
+
third party.
|
| 287 |
+
|
| 288 |
+
#. By copying, installing or otherwise using Python Python 2.7.5, Licensee agrees
|
| 289 |
+
to be bound by the terms and conditions of this License Agreement.
|
| 290 |
+
|
| 291 |
+
For third-party stdint:
|
| 292 |
+
Copyright (c) 2006-2008 Alexander Chemeris
|
| 293 |
+
|
| 294 |
+
Redistribution and use in source and binary forms, with or without
|
| 295 |
+
modification, are permitted provided that the following conditions are met:
|
| 296 |
+
|
| 297 |
+
1. Redistributions of source code must retain the above copyright notice,
|
| 298 |
+
this list of conditions and the following disclaimer.
|
| 299 |
+
|
| 300 |
+
2. Redistributions in binary form must reproduce the above copyright
|
| 301 |
+
notice, this list of conditions and the following disclaimer in the
|
| 302 |
+
documentation and/or other materials provided with the distribution.
|
| 303 |
+
|
| 304 |
+
3. The name of the author may be used to endorse or promote products
|
| 305 |
+
derived from this software without specific prior written permission.
|
| 306 |
+
|
| 307 |
+
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED
|
| 308 |
+
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
|
| 309 |
+
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
|
| 310 |
+
EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 311 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
|
| 312 |
+
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
|
| 313 |
+
OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
| 314 |
+
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
|
| 315 |
+
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
|
| 316 |
+
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 317 |
+
|
| 318 |
+
|
| 319 |
+
|
| 320 |
+
Open Source Software Licensed under the BSD 3-Clause License and Other Licenses of the Third-Party Components therein:
|
| 321 |
+
--------------------------------------------------------------------
|
| 322 |
+
1. scipy
|
| 323 |
+
Copyright (c) 2001-2002 Enthought, Inc. 2003-2019, SciPy Developers.
|
| 324 |
+
All rights reserved.
|
| 325 |
+
|
| 326 |
+
|
| 327 |
+
A copy of the BSD 3-Clause License is included in this file.
|
| 328 |
+
|
| 329 |
+
The SciPy repository and source distributions bundle a number of libraries that
|
| 330 |
+
are compatibly licensed. We list these here.
|
| 331 |
+
|
| 332 |
+
Name: Numpydoc
|
| 333 |
+
Files: doc/sphinxext/numpydoc/*
|
| 334 |
+
License: 2-clause BSD
|
| 335 |
+
For details, see doc/sphinxext/LICENSE.txt
|
| 336 |
+
|
| 337 |
+
Name: scipy-sphinx-theme
|
| 338 |
+
Files: doc/scipy-sphinx-theme/*
|
| 339 |
+
License: 3-clause BSD, PSF and Apache 2.0
|
| 340 |
+
For details, see doc/sphinxext/LICENSE.txt
|
| 341 |
+
|
| 342 |
+
Name: Decorator
|
| 343 |
+
Files: scipy/_lib/decorator.py
|
| 344 |
+
License: 2-clause BSD
|
| 345 |
+
For details, see the header inside scipy/_lib/decorator.py
|
| 346 |
+
|
| 347 |
+
Name: ID
|
| 348 |
+
Files: scipy/linalg/src/id_dist/*
|
| 349 |
+
License: 3-clause BSD
|
| 350 |
+
For details, see scipy/linalg/src/id_dist/doc/doc.tex
|
| 351 |
+
|
| 352 |
+
Name: L-BFGS-B
|
| 353 |
+
Files: scipy/optimize/lbfgsb/*
|
| 354 |
+
License: BSD license
|
| 355 |
+
For details, see scipy/optimize/lbfgsb/README
|
| 356 |
+
|
| 357 |
+
Name: SuperLU
|
| 358 |
+
Files: scipy/sparse/linalg/dsolve/SuperLU/*
|
| 359 |
+
License: 3-clause BSD
|
| 360 |
+
For details, see scipy/sparse/linalg/dsolve/SuperLU/License.txt
|
| 361 |
+
|
| 362 |
+
Name: ARPACK
|
| 363 |
+
Files: scipy/sparse/linalg/eigen/arpack/ARPACK/*
|
| 364 |
+
License: 3-clause BSD
|
| 365 |
+
For details, see scipy/sparse/linalg/eigen/arpack/ARPACK/COPYING
|
| 366 |
+
|
| 367 |
+
Name: Qhull
|
| 368 |
+
Files: scipy/spatial/qhull/*
|
| 369 |
+
License: Qhull license (BSD-like)
|
| 370 |
+
For details, see scipy/spatial/qhull/COPYING.txt
|
| 371 |
+
|
| 372 |
+
Name: Cephes
|
| 373 |
+
Files: scipy/special/cephes/*
|
| 374 |
+
License: 3-clause BSD
|
| 375 |
+
Distributed under 3-clause BSD license with permission from the author,
|
| 376 |
+
see https://lists.debian.org/debian-legal/2004/12/msg00295.html
|
| 377 |
+
|
| 378 |
+
Cephes Math Library Release 2.8: June, 2000
|
| 379 |
+
Copyright 1984, 1995, 2000 by Stephen L. Moshier
|
| 380 |
+
|
| 381 |
+
This software is derived from the Cephes Math Library and is
|
| 382 |
+
incorporated herein by permission of the author.
|
| 383 |
+
|
| 384 |
+
All rights reserved.
|
| 385 |
+
|
| 386 |
+
Redistribution and use in source and binary forms, with or without
|
| 387 |
+
modification, are permitted provided that the following conditions are met:
|
| 388 |
+
* Redistributions of source code must retain the above copyright
|
| 389 |
+
notice, this list of conditions and the following disclaimer.
|
| 390 |
+
* Redistributions in binary form must reproduce the above copyright
|
| 391 |
+
notice, this list of conditions and the following disclaimer in the
|
| 392 |
+
documentation and/or other materials provided with the distribution.
|
| 393 |
+
* Neither the name of the <organization> nor the
|
| 394 |
+
names of its contributors may be used to endorse or promote products
|
| 395 |
+
derived from this software without specific prior written permission.
|
| 396 |
+
|
| 397 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 398 |
+
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 399 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 400 |
+
DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
|
| 401 |
+
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
|
| 402 |
+
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
|
| 403 |
+
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
|
| 404 |
+
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 405 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
|
| 406 |
+
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 407 |
+
|
| 408 |
+
Name: Faddeeva
|
| 409 |
+
Files: scipy/special/Faddeeva.*
|
| 410 |
+
License: MIT
|
| 411 |
+
Copyright (c) 2012 Massachusetts Institute of Technology
|
| 412 |
+
|
| 413 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
| 414 |
+
a copy of this software and associated documentation files (the
|
| 415 |
+
"Software"), to deal in the Software without restriction, including
|
| 416 |
+
without limitation the rights to use, copy, modify, merge, publish,
|
| 417 |
+
distribute, sublicense, and/or sell copies of the Software, and to
|
| 418 |
+
permit persons to whom the Software is furnished to do so, subject to
|
| 419 |
+
the following conditions:
|
| 420 |
+
|
| 421 |
+
The above copyright notice and this permission notice shall be
|
| 422 |
+
included in all copies or substantial portions of the Software.
|
| 423 |
+
|
| 424 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 425 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
| 426 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 427 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
| 428 |
+
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
| 429 |
+
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
| 430 |
+
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 431 |
+
|
| 432 |
+
Name: qd
|
| 433 |
+
Files: scipy/special/cephes/dd_*.[ch]
|
| 434 |
+
License: modified BSD license ("BSD-LBNL-License.doc")
|
| 435 |
+
This work was supported by the Director, Office of Science, Division
|
| 436 |
+
of Mathematical, Information, and Computational Sciences of the
|
| 437 |
+
U.S. Department of Energy under contract numbers DE-AC03-76SF00098 and
|
| 438 |
+
DE-AC02-05CH11231.
|
| 439 |
+
|
| 440 |
+
Copyright (c) 2003-2009, The Regents of the University of California,
|
| 441 |
+
through Lawrence Berkeley National Laboratory (subject to receipt of
|
| 442 |
+
any required approvals from U.S. Dept. of Energy) All rights reserved.
|
| 443 |
+
|
| 444 |
+
1. Redistribution and use in source and binary forms, with or
|
| 445 |
+
without modification, are permitted provided that the following
|
| 446 |
+
conditions are met:
|
| 447 |
+
|
| 448 |
+
(1) Redistributions of source code must retain the copyright
|
| 449 |
+
notice, this list of conditions and the following disclaimer.
|
| 450 |
+
|
| 451 |
+
(2) Redistributions in binary form must reproduce the copyright
|
| 452 |
+
notice, this list of conditions and the following disclaimer in
|
| 453 |
+
the documentation and/or other materials provided with the
|
| 454 |
+
distribution.
|
| 455 |
+
|
| 456 |
+
(3) Neither the name of the University of California, Lawrence
|
| 457 |
+
Berkeley National Laboratory, U.S. Dept. of Energy nor the names
|
| 458 |
+
of its contributors may be used to endorse or promote products
|
| 459 |
+
derived from this software without specific prior written
|
| 460 |
+
permission.
|
| 461 |
+
|
| 462 |
+
2. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
| 463 |
+
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
| 464 |
+
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
| 465 |
+
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
| 466 |
+
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
| 467 |
+
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
| 468 |
+
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
| 469 |
+
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
| 470 |
+
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
| 471 |
+
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 472 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 473 |
+
|
| 474 |
+
3. You are under no obligation whatsoever to provide any bug fixes,
|
| 475 |
+
patches, or upgrades to the features, functionality or performance of
|
| 476 |
+
the source code ("Enhancements") to anyone; however, if you choose to
|
| 477 |
+
make your Enhancements available either publicly, or directly to
|
| 478 |
+
Lawrence Berkeley National Laboratory, without imposing a separate
|
| 479 |
+
written license agreement for such Enhancements, then you hereby grant
|
| 480 |
+
the following license: a non-exclusive, royalty-free perpetual license
|
| 481 |
+
to install, use, modify, prepare derivative works, incorporate into
|
| 482 |
+
other computer software, distribute, and sublicense such enhancements
|
| 483 |
+
or derivative works thereof, in binary and source code form.
|
| 484 |
+
|
| 485 |
+
Name: pypocketfft
|
| 486 |
+
Files: scipy/fft/_pocketfft/[pocketfft.h, pypocketfft.cxx]
|
| 487 |
+
License: 3-Clause BSD
|
| 488 |
+
For details, see scipy/fft/_pocketfft/LICENSE.md
|
| 489 |
+
|
| 490 |
+
Name: uarray
|
| 491 |
+
Files: scipy/_lib/uarray/*
|
| 492 |
+
License: 3-Clause BSD
|
| 493 |
+
For details, see scipy/_lib/uarray/LICENSE
|
| 494 |
+
|
| 495 |
+
Name: ampgo
|
| 496 |
+
Files: benchmarks/benchmarks/go_benchmark_functions/*.py
|
| 497 |
+
License: MIT
|
| 498 |
+
Functions for testing global optimizers, forked from the AMPGO project,
|
| 499 |
+
https://code.google.com/archive/p/ampgo
|
| 500 |
+
|
| 501 |
+
Name: pybind11
|
| 502 |
+
Files: no source files are included, however pybind11 binary artifacts are
|
| 503 |
+
included with every binary build of SciPy.
|
| 504 |
+
License:
|
| 505 |
+
Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>, All rights reserved.
|
| 506 |
+
|
| 507 |
+
Redistribution and use in source and binary forms, with or without
|
| 508 |
+
modification, are permitted provided that the following conditions are met:
|
| 509 |
+
|
| 510 |
+
1. Redistributions of source code must retain the above copyright notice, this
|
| 511 |
+
list of conditions and the following disclaimer.
|
| 512 |
+
|
| 513 |
+
2. Redistributions in binary form must reproduce the above copyright notice,
|
| 514 |
+
this list of conditions and the following disclaimer in the documentation
|
| 515 |
+
and/or other materials provided with the distribution.
|
| 516 |
+
|
| 517 |
+
3. Neither the name of the copyright holder nor the names of its contributors
|
| 518 |
+
may be used to endorse or promote products derived from this software
|
| 519 |
+
without specific prior written permission.
|
| 520 |
+
|
| 521 |
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
|
| 522 |
+
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
| 523 |
+
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
| 524 |
+
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
| 525 |
+
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
| 526 |
+
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
| 527 |
+
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
| 528 |
+
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
| 529 |
+
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
| 530 |
+
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
| 531 |
+
|
| 532 |
+
|
| 533 |
+
|
| 534 |
+
Open Source Software Licensed under the Specific License and Other Licenses of the Third-Party Components therein:
|
| 535 |
+
--------------------------------------------------------------------
|
| 536 |
+
1. matplotlib
|
| 537 |
+
Copyright (c)
|
| 538 |
+
2012- Matplotlib Development Team; All Rights Reserved
|
| 539 |
+
|
| 540 |
+
|
| 541 |
+
Terms of the Specific License:
|
| 542 |
+
--------------------------------------------------------------------
|
| 543 |
+
1. This LICENSE AGREEMENT is between the Matplotlib Development Team
|
| 544 |
+
("MDT"), and the Individual or Organization ("Licensee") accessing and
|
| 545 |
+
otherwise using matplotlib software in source or binary form and its
|
| 546 |
+
associated documentation.
|
| 547 |
+
|
| 548 |
+
2. Subject to the terms and conditions of this License Agreement, MDT
|
| 549 |
+
hereby grants Licensee a nonexclusive, royalty-free, world-wide license
|
| 550 |
+
to reproduce, analyze, test, perform and/or display publicly, prepare
|
| 551 |
+
derivative works, distribute, and otherwise use matplotlib
|
| 552 |
+
alone or in any derivative version, provided, however, that MDT's
|
| 553 |
+
License Agreement and MDT's notice of copyright, i.e., "Copyright (c)
|
| 554 |
+
2012- Matplotlib Development Team; All Rights Reserved" are retained in
|
| 555 |
+
matplotlib alone or in any derivative version prepared by
|
| 556 |
+
Licensee.
|
| 557 |
+
|
| 558 |
+
3. In the event Licensee prepares a derivative work that is based on or
|
| 559 |
+
incorporates matplotlib or any part thereof, and wants to
|
| 560 |
+
make the derivative work available to others as provided herein, then
|
| 561 |
+
Licensee hereby agrees to include in any such work a brief summary of
|
| 562 |
+
the changes made to matplotlib .
|
| 563 |
+
|
| 564 |
+
4. MDT is making matplotlib available to Licensee on an "AS
|
| 565 |
+
IS" basis. MDT MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
| 566 |
+
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, MDT MAKES NO AND
|
| 567 |
+
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
| 568 |
+
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF MATPLOTLIB
|
| 569 |
+
WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
|
| 570 |
+
|
| 571 |
+
5. MDT SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF MATPLOTLIB
|
| 572 |
+
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR
|
| 573 |
+
LOSS AS A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING
|
| 574 |
+
MATPLOTLIB , OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF
|
| 575 |
+
THE POSSIBILITY THEREOF.
|
| 576 |
+
|
| 577 |
+
6. This License Agreement will automatically terminate upon a material
|
| 578 |
+
breach of its terms and conditions.
|
| 579 |
+
|
| 580 |
+
7. Nothing in this License Agreement shall be deemed to create any
|
| 581 |
+
relationship of agency, partnership, or joint venture between MDT and
|
| 582 |
+
Licensee. This License Agreement does not grant permission to use MDT
|
| 583 |
+
trademarks or trade name in a trademark sense to endorse or promote
|
| 584 |
+
products or services of Licensee, or any third party.
|
| 585 |
+
|
| 586 |
+
8. By copying, installing or otherwise using matplotlib ,
|
| 587 |
+
Licensee agrees to be bound by the terms and conditions of this License
|
| 588 |
+
Agreement.
|
| 589 |
+
|
| 590 |
+
For third-party cmr10.pfb:
|
| 591 |
+
The cmr10.pfb file is a Type-1 version of one of Knuth's Computer Modern fonts.
|
| 592 |
+
It is included here as test data only, but the following license applies.
|
| 593 |
+
|
| 594 |
+
Copyright (c) 1997, 2009, American Mathematical Society (http://www.ams.org).
|
| 595 |
+
All Rights Reserved.
|
| 596 |
+
|
| 597 |
+
"cmb10" is a Reserved Font Name for this Font Software.
|
| 598 |
+
"cmbsy10" is a Reserved Font Name for this Font Software.
|
| 599 |
+
"cmbsy5" is a Reserved Font Name for this Font Software.
|
| 600 |
+
"cmbsy6" is a Reserved Font Name for this Font Software.
|
| 601 |
+
"cmbsy7" is a Reserved Font Name for this Font Software.
|
| 602 |
+
"cmbsy8" is a Reserved Font Name for this Font Software.
|
| 603 |
+
"cmbsy9" is a Reserved Font Name for this Font Software.
|
| 604 |
+
"cmbx10" is a Reserved Font Name for this Font Software.
|
| 605 |
+
"cmbx12" is a Reserved Font Name for this Font Software.
|
| 606 |
+
"cmbx5" is a Reserved Font Name for this Font Software.
|
| 607 |
+
"cmbx6" is a Reserved Font Name for this Font Software.
|
| 608 |
+
"cmbx7" is a Reserved Font Name for this Font Software.
|
| 609 |
+
"cmbx8" is a Reserved Font Name for this Font Software.
|
| 610 |
+
"cmbx9" is a Reserved Font Name for this Font Software.
|
| 611 |
+
"cmbxsl10" is a Reserved Font Name for this Font Software.
|
| 612 |
+
"cmbxti10" is a Reserved Font Name for this Font Software.
|
| 613 |
+
"cmcsc10" is a Reserved Font Name for this Font Software.
|
| 614 |
+
"cmcsc8" is a Reserved Font Name for this Font Software.
|
| 615 |
+
"cmcsc9" is a Reserved Font Name for this Font Software.
|
| 616 |
+
"cmdunh10" is a Reserved Font Name for this Font Software.
|
| 617 |
+
"cmex10" is a Reserved Font Name for this Font Software.
|
| 618 |
+
"cmex7" is a Reserved Font Name for this Font Software.
|
| 619 |
+
"cmex8" is a Reserved Font Name for this Font Software.
|
| 620 |
+
"cmex9" is a Reserved Font Name for this Font Software.
|
| 621 |
+
"cmff10" is a Reserved Font Name for this Font Software.
|
| 622 |
+
"cmfi10" is a Reserved Font Name for this Font Software.
|
| 623 |
+
"cmfib8" is a Reserved Font Name for this Font Software.
|
| 624 |
+
"cminch" is a Reserved Font Name for this Font Software.
|
| 625 |
+
"cmitt10" is a Reserved Font Name for this Font Software.
|
| 626 |
+
"cmmi10" is a Reserved Font Name for this Font Software.
|
| 627 |
+
"cmmi12" is a Reserved Font Name for this Font Software.
|
| 628 |
+
"cmmi5" is a Reserved Font Name for this Font Software.
|
| 629 |
+
"cmmi6" is a Reserved Font Name for this Font Software.
|
| 630 |
+
"cmmi7" is a Reserved Font Name for this Font Software.
|
| 631 |
+
"cmmi8" is a Reserved Font Name for this Font Software.
|
| 632 |
+
"cmmi9" is a Reserved Font Name for this Font Software.
|
| 633 |
+
"cmmib10" is a Reserved Font Name for this Font Software.
|
| 634 |
+
"cmmib5" is a Reserved Font Name for this Font Software.
|
| 635 |
+
"cmmib6" is a Reserved Font Name for this Font Software.
|
| 636 |
+
"cmmib7" is a Reserved Font Name for this Font Software.
|
| 637 |
+
"cmmib8" is a Reserved Font Name for this Font Software.
|
| 638 |
+
"cmmib9" is a Reserved Font Name for this Font Software.
|
| 639 |
+
"cmr10" is a Reserved Font Name for this Font Software.
|
| 640 |
+
"cmr12" is a Reserved Font Name for this Font Software.
|
| 641 |
+
"cmr17" is a Reserved Font Name for this Font Software.
|
| 642 |
+
"cmr5" is a Reserved Font Name for this Font Software.
|
| 643 |
+
"cmr6" is a Reserved Font Name for this Font Software.
|
| 644 |
+
"cmr7" is a Reserved Font Name for this Font Software.
|
| 645 |
+
"cmr8" is a Reserved Font Name for this Font Software.
|
| 646 |
+
"cmr9" is a Reserved Font Name for this Font Software.
|
| 647 |
+
"cmsl10" is a Reserved Font Name for this Font Software.
|
| 648 |
+
"cmsl12" is a Reserved Font Name for this Font Software.
|
| 649 |
+
"cmsl8" is a Reserved Font Name for this Font Software.
|
| 650 |
+
"cmsl9" is a Reserved Font Name for this Font Software.
|
| 651 |
+
"cmsltt10" is a Reserved Font Name for this Font Software.
|
| 652 |
+
"cmss10" is a Reserved Font Name for this Font Software.
|
| 653 |
+
"cmss12" is a Reserved Font Name for this Font Software.
|
| 654 |
+
"cmss17" is a Reserved Font Name for this Font Software.
|
| 655 |
+
"cmss8" is a Reserved Font Name for this Font Software.
|
| 656 |
+
"cmss9" is a Reserved Font Name for this Font Software.
|
| 657 |
+
"cmssbx10" is a Reserved Font Name for this Font Software.
|
| 658 |
+
"cmssdc10" is a Reserved Font Name for this Font Software.
|
| 659 |
+
"cmssi10" is a Reserved Font Name for this Font Software.
|
| 660 |
+
"cmssi12" is a Reserved Font Name for this Font Software.
|
| 661 |
+
"cmssi17" is a Reserved Font Name for this Font Software.
|
| 662 |
+
"cmssi8" is a Reserved Font Name for this Font Software.
|
| 663 |
+
"cmssi9" is a Reserved Font Name for this Font Software.
|
| 664 |
+
"cmssq8" is a Reserved Font Name for this Font Software.
|
| 665 |
+
"cmssqi8" is a Reserved Font Name for this Font Software.
|
| 666 |
+
"cmsy10" is a Reserved Font Name for this Font Software.
|
| 667 |
+
"cmsy5" is a Reserved Font Name for this Font Software.
|
| 668 |
+
"cmsy6" is a Reserved Font Name for this Font Software.
|
| 669 |
+
"cmsy7" is a Reserved Font Name for this Font Software.
|
| 670 |
+
"cmsy8" is a Reserved Font Name for this Font Software.
|
| 671 |
+
"cmsy9" is a Reserved Font Name for this Font Software.
|
| 672 |
+
"cmtcsc10" is a Reserved Font Name for this Font Software.
|
| 673 |
+
"cmtex10" is a Reserved Font Name for this Font Software.
|
| 674 |
+
"cmtex8" is a Reserved Font Name for this Font Software.
|
| 675 |
+
"cmtex9" is a Reserved Font Name for this Font Software.
|
| 676 |
+
"cmti10" is a Reserved Font Name for this Font Software.
|
| 677 |
+
"cmti12" is a Reserved Font Name for this Font Software.
|
| 678 |
+
"cmti7" is a Reserved Font Name for this Font Software.
|
| 679 |
+
"cmti8" is a Reserved Font Name for this Font Software.
|
| 680 |
+
"cmti9" is a Reserved Font Name for this Font Software.
|
| 681 |
+
"cmtt10" is a Reserved Font Name for this Font Software.
|
| 682 |
+
"cmtt12" is a Reserved Font Name for this Font Software.
|
| 683 |
+
"cmtt8" is a Reserved Font Name for this Font Software.
|
| 684 |
+
"cmtt9" is a Reserved Font Name for this Font Software.
|
| 685 |
+
"cmu10" is a Reserved Font Name for this Font Software.
|
| 686 |
+
"cmvtt10" is a Reserved Font Name for this Font Software.
|
| 687 |
+
"euex10" is a Reserved Font Name for this Font Software.
|
| 688 |
+
"euex7" is a Reserved Font Name for this Font Software.
|
| 689 |
+
"euex8" is a Reserved Font Name for this Font Software.
|
| 690 |
+
"euex9" is a Reserved Font Name for this Font Software.
|
| 691 |
+
"eufb10" is a Reserved Font Name for this Font Software.
|
| 692 |
+
"eufb5" is a Reserved Font Name for this Font Software.
|
| 693 |
+
"eufb7" is a Reserved Font Name for this Font Software.
|
| 694 |
+
"eufm10" is a Reserved Font Name for this Font Software.
|
| 695 |
+
"eufm5" is a Reserved Font Name for this Font Software.
|
| 696 |
+
"eufm7" is a Reserved Font Name for this Font Software.
|
| 697 |
+
"eurb10" is a Reserved Font Name for this Font Software.
|
| 698 |
+
"eurb5" is a Reserved Font Name for this Font Software.
|
| 699 |
+
"eurb7" is a Reserved Font Name for this Font Software.
|
| 700 |
+
"eurm10" is a Reserved Font Name for this Font Software.
|
| 701 |
+
"eurm5" is a Reserved Font Name for this Font Software.
|
| 702 |
+
"eurm7" is a Reserved Font Name for this Font Software.
|
| 703 |
+
"eusb10" is a Reserved Font Name for this Font Software.
|
| 704 |
+
"eusb5" is a Reserved Font Name for this Font Software.
|
| 705 |
+
"eusb7" is a Reserved Font Name for this Font Software.
|
| 706 |
+
"eusm10" is a Reserved Font Name for this Font Software.
|
| 707 |
+
"eusm5" is a Reserved Font Name for this Font Software.
|
| 708 |
+
"eusm7" is a Reserved Font Name for this Font Software.
|
| 709 |
+
"lasy10" is a Reserved Font Name for this Font Software.
|
| 710 |
+
"lasy5" is a Reserved Font Name for this Font Software.
|
| 711 |
+
"lasy6" is a Reserved Font Name for this Font Software.
|
| 712 |
+
"lasy7" is a Reserved Font Name for this Font Software.
|
| 713 |
+
"lasy8" is a Reserved Font Name for this Font Software.
|
| 714 |
+
"lasy9" is a Reserved Font Name for this Font Software.
|
| 715 |
+
"lasyb10" is a Reserved Font Name for this Font Software.
|
| 716 |
+
"lcircle1" is a Reserved Font Name for this Font Software.
|
| 717 |
+
"lcirclew" is a Reserved Font Name for this Font Software.
|
| 718 |
+
"lcmss8" is a Reserved Font Name for this Font Software.
|
| 719 |
+
"lcmssb8" is a Reserved Font Name for this Font Software.
|
| 720 |
+
"lcmssi8" is a Reserved Font Name for this Font Software.
|
| 721 |
+
"line10" is a Reserved Font Name for this Font Software.
|
| 722 |
+
"linew10" is a Reserved Font Name for this Font Software.
|
| 723 |
+
"msam10" is a Reserved Font Name for this Font Software.
|
| 724 |
+
"msam5" is a Reserved Font Name for this Font Software.
|
| 725 |
+
"msam6" is a Reserved Font Name for this Font Software.
|
| 726 |
+
"msam7" is a Reserved Font Name for this Font Software.
|
| 727 |
+
"msam8" is a Reserved Font Name for this Font Software.
|
| 728 |
+
"msam9" is a Reserved Font Name for this Font Software.
|
| 729 |
+
"msbm10" is a Reserved Font Name for this Font Software.
|
| 730 |
+
"msbm5" is a Reserved Font Name for this Font Software.
|
| 731 |
+
"msbm6" is a Reserved Font Name for this Font Software.
|
| 732 |
+
"msbm7" is a Reserved Font Name for this Font Software.
|
| 733 |
+
"msbm8" is a Reserved Font Name for this Font Software.
|
| 734 |
+
"msbm9" is a Reserved Font Name for this Font Software.
|
| 735 |
+
"wncyb10" is a Reserved Font Name for this Font Software.
|
| 736 |
+
"wncyi10" is a Reserved Font Name for this Font Software.
|
| 737 |
+
"wncyr10" is a Reserved Font Name for this Font Software.
|
| 738 |
+
"wncysc10" is a Reserved Font Name for this Font Software.
|
| 739 |
+
"wncyss10" is a Reserved Font Name for this Font Software.
|
| 740 |
+
|
| 741 |
+
This Font Software is licensed under the SIL Open Font License, Version 1.1.
|
| 742 |
+
This license is copied below, and is also available with a FAQ at:
|
| 743 |
+
http://scripts.sil.org/OFL
|
| 744 |
+
|
| 745 |
+
-----------------------------------------------------------
|
| 746 |
+
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
|
| 747 |
+
-----------------------------------------------------------
|
| 748 |
+
|
| 749 |
+
PREAMBLE
|
| 750 |
+
The goals of the Open Font License (OFL) are to stimulate worldwide
|
| 751 |
+
development of collaborative font projects, to support the font creation
|
| 752 |
+
efforts of academic and linguistic communities, and to provide a free and
|
| 753 |
+
open framework in which fonts may be shared and improved in partnership
|
| 754 |
+
with others.
|
| 755 |
+
|
| 756 |
+
The OFL allows the licensed fonts to be used, studied, modified and
|
| 757 |
+
redistributed freely as long as they are not sold by themselves. The
|
| 758 |
+
fonts, including any derivative works, can be bundled, embedded,
|
| 759 |
+
redistributed and/or sold with any software provided that any reserved
|
| 760 |
+
names are not used by derivative works. The fonts and derivatives,
|
| 761 |
+
however, cannot be released under any other type of license. The
|
| 762 |
+
requirement for fonts to remain under this license does not apply
|
| 763 |
+
to any document created using the fonts or their derivatives.
|
| 764 |
+
|
| 765 |
+
DEFINITIONS
|
| 766 |
+
"Font Software" refers to the set of files released by the Copyright
|
| 767 |
+
Holder(s) under this license and clearly marked as such. This may
|
| 768 |
+
include source files, build scripts and documentation.
|
| 769 |
+
|
| 770 |
+
"Reserved Font Name" refers to any names specified as such after the
|
| 771 |
+
copyright statement(s).
|
| 772 |
+
|
| 773 |
+
"Original Version" refers to the collection of Font Software components as
|
| 774 |
+
distributed by the Copyright Holder(s).
|
| 775 |
+
|
| 776 |
+
"Modified Version" refers to any derivative made by adding to, deleting,
|
| 777 |
+
or substituting -- in part or in whole -- any of the components of the
|
| 778 |
+
Original Version, by changing formats or by porting the Font Software to a
|
| 779 |
+
new environment.
|
| 780 |
+
|
| 781 |
+
"Author" refers to any designer, engineer, programmer, technical
|
| 782 |
+
writer or other person who contributed to the Font Software.
|
| 783 |
+
|
| 784 |
+
PERMISSION & CONDITIONS
|
| 785 |
+
Permission is hereby granted, free of charge, to any person obtaining
|
| 786 |
+
a copy of the Font Software, to use, study, copy, merge, embed, modify,
|
| 787 |
+
redistribute, and sell modified and unmodified copies of the Font
|
| 788 |
+
Software, subject to the following conditions:
|
| 789 |
+
|
| 790 |
+
1) Neither the Font Software nor any of its individual components,
|
| 791 |
+
in Original or Modified Versions, may be sold by itself.
|
| 792 |
+
|
| 793 |
+
2) Original or Modified Versions of the Font Software may be bundled,
|
| 794 |
+
redistributed and/or sold with any software, provided that each copy
|
| 795 |
+
contains the above copyright notice and this license. These can be
|
| 796 |
+
included either as stand-alone text files, human-readable headers or
|
| 797 |
+
in the appropriate machine-readable metadata fields within text or
|
| 798 |
+
binary files as long as those fields can be easily viewed by the user.
|
| 799 |
+
|
| 800 |
+
3) No Modified Version of the Font Software may use the Reserved Font
|
| 801 |
+
Name(s) unless explicit written permission is granted by the corresponding
|
| 802 |
+
Copyright Holder. This restriction only applies to the primary font name as
|
| 803 |
+
presented to the users.
|
| 804 |
+
|
| 805 |
+
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font
|
| 806 |
+
Software shall not be used to promote, endorse or advertise any
|
| 807 |
+
Modified Version, except to acknowledge the contribution(s) of the
|
| 808 |
+
Copyright Holder(s) and the Author(s) or with their explicit written
|
| 809 |
+
permission.
|
| 810 |
+
|
| 811 |
+
5) The Font Software, modified or unmodified, in part or in whole,
|
| 812 |
+
must be distributed entirely under this license, and must not be
|
| 813 |
+
distributed under any other license. The requirement for fonts to
|
| 814 |
+
remain under this license does not apply to any document created
|
| 815 |
+
using the Font Software.
|
| 816 |
+
|
| 817 |
+
TERMINATION
|
| 818 |
+
This license becomes null and void if any of the above conditions are
|
| 819 |
+
not met.
|
| 820 |
+
|
| 821 |
+
DISCLAIMER
|
| 822 |
+
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 823 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
|
| 824 |
+
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
|
| 825 |
+
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE
|
| 826 |
+
COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
| 827 |
+
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
|
| 828 |
+
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 829 |
+
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
|
| 830 |
+
OTHER DEALINGS IN THE FONT SOFTWARE.
|
| 831 |
+
|
| 832 |
+
For third-party BaKoMa:
|
| 833 |
+
BaKoMa Fonts Licence
|
| 834 |
+
--------------------
|
| 835 |
+
|
| 836 |
+
This licence covers two font packs (known as BaKoMa Fonts Colelction,
|
| 837 |
+
which is available at `CTAN:fonts/cm/ps-type1/bakoma/'):
|
| 838 |
+
|
| 839 |
+
1) BaKoMa-CM (1.1/12-Nov-94)
|
| 840 |
+
Computer Modern Fonts in PostScript Type 1 and TrueType font formats.
|
| 841 |
+
|
| 842 |
+
2) BaKoMa-AMS (1.2/19-Jan-95)
|
| 843 |
+
AMS TeX fonts in PostScript Type 1 and TrueType font formats.
|
| 844 |
+
|
| 845 |
+
Copyright (C) 1994, 1995, Basil K. Malyshev. All Rights Reserved.
|
| 846 |
+
|
| 847 |
+
Permission to copy and distribute these fonts for any purpose is
|
| 848 |
+
hereby granted without fee, provided that the above copyright notice,
|
| 849 |
+
author statement and this permission notice appear in all copies of
|
| 850 |
+
these fonts and related documentation.
|
| 851 |
+
|
| 852 |
+
Permission to modify and distribute modified fonts for any purpose is
|
| 853 |
+
hereby granted without fee, provided that the copyright notice,
|
| 854 |
+
author statement, this permission notice and location of original
|
| 855 |
+
fonts (http://www.ctan.org/tex-archive/fonts/cm/ps-type1/bakoma)
|
| 856 |
+
appear in all copies of modified fonts and related documentation.
|
| 857 |
+
|
| 858 |
+
Permission to use these fonts (embedding into PostScript, PDF, SVG
|
| 859 |
+
and printing by using any software) is hereby granted without fee.
|
| 860 |
+
It is not required to provide any notices about using these fonts.
|
| 861 |
+
|
| 862 |
+
Basil K. Malyshev
|
| 863 |
+
INSTITUTE FOR HIGH ENERGY PHYSICS
|
| 864 |
+
IHEP, OMVT
|
| 865 |
+
Moscow Region
|
| 866 |
+
142281 PROTVINO
|
| 867 |
+
RUSSIA
|
| 868 |
+
|
| 869 |
+
E-Mail: bakoma@mail.ru
|
| 870 |
+
or malyshev@mail.ihep.ru
|
| 871 |
+
|
| 872 |
+
For thirdparty carlogo:
|
| 873 |
+
----> we renamed carlito -> carlogo to comply with the terms <----
|
| 874 |
+
|
| 875 |
+
Copyright (c) 2010-2013 by tyPoland Lukasz Dziedzic with Reserved Font Name "Carlito".
|
| 876 |
+
|
| 877 |
+
This Font Software is licensed under the SIL Open Font License, Version 1.1.
|
| 878 |
+
This license is copied below, and is also available with a FAQ at: http://scripts.sil.org/OFL
|
| 879 |
+
|
| 880 |
+
-----------------------------------------------------------
|
| 881 |
+
SIL OPEN FONT LICENSE Version 1.1 - 26 February 2007
|
| 882 |
+
-----------------------------------------------------------
|
| 883 |
+
|
| 884 |
+
PREAMBLE
|
| 885 |
+
The goals of the Open Font License (OFL) are to stimulate worldwide development of collaborative font projects, to support the font creation efforts of academic and linguistic communities, and to provide a free and open framework in which fonts may be shared and improved in partnership with others.
|
| 886 |
+
|
| 887 |
+
The OFL allows the licensed fonts to be used, studied, modified and redistributed freely as long as they are not sold by themselves. The fonts, including any derivative works, can be bundled, embedded, redistributed and/or sold with any software provided that any reserved names are not used by derivative works. The fonts and derivatives, however, cannot be released under any other type of license. The requirement for fonts to remain under this license does not apply to any document created using the fonts or their derivatives.
|
| 888 |
+
|
| 889 |
+
DEFINITIONS
|
| 890 |
+
"Font Software" refers to the set of files released by the Copyright Holder(s) under this license and clearly marked as such. This may include source files, build scripts and documentation.
|
| 891 |
+
|
| 892 |
+
"Reserved Font Name" refers to any names specified as such after the copyright statement(s).
|
| 893 |
+
|
| 894 |
+
"Original Version" refers to the collection of Font Software components as distributed by the Copyright Holder(s).
|
| 895 |
+
|
| 896 |
+
"Modified Version" refers to any derivative made by adding to, deleting, or substituting -- in part or in whole -- any of the components of the Original Version, by changing formats or by porting the Font Software to a new environment.
|
| 897 |
+
|
| 898 |
+
"Author" refers to any designer, engineer, programmer, technical writer or other person who contributed to the Font Software.
|
| 899 |
+
|
| 900 |
+
PERMISSION & CONDITIONS
|
| 901 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of the Font Software, to use, study, copy, merge, embed, modify, redistribute, and sell modified and unmodified copies of the Font Software, subject to the following conditions:
|
| 902 |
+
|
| 903 |
+
1) Neither the Font Software nor any of its individual components, in Original or Modified Versions, may be sold by itself.
|
| 904 |
+
|
| 905 |
+
2) Original or Modified Versions of the Font Software may be bundled, redistributed and/or sold with any software, provided that each copy contains the above copyright notice and this license. These can be included either as stand-alone text files, human-readable headers or in the appropriate machine-readable metadata fields within text or binary files as long as those fields can be easily viewed by the user.
|
| 906 |
+
|
| 907 |
+
3) No Modified Version of the Font Software may use the Reserved Font Name(s) unless explicit written permission is granted by the corresponding Copyright Holder. This restriction only applies to the primary font name as presented to the users.
|
| 908 |
+
|
| 909 |
+
4) The name(s) of the Copyright Holder(s) or the Author(s) of the Font Software shall not be used to promote, endorse or advertise any Modified Version, except to acknowledge the contribution(s) of the Copyright Holder(s) and the Author(s) or with their explicit written permission.
|
| 910 |
+
|
| 911 |
+
5) The Font Software, modified or unmodified, in part or in whole, must be distributed entirely under this license, and must not be distributed under any other license. The requirement for fonts to remain under this license does not apply to any document created using the Font Software.
|
| 912 |
+
|
| 913 |
+
TERMINATION
|
| 914 |
+
This license becomes null and void if any of the above conditions are not met.
|
| 915 |
+
|
| 916 |
+
DISCLAIMER
|
| 917 |
+
THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL THE COPYRIGHT HOLDER BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT SOFTWARE.
|
| 918 |
+
|
| 919 |
+
For third-party ColorBrewer Color Schemes:
|
| 920 |
+
Apache-Style Software License for ColorBrewer Color Schemes
|
| 921 |
+
|
| 922 |
+
Version 1.1
|
| 923 |
+
|
| 924 |
+
Copyright (c) 2002 Cynthia Brewer, Mark Harrower, and The Pennsylvania
|
| 925 |
+
State University. All rights reserved. Redistribution and use in source
|
| 926 |
+
and binary forms, with or without modification, are permitted provided
|
| 927 |
+
that the following conditions are met:
|
| 928 |
+
|
| 929 |
+
1. Redistributions as source code must retain the above copyright notice,
|
| 930 |
+
this list of conditions and the following disclaimer.
|
| 931 |
+
|
| 932 |
+
2. The end-user documentation included with the redistribution, if any,
|
| 933 |
+
must include the following acknowledgment: "This product includes color
|
| 934 |
+
specifications and designs developed by Cynthia Brewer
|
| 935 |
+
(http://colorbrewer.org/)." Alternately, this acknowledgment may appear in
|
| 936 |
+
the software itself, if and wherever such third-party acknowledgments
|
| 937 |
+
normally appear.
|
| 938 |
+
|
| 939 |
+
3. The name "ColorBrewer" must not be used to endorse or promote products
|
| 940 |
+
derived from this software without prior written permission. For written
|
| 941 |
+
permission, please contact Cynthia Brewer at cbrewer@psu.edu.
|
| 942 |
+
|
| 943 |
+
4. Products derived from this software may not be called "ColorBrewer",
|
| 944 |
+
nor may "ColorBrewer" appear in their name, without prior written
|
| 945 |
+
permission of Cynthia Brewer.
|
| 946 |
+
|
| 947 |
+
THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESSED OR IMPLIED WARRANTIES,
|
| 948 |
+
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
|
| 949 |
+
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
|
| 950 |
+
CYNTHIA BREWER, MARK HARROWER, OR THE PENNSYLVANIA STATE UNIVERSITY BE
|
| 951 |
+
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
| 952 |
+
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
| 953 |
+
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
|
| 954 |
+
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
|
| 955 |
+
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
|
| 956 |
+
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
|
| 957 |
+
POSSIBILITY OF SUCH DAMAGE.
|
| 958 |
+
|
| 959 |
+
For third-party JSXTOOLS_RESIZE_OBSERVER:
|
| 960 |
+
# CC0 1.0 Universal
|
| 961 |
+
|
| 962 |
+
## Statement of Purpose
|
| 963 |
+
|
| 964 |
+
The laws of most jurisdictions throughout the world automatically confer
|
| 965 |
+
exclusive Copyright and Related Rights (defined below) upon the creator and
|
| 966 |
+
subsequent owner(s) (each and all, an “owner”) of an original work of
|
| 967 |
+
authorship and/or a database (each, a “Work”).
|
| 968 |
+
|
| 969 |
+
Certain owners wish to permanently relinquish those rights to a Work for the
|
| 970 |
+
purpose of contributing to a commons of creative, cultural and scientific works
|
| 971 |
+
(“Commons”) that the public can reliably and without fear of later claims of
|
| 972 |
+
infringement build upon, modify, incorporate in other works, reuse and
|
| 973 |
+
redistribute as freely as possible in any form whatsoever and for any purposes,
|
| 974 |
+
including without limitation commercial purposes. These owners may contribute
|
| 975 |
+
to the Commons to promote the ideal of a free culture and the further
|
| 976 |
+
production of creative, cultural and scientific works, or to gain reputation or
|
| 977 |
+
greater distribution for their Work in part through the use and efforts of
|
| 978 |
+
others.
|
| 979 |
+
|
| 980 |
+
For these and/or other purposes and motivations, and without any expectation of
|
| 981 |
+
additional consideration or compensation, the person associating CC0 with a
|
| 982 |
+
Work (the “Affirmer”), to the extent that he or she is an owner of Copyright
|
| 983 |
+
and Related Rights in the Work, voluntarily elects to apply CC0 to the Work and
|
| 984 |
+
publicly distribute the Work under its terms, with knowledge of his or her
|
| 985 |
+
Copyright and Related Rights in the Work and the meaning and intended legal
|
| 986 |
+
effect of CC0 on those rights.
|
| 987 |
+
|
| 988 |
+
1. Copyright and Related Rights. A Work made available under CC0 may be
|
| 989 |
+
protected by copyright and related or neighboring rights (“Copyright and
|
| 990 |
+
Related Rights”). Copyright and Related Rights include, but are not limited
|
| 991 |
+
to, the following:
|
| 992 |
+
1. the right to reproduce, adapt, distribute, perform, display, communicate,
|
| 993 |
+
and translate a Work;
|
| 994 |
+
2. moral rights retained by the original author(s) and/or performer(s);
|
| 995 |
+
3. publicity and privacy rights pertaining to a person’s image or likeness
|
| 996 |
+
depicted in a Work;
|
| 997 |
+
4. rights protecting against unfair competition in regards to a Work,
|
| 998 |
+
subject to the limitations in paragraph 4(i), below;
|
| 999 |
+
5. rights protecting the extraction, dissemination, use and reuse of data in
|
| 1000 |
+
a Work;
|
| 1001 |
+
6. database rights (such as those arising under Directive 96/9/EC of the
|
| 1002 |
+
European Parliament and of the Council of 11 March 1996 on the legal
|
| 1003 |
+
protection of databases, and under any national implementation thereof,
|
| 1004 |
+
including any amended or successor version of such directive); and
|
| 1005 |
+
7. other similar, equivalent or corresponding rights throughout the world
|
| 1006 |
+
based on applicable law or treaty, and any national implementations
|
| 1007 |
+
thereof.
|
| 1008 |
+
|
| 1009 |
+
2. Waiver. To the greatest extent permitted by, but not in contravention of,
|
| 1010 |
+
applicable law, Affirmer hereby overtly, fully, permanently, irrevocably and
|
| 1011 |
+
unconditionally waives, abandons, and surrenders all of Affirmer’s Copyright
|
| 1012 |
+
and Related Rights and associated claims and causes of action, whether now
|
| 1013 |
+
known or unknown (including existing as well as future claims and causes of
|
| 1014 |
+
action), in the Work (i) in all territories worldwide, (ii) for the maximum
|
| 1015 |
+
duration provided by applicable law or treaty (including future time
|
| 1016 |
+
extensions), (iii) in any current or future medium and for any number of
|
| 1017 |
+
copies, and (iv) for any purpose whatsoever, including without limitation
|
| 1018 |
+
commercial, advertising or promotional purposes (the “Waiver”). Affirmer
|
| 1019 |
+
makes the Waiver for the benefit of each member of the public at large and
|
| 1020 |
+
to the detriment of Affirmer’s heirs and successors, fully intending that
|
| 1021 |
+
such Waiver shall not be subject to revocation, rescission, cancellation,
|
| 1022 |
+
termination, or any other legal or equitable action to disrupt the quiet
|
| 1023 |
+
enjoyment of the Work by the public as contemplated by Affirmer’s express
|
| 1024 |
+
Statement of Purpose.
|
| 1025 |
+
|
| 1026 |
+
3. Public License Fallback. Should any part of the Waiver for any reason be
|
| 1027 |
+
judged legally invalid or ineffective under applicable law, then the Waiver
|
| 1028 |
+
shall be preserved to the maximum extent permitted taking into account
|
| 1029 |
+
Affirmer’s express Statement of Purpose. In addition, to the extent the
|
| 1030 |
+
Waiver is so judged Affirmer hereby grants to each affected person a
|
| 1031 |
+
royalty-free, non transferable, non sublicensable, non exclusive,
|
| 1032 |
+
irrevocable and unconditional license to exercise Affirmer’s Copyright and
|
| 1033 |
+
Related Rights in the Work (i) in all territories worldwide, (ii) for the
|
| 1034 |
+
maximum duration provided by applicable law or treaty (including future time
|
| 1035 |
+
extensions), (iii) in any current or future medium and for any number of
|
| 1036 |
+
copies, and (iv) for any purpose whatsoever, including without limitation
|
| 1037 |
+
commercial, advertising or promotional purposes (the “License”). The License
|
| 1038 |
+
shall be deemed effective as of the date CC0 was applied by Affirmer to the
|
| 1039 |
+
Work. Should any part of the License for any reason be judged legally
|
| 1040 |
+
invalid or ineffective under applicable law, such partial invalidity or
|
| 1041 |
+
ineffectiveness shall not invalidate the remainder of the License, and in
|
| 1042 |
+
such case Affirmer hereby affirms that he or she will not (i) exercise any
|
| 1043 |
+
of his or her remaining Copyright and Related Rights in the Work or (ii)
|
| 1044 |
+
assert any associated claims and causes of action with respect to the Work,
|
| 1045 |
+
in either case contrary to Affirmer’s express Statement of Purpose.
|
| 1046 |
+
|
| 1047 |
+
4. Limitations and Disclaimers.
|
| 1048 |
+
1. No trademark or patent rights held by Affirmer are waived, abandoned,
|
| 1049 |
+
surrendered, licensed or otherwise affected by this document.
|
| 1050 |
+
2. Affirmer offers the Work as-is and makes no representations or warranties
|
| 1051 |
+
of any kind concerning the Work, express, implied, statutory or
|
| 1052 |
+
otherwise, including without limitation warranties of title,
|
| 1053 |
+
merchantability, fitness for a particular purpose, non infringement, or
|
| 1054 |
+
the absence of latent or other defects, accuracy, or the present or
|
| 1055 |
+
absence of errors, whether or not discoverable, all to the greatest
|
| 1056 |
+
extent permissible under applicable law.
|
| 1057 |
+
3. Affirmer disclaims responsibility for clearing rights of other persons
|
| 1058 |
+
that may apply to the Work or any use thereof, including without
|
| 1059 |
+
limitation any person’s Copyright and Related Rights in the Work.
|
| 1060 |
+
Further, Affirmer disclaims responsibility for obtaining any necessary
|
| 1061 |
+
consents, permissions or other rights required for any use of the Work.
|
| 1062 |
+
4. Affirmer understands and acknowledges that Creative Commons is not a
|
| 1063 |
+
party to this document and has no duty or obligation with respect to this
|
| 1064 |
+
CC0 or use of the Work.
|
| 1065 |
+
|
| 1066 |
+
For more information, please see
|
| 1067 |
+
http://creativecommons.org/publicdomain/zero/1.0/.
|
| 1068 |
+
|
| 1069 |
+
For third-party QT4_EDITOR:
|
| 1070 |
+
Module creating PyQt4 form dialogs/layouts to edit various type of parameters
|
| 1071 |
+
|
| 1072 |
+
|
| 1073 |
+
formlayout License Agreement (MIT License)
|
| 1074 |
+
------------------------------------------
|
| 1075 |
+
|
| 1076 |
+
Copyright (c) 2009 Pierre Raybaut
|
| 1077 |
+
|
| 1078 |
+
Permission is hereby granted, free of charge, to any person
|
| 1079 |
+
obtaining a copy of this software and associated documentation
|
| 1080 |
+
files (the "Software"), to deal in the Software without
|
| 1081 |
+
restriction, including without limitation the rights to use,
|
| 1082 |
+
copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 1083 |
+
copies of the Software, and to permit persons to whom the
|
| 1084 |
+
Software is furnished to do so, subject to the following
|
| 1085 |
+
conditions:
|
| 1086 |
+
|
| 1087 |
+
The above copyright notice and this permission notice shall be
|
| 1088 |
+
included in all copies or substantial portions of the Software.
|
| 1089 |
+
|
| 1090 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
| 1091 |
+
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
| 1092 |
+
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
| 1093 |
+
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
| 1094 |
+
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
| 1095 |
+
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
| 1096 |
+
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
| 1097 |
+
OTHER DEALINGS IN THE SOFTWARE.
|
| 1098 |
+
"""
|
| 1099 |
+
|
| 1100 |
+
For third-party SOLARIZED:
|
| 1101 |
+
https://github.com/altercation/solarized/blob/master/LICENSE
|
| 1102 |
+
Copyright (c) 2011 Ethan Schoonover
|
| 1103 |
+
|
| 1104 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
| 1105 |
+
of this software and associated documentation files (the "Software"), to deal
|
| 1106 |
+
in the Software without restriction, including without limitation the rights
|
| 1107 |
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
| 1108 |
+
copies of the Software, and to permit persons to whom the Software is
|
| 1109 |
+
furnished to do so, subject to the following conditions:
|
| 1110 |
+
|
| 1111 |
+
The above copyright notice and this permission notice shall be included in
|
| 1112 |
+
all copies or substantial portions of the Software.
|
| 1113 |
+
|
| 1114 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
| 1115 |
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
| 1116 |
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
| 1117 |
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
| 1118 |
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
| 1119 |
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
| 1120 |
+
THE SOFTWARE.
|
| 1121 |
+
|
| 1122 |
+
For third-party STIX:
|
| 1123 |
+
TERMS AND CONDITIONS
|
| 1124 |
+
|
| 1125 |
+
1. Permission is hereby granted, free of charge, to any person
|
| 1126 |
+
obtaining a copy of the STIX Fonts-TM set accompanying this license
|
| 1127 |
+
(collectively, the "Fonts") and the associated documentation files
|
| 1128 |
+
(collectively with the Fonts, the "Font Software"), to reproduce and
|
| 1129 |
+
distribute the Font Software, including the rights to use, copy, merge
|
| 1130 |
+
and publish copies of the Font Software, and to permit persons to whom
|
| 1131 |
+
the Font Software is furnished to do so same, subject to the following
|
| 1132 |
+
terms and conditions (the "License").
|
| 1133 |
+
|
| 1134 |
+
2. The following copyright and trademark notice and these Terms and
|
| 1135 |
+
Conditions shall be included in all copies of one or more of the Font
|
| 1136 |
+
typefaces and any derivative work created as permitted under this
|
| 1137 |
+
License:
|
| 1138 |
+
|
| 1139 |
+
Copyright (c) 2001-2005 by the STI Pub Companies, consisting of
|
| 1140 |
+
the American Institute of Physics, the American Chemical Society, the
|
| 1141 |
+
American Mathematical Society, the American Physical Society, Elsevier,
|
| 1142 |
+
Inc., and The Institute of Electrical and Electronic Engineers, Inc.
|
| 1143 |
+
Portions copyright (c) 1998-2003 by MicroPress, Inc. Portions copyright
|
| 1144 |
+
(c) 1990 by Elsevier, Inc. All rights reserved. STIX Fonts-TM is a
|
| 1145 |
+
trademark of The Institute of Electrical and Electronics Engineers, Inc.
|
| 1146 |
+
|
| 1147 |
+
3. You may (a) convert the Fonts from one format to another (e.g.,
|
| 1148 |
+
from TrueType to PostScript), in which case the normal and reasonable
|
| 1149 |
+
distortion that occurs during such conversion shall be permitted and (b)
|
| 1150 |
+
embed or include a subset of the Fonts in a document for the purposes of
|
| 1151 |
+
allowing users to read text in the document that utilizes the Fonts. In
|
| 1152 |
+
each case, you may use the STIX Fonts-TM mark to designate the resulting
|
| 1153 |
+
Fonts or subset of the Fonts.
|
| 1154 |
+
|
| 1155 |
+
4. You may also (a) add glyphs or characters to the Fonts, or modify
|
| 1156 |
+
the shape of existing glyphs, so long as the base set of glyphs is not
|
| 1157 |
+
removed and (b) delete glyphs or characters from the Fonts, provided
|
| 1158 |
+
that the resulting font set is distributed with the following
|
| 1159 |
+
disclaimer: "This [name] font does not include all the Unicode points
|
| 1160 |
+
covered in the STIX Fonts-TM set but may include others." In each case,
|
| 1161 |
+
the name used to denote the resulting font set shall not include the
|
| 1162 |
+
term "STIX" or any similar term.
|
| 1163 |
+
|
| 1164 |
+
5. You may charge a fee in connection with the distribution of the
|
| 1165 |
+
Font Software, provided that no copy of one or more of the individual
|
| 1166 |
+
Font typefaces that form the STIX Fonts-TM set may be sold by itself.
|
| 1167 |
+
|
| 1168 |
+
6. THE FONT SOFTWARE IS PROVIDED "AS IS," WITHOUT WARRANTY OF ANY
|
| 1169 |
+
KIND, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, ANY WARRANTIES
|
| 1170 |
+
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
|
| 1171 |
+
OF COPYRIGHT, PATENT, TRADEMARK OR OTHER RIGHT. IN NO EVENT SHALL
|
| 1172 |
+
MICROPRESS OR ANY OF THE STI PUB COMPANIES BE LIABLE FOR ANY CLAIM,
|
| 1173 |
+
DAMAGES OR OTHER LIABILITY, INCLUDING, BUT NOT LIMITED TO, ANY GENERAL,
|
| 1174 |
+
SPECIAL, INDIRECT, INCIDENTAL OR CONSEQUENTIAL DAMAGES, WHETHER IN AN
|
| 1175 |
+
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM OR OUT OF THE USE OR
|
| 1176 |
+
INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE FONT
|
| 1177 |
+
SOFTWARE.
|
| 1178 |
+
|
| 1179 |
+
7. Except as contained in the notice set forth in Section 2, the
|
| 1180 |
+
names MicroPress Inc. and STI Pub Companies, as well as the names of the
|
| 1181 |
+
companies/organizations that compose the STI Pub Companies, shall not be
|
| 1182 |
+
used in advertising or otherwise to promote the sale, use or other
|
| 1183 |
+
dealings in the Font Software without the prior written consent of the
|
| 1184 |
+
respective company or organization.
|
| 1185 |
+
|
| 1186 |
+
8. This License shall become null and void in the event of any
|
| 1187 |
+
material breach of the Terms and Conditions herein by licensee.
|
| 1188 |
+
|
| 1189 |
+
9. A substantial portion of the STIX Fonts set was developed by
|
| 1190 |
+
MicroPress Inc. for the STI Pub Companies. To obtain additional
|
| 1191 |
+
mathematical fonts, please contact MicroPress, Inc., 68-30 Harrow
|
| 1192 |
+
Street, Forest Hills, NY 11375, USA - Phone: (718) 575-1816.
|
| 1193 |
+
|
| 1194 |
+
For third-party YORICK:
|
| 1195 |
+
BSD-style license for gist/yorick colormaps.
|
| 1196 |
+
|
| 1197 |
+
Copyright:
|
| 1198 |
+
|
| 1199 |
+
Copyright (c) 1996. The Regents of the University of California.
|
| 1200 |
+
All rights reserved.
|
| 1201 |
+
|
| 1202 |
+
Permission to use, copy, modify, and distribute this software for any
|
| 1203 |
+
purpose without fee is hereby granted, provided that this entire
|
| 1204 |
+
notice is included in all copies of any software which is or includes
|
| 1205 |
+
a copy or modification of this software and in all copies of the
|
| 1206 |
+
supporting documentation for such software.
|
| 1207 |
+
|
| 1208 |
+
This work was produced at the University of California, Lawrence
|
| 1209 |
+
Livermore National Laboratory under contract no. W-7405-ENG-48 between
|
| 1210 |
+
the U.S. Department of Energy and The Regents of the University of
|
| 1211 |
+
California for the operation of UC LLNL.
|
| 1212 |
+
|
| 1213 |
+
|
| 1214 |
+
DISCLAIMER
|
| 1215 |
+
|
| 1216 |
+
This software was prepared as an account of work sponsored by an
|
| 1217 |
+
agency of the United States Government. Neither the United States
|
| 1218 |
+
Government nor the University of California nor any of their
|
| 1219 |
+
employees, makes any warranty, express or implied, or assumes any
|
| 1220 |
+
liability or responsibility for the accuracy, completeness, or
|
| 1221 |
+
usefulness of any information, apparatus, product, or process
|
| 1222 |
+
disclosed, or represents that its use would not infringe
|
| 1223 |
+
privately-owned rights. Reference herein to any specific commercial
|
| 1224 |
+
products, process, or service by trade name, trademark, manufacturer,
|
| 1225 |
+
or otherwise, does not necessarily constitute or imply its
|
| 1226 |
+
endorsement, recommendation, or favoring by the United States
|
| 1227 |
+
Government or the University of California. The views and opinions of
|
| 1228 |
+
authors expressed herein do not necessarily state or reflect those of
|
| 1229 |
+
the United States Government or the University of California, and
|
| 1230 |
+
shall not be used for advertising or product endorsement purposes.
|
| 1231 |
+
|
| 1232 |
+
|
| 1233 |
+
AUTHOR
|
| 1234 |
+
|
| 1235 |
+
David H. Munro wrote Yorick and Gist. Berkeley Yacc (byacc) generated
|
| 1236 |
+
the Yorick parser. The routines in Math are from LAPACK and FFTPACK;
|
| 1237 |
+
MathC contains C translations by David H. Munro. The algorithms for
|
| 1238 |
+
Yorick's random number generator and several special functions in
|
| 1239 |
+
Yorick/include were taken from Numerical Recipes by Press, et. al.,
|
| 1240 |
+
although the Yorick implementations are unrelated to those in
|
| 1241 |
+
Numerical Recipes. A small amount of code in Gist was adapted from
|
| 1242 |
+
the X11R4 release, copyright M.I.T. -- the complete copyright notice
|
| 1243 |
+
may be found in the (unused) file Gist/host.c.
|
| 1244 |
+
|
| 1245 |
+
|
| 1246 |
+
|
| 1247 |
+
Open Source Software Licensed under the PSF License Version 2:
|
| 1248 |
+
--------------------------------------------------------------------
|
| 1249 |
+
1. Python
|
| 1250 |
+
Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation. All rights reserved.
|
| 1251 |
+
|
| 1252 |
+
Copyright (c) 2000 BeOpen.com. All rights reserved.
|
| 1253 |
+
|
| 1254 |
+
Copyright (c) 1995-2001 Corporation for National Research Initiatives. All rights reserved.
|
| 1255 |
+
|
| 1256 |
+
Copyright (c) 1991-1995 Stichting Mathematisch Centrum. All rights reserved.
|
| 1257 |
+
|
| 1258 |
+
|
| 1259 |
+
Terms of the PSF License Version 2:
|
| 1260 |
+
--------------------------------------------------------------------
|
| 1261 |
+
1. This LICENSE AGREEMENT is between the Python Software Foundation
|
| 1262 |
+
("PSF"), and the Individual or Organization ("Licensee") accessing and
|
| 1263 |
+
otherwise using this software ("Python") in source or binary form and
|
| 1264 |
+
its associated documentation.
|
| 1265 |
+
|
| 1266 |
+
2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
| 1267 |
+
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
| 1268 |
+
analyze, test, perform and/or display publicly, prepare derivative works,
|
| 1269 |
+
distribute, and otherwise use Python alone or in any derivative version,
|
| 1270 |
+
provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
| 1271 |
+
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
| 1272 |
+
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018 Python Software Foundation; All
|
| 1273 |
+
Rights Reserved" are retained in Python alone or in any derivative version
|
| 1274 |
+
prepared by Licensee.
|
| 1275 |
+
|
| 1276 |
+
3. In the event Licensee prepares a derivative work that is based on
|
| 1277 |
+
or incorporates Python or any part thereof, and wants to make
|
| 1278 |
+
the derivative work available to others as provided herein, then
|
| 1279 |
+
Licensee hereby agrees to include in any such work a brief summary of
|
| 1280 |
+
the changes made to Python.
|
| 1281 |
+
|
| 1282 |
+
4. PSF is making Python available to Licensee on an "AS IS"
|
| 1283 |
+
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
| 1284 |
+
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
| 1285 |
+
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
| 1286 |
+
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
| 1287 |
+
INFRINGE ANY THIRD PARTY RIGHTS.
|
| 1288 |
+
|
| 1289 |
+
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
| 1290 |
+
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
| 1291 |
+
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
| 1292 |
+
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
| 1293 |
+
|
| 1294 |
+
6. This License Agreement will automatically terminate upon a material
|
| 1295 |
+
breach of its terms and conditions.
|
| 1296 |
+
|
| 1297 |
+
7. Nothing in this License Agreement shall be deemed to create any
|
| 1298 |
+
relationship of agency, partnership, or joint venture between PSF and
|
| 1299 |
+
Licensee. This License Agreement does not grant permission to use PSF
|
| 1300 |
+
trademarks or trade name in a trademark sense to endorse or promote
|
| 1301 |
+
products or services of Licensee, or any third party.
|
| 1302 |
+
|
| 1303 |
+
8. By copying, installing or otherwise using Python, Licensee
|
| 1304 |
+
agrees to be bound by the terms and conditions of this License
|
| 1305 |
+
Agreement.
|
| 1306 |
+
|
| 1307 |
+
|
| 1308 |
+
|
| 1309 |
+
Open Source Software Licensed under the MIT License:
|
| 1310 |
+
The below software in this distribution may have been modified by THL A29 Limited ("Tencent Modifications"). All Tencent Modifications are Copyright (C) 2021 THL A29 Limited.
|
| 1311 |
+
--------------------------------------------------------------------
|
| 1312 |
+
1. C^3 Framework
|
| 1313 |
+
Copyright (c) 2018 Junyu Gao
|
| 1314 |
+
|
| 1315 |
+
|
| 1316 |
+
Terms of the MIT License:
|
| 1317 |
+
--------------------------------------------------------------------
|
| 1318 |
+
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
|
| 1319 |
+
|
| 1320 |
+
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
|
| 1321 |
+
|
| 1322 |
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
| 1323 |
+
|
| 1324 |
+
|
| 1325 |
+
|
| 1326 |
+
Open Source Software Licensed under the MIT License:
|
| 1327 |
+
--------------------------------------------------------------------
|
| 1328 |
+
1. tensorboardX
|
| 1329 |
+
Copyright (c) 2017 Tzu-Wei Huang
|
| 1330 |
+
|
| 1331 |
+
|
| 1332 |
+
A copy of the MIT License is included in this file.
|
| 1333 |
+
|
| 1334 |
+
|
| 1335 |
+
|
| 1336 |
+
Open Source Software Licensed under the Apache License Version 2.0:
|
| 1337 |
+
The below software in this distribution may have been modified by Tencent.
|
| 1338 |
+
--------------------------------------------------------------------
|
| 1339 |
+
1. DETR
|
| 1340 |
+
Copyright 2020 - present, Facebook, Inc
|
| 1341 |
+
Please note this software has been modified by Tencent in this distribution.
|
| 1342 |
+
|
| 1343 |
+
|
| 1344 |
+
Terms of the Apache License Version 2.0:
|
| 1345 |
+
--------------------------------------------------------------------
|
| 1346 |
+
Apache License
|
| 1347 |
+
|
| 1348 |
+
Version 2.0, January 2004
|
| 1349 |
+
|
| 1350 |
+
http://www.apache.org/licenses/
|
| 1351 |
+
|
| 1352 |
+
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
| 1353 |
+
1. Definitions.
|
| 1354 |
+
|
| 1355 |
+
"License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document.
|
| 1356 |
+
|
| 1357 |
+
"Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License.
|
| 1358 |
+
|
| 1359 |
+
"Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
| 1360 |
+
|
| 1361 |
+
"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.
|
| 1362 |
+
|
| 1363 |
+
"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.
|
| 1364 |
+
|
| 1365 |
+
"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.
|
| 1366 |
+
|
| 1367 |
+
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).
|
| 1368 |
+
|
| 1369 |
+
"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.
|
| 1370 |
+
|
| 1371 |
+
"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."
|
| 1372 |
+
|
| 1373 |
+
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.
|
| 1374 |
+
|
| 1375 |
+
2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.
|
| 1376 |
+
|
| 1377 |
+
3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.
|
| 1378 |
+
|
| 1379 |
+
4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:
|
| 1380 |
+
|
| 1381 |
+
You must give any other recipients of the Work or Derivative Works a copy of this License; and
|
| 1382 |
+
|
| 1383 |
+
You must cause any modified files to carry prominent notices stating that You changed the files; and
|
| 1384 |
+
|
| 1385 |
+
You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and
|
| 1386 |
+
|
| 1387 |
+
If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.
|
| 1388 |
+
|
| 1389 |
+
You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.
|
| 1390 |
+
|
| 1391 |
+
5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.
|
| 1392 |
+
|
| 1393 |
+
6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.
|
| 1394 |
+
|
| 1395 |
+
7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.
|
| 1396 |
+
|
| 1397 |
+
8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.
|
| 1398 |
+
|
| 1399 |
+
9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.
|
| 1400 |
+
|
| 1401 |
+
END OF TERMS AND CONDITIONS
|
MANUAL_TESTING.md
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Manual Testing Checklist
|
| 2 |
+
|
| 3 |
+
- [ ] Image upload accepts JPG, JPEG, and PNG.
|
| 4 |
+
- [ ] Image processing completes and displays predicted points.
|
| 5 |
+
- [ ] Video upload accepts MP4, AVI, and MOV.
|
| 6 |
+
- [ ] Video processing supports shorter and longer videos without fixed duration limits.
|
| 7 |
+
- [ ] Confidence slider changes the number of detected points.
|
| 8 |
+
- [ ] Duplicate merge radius reduces duplicate detections on overlapping patches.
|
| 9 |
+
- [ ] Magnification improves visibility of small drone targets.
|
| 10 |
+
- [ ] Frame skip changes processing speed for video.
|
| 11 |
+
- [ ] Fast processing preset prioritizes speed.
|
| 12 |
+
- [ ] Balanced processing preset gives normal recommended behavior.
|
| 13 |
+
- [ ] Accurate processing preset processes more thoroughly and runs slower.
|
| 14 |
+
- [ ] Tracking keeps stable IDs across nearby moving points.
|
| 15 |
+
- [ ] Alert triggers show normal/advisory/warning/critical states at capacity thresholds.
|
| 16 |
+
- [ ] CSV export downloads and contains frame count fields.
|
| 17 |
+
- [ ] JSON export downloads and contains timeline data.
|
| 18 |
+
- [ ] Annotated video download works after processing.
|
| 19 |
+
- [ ] Invalid video upload shows a clean error message.
|
README.md
ADDED
|
@@ -0,0 +1,214 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Civic Pulse — Crowd Counting
|
| 3 |
+
emoji: 👥
|
| 4 |
+
colorFrom: teal
|
| 5 |
+
colorTo: indigo
|
| 6 |
+
sdk: docker
|
| 7 |
+
pinned: false
|
| 8 |
+
---
|
| 9 |
+
|
| 10 |
+
# Civic Pulse — Tactical Crowd Intelligence
|
| 11 |
+
|
| 12 |
+
A full-stack AI drone monitoring dashboard built on **P2PNet** (ICCV 2021).
|
| 13 |
+
FastAPI backend + React/Vite frontend with real-time WebSocket video streaming.
|
| 14 |
+
|
| 15 |
+
## 🚀 Live Deployment (Free Tier)
|
| 16 |
+
|
| 17 |
+
| Component | Platform | URL |
|
| 18 |
+
|-----------|----------|-----|
|
| 19 |
+
| **Frontend** | Vercel | `https://crowd-counting.vercel.app` |
|
| 20 |
+
| **Backend API** | FastAPI (Docker/HuggingFace Spaces) | `Set your deployed backend URL in frontend/.env.production` |
|
| 21 |
+
| **Model Weights** | HuggingFace Hub | `Set HF_WEIGHTS_REPO to your deployed weights repo` |
|
| 22 |
+
|
| 23 |
+
> ⚠️ The HF Space may sleep after 15 min of inactivity. Open the app 30 seconds before your demo.
|
| 24 |
+
|
| 25 |
+
## ⚡ Quick Local Setup
|
| 26 |
+
|
| 27 |
+
```bash
|
| 28 |
+
# Backend
|
| 29 |
+
pip install -r requirements.txt
|
| 30 |
+
uvicorn api:app --reload
|
| 31 |
+
|
| 32 |
+
# Frontend (in another terminal)
|
| 33 |
+
cd frontend
|
| 34 |
+
npm install
|
| 35 |
+
npm run dev
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
---
|
| 39 |
+
|
| 40 |
+
# P2PNet (ICCV2021 Oral Presentation)
|
| 41 |
+
|
| 42 |
+
This repository contains codes for the official implementation in PyTorch of **P2PNet** as described in [Rethinking Counting and Localization in Crowds: A Purely Point-Based Framework](https://arxiv.org/abs/2107.12746).
|
| 43 |
+
|
| 44 |
+
A brief introduction of P2PNet can be found at [机器之心 (almosthuman)](https://mp.weixin.qq.com/s?__biz=MzA3MzI4MjgzMw==&mid=2650827826&idx=3&sn=edd3d66444130fb34a59d08fab618a9e&chksm=84e5a84cb392215a005a3b3424f20a9d24dc525dcd933960035bf4b6aa740191b5ecb2b7b161&mpshare=1&scene=1&srcid=1004YEOC7HC9daYRYeUio7Xn&sharer_sharetime=1633675738338&sharer_shareid=7d375dccd3b2f9eec5f8b27ee7c04883&version=3.1.16.5505&platform=win#rd).
|
| 45 |
+
|
| 46 |
+
The code is tested with PyTorch 1.5.0. It may not run with other versions.
|
| 47 |
+
|
| 48 |
+
## Visualized demos for P2PNet
|
| 49 |
+
<img src="vis/congested1.png" width="1000"/>
|
| 50 |
+
<img src="vis/congested2.png" width="1000"/>
|
| 51 |
+
<img src="vis/congested3.png" width="1000"/>
|
| 52 |
+
|
| 53 |
+
## The network
|
| 54 |
+
The overall architecture of the P2PNet. Built upon the VGG16, it first introduces an upsampling path to obtain a fine-grained feature map.
|
| 55 |
+
Then it exploits two branches to simultaneously predict a set of point proposals and their confidence scores.
|
| 56 |
+
|
| 57 |
+
<img src="vis/net.png" width="1000"/>
|
| 58 |
+
|
| 59 |
+
## Comparison with state-of-the-art methods
|
| 60 |
+
The P2PNet achieved state-of-the-art performance on several challenging datasets with various densities.
|
| 61 |
+
|
| 62 |
+
| Methods | Venue | SHTechPartA <br> MAE/MSE |SHTechPartB <br> MAE/MSE | UCF_CC_50 <br> MAE/MSE | UCF_QNRF <br> MAE/MSE |
|
| 63 |
+
|:----:|:----:|:----:|:----:|:----:|:----:|
|
| 64 |
+
CAN | CVPR'19 | 62.3/100.0 | 7.8/12.2 | 212.2/**243.7** | 107.0/183.0 |
|
| 65 |
+
Bayesian+ | ICCV'19 | 62.8/101.8 | 7.7/12.7 | 229.3/308.2 | 88.7/154.8 |
|
| 66 |
+
S-DCNet | ICCV'19 | 58.3/95.0 | 6.7/10.7 | 204.2/301.3 | 104.4/176.1 |
|
| 67 |
+
SANet+SPANet | ICCV'19 | 59.4/92.5 | 6.5/**9.9** | 232.6/311.7 | -/- |
|
| 68 |
+
DUBNet | AAAI'20 | 64.6/106.8 | 7.7/12.5 | 243.8/329.3 | 105.6/180.5 |
|
| 69 |
+
SDANet | AAAI'20 | 63.6/101.8 | 7.8/10.2 | 227.6/316.4 | -/- |
|
| 70 |
+
ADSCNet | CVPR'20 | <u>55.4</u>/97.7 | <u>6.4</u>/11.3 | 198.4/267.3 | **71.3**/**132.5**|
|
| 71 |
+
ASNet | CVPR'20 | 57.78/<u>90.13</u> | -/- | <u>174.84</u>/<u>251.63</u> | 91.59/159.71 |
|
| 72 |
+
AMRNet | ECCV'20 | 61.59/98.36 | 7.02/11.00 | 184.0/265.8 | 86.6/152.2 |
|
| 73 |
+
AMSNet | ECCV'20 | 56.7/93.4 | 6.7/10.2 | 208.4/297.3 | 101.8/163.2|
|
| 74 |
+
DM-Count | NeurIPS'20 | 59.7/95.7 | 7.4/11.8 | 211.0/291.5 | 85.6/<u>148.3</u>|
|
| 75 |
+
**Ours** |- | **52.74**/**85.06** | **6.25**/**9.9** | **172.72**/256.18 | <u>85.32</u>/154.5 |
|
| 76 |
+
|
| 77 |
+
Comparison on the [NWPU-Crowd](https://www.crowdbenchmark.com/resultdetail.html?rid=81) dataset.
|
| 78 |
+
|
| 79 |
+
| Methods | MAE[O] |MSE[O] | MAE[L] | MAE[S] |
|
| 80 |
+
|:----:|:----:|:----:|:----:|:----:|
|
| 81 |
+
MCNN | 232.5|714.6 | 220.9|1171.9 |
|
| 82 |
+
SANet | 190.6 | 491.4 | 153.8 | 716.3|
|
| 83 |
+
CSRNet | 121.3 | 387.8 | 112.0 | <u>522.7</u> |
|
| 84 |
+
PCC-Net | 112.3 | 457.0 | 111.0 | 777.6 |
|
| 85 |
+
CANNet | 110.0 | 495.3 | 102.3 | 718.3|
|
| 86 |
+
Bayesian+ | 105.4 | 454.2 | 115.8 | 750.5 |
|
| 87 |
+
S-DCNet | 90.2 | 370.5 | **82.9** | 567.8 |
|
| 88 |
+
DM-Count | <u>88.4</u> | 388.6 | 88.0 | **498.0** |
|
| 89 |
+
**Ours** | **77.44**|**362** | <u>83.28</u>| 553.92 |
|
| 90 |
+
|
| 91 |
+
The overall performance for both counting and localization.
|
| 92 |
+
|
| 93 |
+
|nAP$_{\delta}$|SHTechPartA| SHTechPartB | UCF_CC_50 | UCF_QNRF | NWPU_Crowd |
|
| 94 |
+
|:----:|:----:|:----:|:----:|:----:|:----:|
|
| 95 |
+
$\delta=0.05$ | 10.9\% | 23.8\% | 5.0\% | 5.9\% | 12.9\% |
|
| 96 |
+
$\delta=0.25$ | 70.3\% | 84.2\% | 54.5\% | 55.4\% | 71.3\% |
|
| 97 |
+
$\delta=0.50$ | 90.1\% | 94.1\% | 88.1\% | 83.2\% | 89.1\% |
|
| 98 |
+
$\delta=\{{0.05:0.05:0.50}\}$ | 64.4\% | 76.3\% | 54.3\% | 53.1\% | 65.0\% |
|
| 99 |
+
|
| 100 |
+
Comparison for the localization performance in terms of F1-Measure on NWPU.
|
| 101 |
+
|
| 102 |
+
| Method| F1-Measure |Precision| Recall |
|
| 103 |
+
|:----:|:----:|:----:|:----:|
|
| 104 |
+
FasterRCNN | 0.068 | 0.958 | 0.035 |
|
| 105 |
+
TinyFaces | 0.567 | 0.529 | 0.611 |
|
| 106 |
+
RAZ | 0.599 | 0.666 | 0.543|
|
| 107 |
+
Crowd-SDNet | 0.637 | 0.651 | 0.624 |
|
| 108 |
+
PDRNet | 0.653 | 0.675 | 0.633 |
|
| 109 |
+
TopoCount | 0.692 | 0.683 | **0.701** |
|
| 110 |
+
D2CNet | <u>0.700</u> | **0.741** | 0.662 |
|
| 111 |
+
**Ours** |**0.712** | <u>0.729</u> | <u>0.695</u> |
|
| 112 |
+
|
| 113 |
+
## Installation
|
| 114 |
+
* Clone this repo into a directory named P2PNET_ROOT
|
| 115 |
+
* Organize your datasets as required
|
| 116 |
+
* Install Python dependencies. We use python 3.6.5 and pytorch 1.5.0
|
| 117 |
+
```
|
| 118 |
+
pip install -r requirements.txt
|
| 119 |
+
```
|
| 120 |
+
|
| 121 |
+
## Organize the counting dataset
|
| 122 |
+
We use a list file to collect all the images and their ground truth annotations in a counting dataset. When your dataset is organized as recommended in the following, the format of this list file is defined as:
|
| 123 |
+
```
|
| 124 |
+
train/scene01/img01.jpg train/scene01/img01.txt
|
| 125 |
+
train/scene01/img02.jpg train/scene01/img02.txt
|
| 126 |
+
...
|
| 127 |
+
train/scene02/img01.jpg train/scene02/img01.txt
|
| 128 |
+
```
|
| 129 |
+
|
| 130 |
+
### Dataset structures:
|
| 131 |
+
```
|
| 132 |
+
DATA_ROOT/
|
| 133 |
+
|->train/
|
| 134 |
+
| |->scene01/
|
| 135 |
+
| |->scene02/
|
| 136 |
+
| |->...
|
| 137 |
+
|->test/
|
| 138 |
+
| |->scene01/
|
| 139 |
+
| |->scene02/
|
| 140 |
+
| |->...
|
| 141 |
+
|->train.list
|
| 142 |
+
|->test.list
|
| 143 |
+
```
|
| 144 |
+
DATA_ROOT is your path containing the counting datasets.
|
| 145 |
+
|
| 146 |
+
### Annotations format
|
| 147 |
+
For the annotations of each image, we use a single txt file which contains one annotation per line. Note that indexing for pixel values starts at 0. The expected format of each line is:
|
| 148 |
+
```
|
| 149 |
+
x1 y1
|
| 150 |
+
x2 y2
|
| 151 |
+
...
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
## Training
|
| 155 |
+
|
| 156 |
+
The network can be trained using the `train.py` script. For training on SHTechPartA, use
|
| 157 |
+
|
| 158 |
+
```
|
| 159 |
+
CUDA_VISIBLE_DEVICES=0 python train.py --data_root $DATA_ROOT \
|
| 160 |
+
--dataset_file SHHA \
|
| 161 |
+
--epochs 3500 \
|
| 162 |
+
--lr_drop 3500 \
|
| 163 |
+
--output_dir ./logs \
|
| 164 |
+
--checkpoints_dir ./weights \
|
| 165 |
+
--tensorboard_dir ./logs \
|
| 166 |
+
--lr 0.0001 \
|
| 167 |
+
--lr_backbone 0.00001 \
|
| 168 |
+
--batch_size 8 \
|
| 169 |
+
--eval_freq 1 \
|
| 170 |
+
--gpu_id 0
|
| 171 |
+
```
|
| 172 |
+
By default, a periodic evaluation will be conducted on the validation set.
|
| 173 |
+
|
| 174 |
+
## Testing
|
| 175 |
+
|
| 176 |
+
A trained model (with an MAE of **51.96**) on SHTechPartA is available at "./weights", run the following commands to launch a visualization demo:
|
| 177 |
+
|
| 178 |
+
```
|
| 179 |
+
CUDA_VISIBLE_DEVICES=0 python run_test.py --weight_path ./weights/SHTechA.pth --output_dir ./logs/
|
| 180 |
+
```
|
| 181 |
+
|
| 182 |
+
## Civic Pulse Application
|
| 183 |
+
|
| 184 |
+
The supported application stack in this repository is:
|
| 185 |
+
|
| 186 |
+
- FastAPI backend in `api.py`
|
| 187 |
+
- React/Vite frontend in `frontend/`
|
| 188 |
+
|
| 189 |
+
The application loads `weights/SHTechA.pth` by default.
|
| 190 |
+
|
| 191 |
+
For this application, use the pretrained P2PNet weights directly for inference. Manual point-labeling and one-image fine-tuning are not part of the recommended workflow because they are too small and unstable to improve model quality.
|
| 192 |
+
|
| 193 |
+
## Acknowledgements
|
| 194 |
+
|
| 195 |
+
- Part of the code is borrowed from the [C^3 Framework](https://github.com/gjy3035/C-3-Framework).
|
| 196 |
+
- We refer to [DETR](https://github.com/facebookresearch/detr) to implement our matching strategy.
|
| 197 |
+
|
| 198 |
+
|
| 199 |
+
## Citing P2PNet
|
| 200 |
+
|
| 201 |
+
If you find P2PNet is useful in your project, please consider citing us:
|
| 202 |
+
|
| 203 |
+
```BibTeX
|
| 204 |
+
@inproceedings{song2021rethinking,
|
| 205 |
+
title={Rethinking Counting and Localization in Crowds: A Purely Point-Based Framework},
|
| 206 |
+
author={Song, Qingyu and Wang, Changan and Jiang, Zhengkai and Wang, Yabiao and Tai, Ying and Wang, Chengjie and Li, Jilin and Huang, Feiyue and Wu, Yang},
|
| 207 |
+
journal={Proceedings of the IEEE/CVF International Conference on Computer Vision},
|
| 208 |
+
year={2021}
|
| 209 |
+
}
|
| 210 |
+
```
|
| 211 |
+
|
| 212 |
+
## Related works from Tencent Youtu Lab
|
| 213 |
+
- [AAAI2021] To Choose or to Fuse? Scale Selection for Crowd Counting. ([paper link](https://ojs.aaai.org/index.php/AAAI/article/view/16360) & [codes](https://github.com/TencentYoutuResearch/CrowdCounting-SASNet))
|
| 214 |
+
- [ICCV2021] Uniformity in Heterogeneity: Diving Deep into Count Interval Partition for Crowd Counting. ([paper link](https://arxiv.org/abs/2107.12619) & [codes](https://github.com/TencentYoutuResearch/CrowdCounting-UEPNet))
|
alert_system.py
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
|
| 3 |
+
def render_alert(current_unique_count, threshold):
    """Show a Streamlit capacity banner for the current unique-person count.

    Escalates through success → info → warning → error banners as the count
    approaches and then exceeds the configured venue capacity threshold.
    """
    # Guard: a non-positive threshold would divide by zero below.
    if threshold <= 0:
        threshold = 1

    occupancy = current_unique_count / threshold

    # Check severities from highest to lowest so the first match wins.
    if occupancy >= 1.0:
        st.error(f"🔴 CRITICAL ALERT: Venue Capacity Exceeded! ({current_unique_count:,} / {threshold:,})")
    elif occupancy >= 0.85:
        st.warning(f"🟠 WARNING: Venue Capacity Nearing Limit! ({current_unique_count:,} / {threshold:,})")
    elif occupancy >= 0.70:
        st.info(f"🟡 ADVISORY: Venue Filling Up. ({current_unique_count:,} / {threshold:,})")
    else:
        st.success(f"🟢 Venue Status Normal. ({current_unique_count:,} / {threshold:,})")
|
api.py
ADDED
|
@@ -0,0 +1,529 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
os.environ.setdefault('OMP_NUM_THREADS', '5') # Suppress sklearn KMeans MKL memory leak warning on Windows
|
| 3 |
+
from download_weights import ensure_weights
|
| 4 |
+
import io
|
| 5 |
+
import time
|
| 6 |
+
import shutil
|
| 7 |
+
import base64
|
| 8 |
+
import json
|
| 9 |
+
import asyncio
|
| 10 |
+
from fastapi import FastAPI, UploadFile, File, Form, HTTPException, WebSocket, WebSocketDisconnect
|
| 11 |
+
from fastapi.middleware.cors import CORSMiddleware
|
| 12 |
+
import torch
|
| 13 |
+
import torchvision.transforms as standard_transforms
|
| 14 |
+
import numpy as np
|
| 15 |
+
from PIL import Image
|
| 16 |
+
import cv2
|
| 17 |
+
from sklearn.cluster import KMeans
|
| 18 |
+
from sqlmodel import Session
|
| 19 |
+
|
| 20 |
+
from models import build_model
|
| 21 |
+
from tracker import Tracker
|
| 22 |
+
from database import init_db, engine, FlightReport
|
| 23 |
+
|
| 24 |
+
app = FastAPI()

# Allow both local dev and the deployed Vercel frontend.
# The ALLOWED_ORIGINS env var can be set on HF Spaces to your exact Vercel URL.
# Default covers the usual Vite/React dev-server ports on localhost.
_raw_origins = os.environ.get(
    "ALLOWED_ORIGINS",
    "http://localhost:5173,http://127.0.0.1:5173,http://localhost:4173,http://127.0.0.1:4173,http://localhost:3000,http://127.0.0.1:3000"
)
# Split the comma-separated env value, dropping empty entries and whitespace.
ALLOWED_ORIGINS = [o.strip() for o in _raw_origins.split(",") if o.strip()]

app.add_middleware(
    CORSMiddleware,
    allow_origins=ALLOWED_ORIGINS,
    allow_origin_regex=r"https://.*\.vercel\.app", # matches any Vercel deployment
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Uploaded videos are staged here; deleted again after streaming completes.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TEMP_DIR = os.path.join(BASE_DIR, "temp_uploads")
os.makedirs(TEMP_DIR, exist_ok=True)
|
| 46 |
+
|
| 47 |
+
class Args:
    """Minimal stand-in for the argparse namespace that build_model() expects.

    Attributes:
        backbone: feature-extractor name ('vgg16_bn').
        row / line: P2PNet anchor-point grid dimensions (2x2).
    """

    def __init__(self):
        for attr, value in (('backbone', 'vgg16_bn'), ('row', 2), ('line', 2)):
            setattr(self, attr, value)
|
| 52 |
+
|
| 53 |
+
# Populated by startup_event(); None until the model has finished loading.
model = None
# torch.device selected at startup ('cuda' when available, else 'cpu').
device = None
# torchvision preprocessing pipeline (ToTensor + ImageNet normalisation).
transform = None
|
| 56 |
+
|
| 57 |
+
@app.get("/")
async def health_check():
    """Health check — HuggingFace Spaces pings this to confirm the app is alive.

    Returns a small JSON payload; `model_loaded` is False until
    startup_event() has finished loading the P2PNet weights.
    """
    return {"status": "ok", "model_loaded": model is not None}
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@app.on_event("startup")
async def startup_event():
    """One-time app initialisation: weights, DB, model, preprocessing.

    Builds the P2PNet model and assigns the module-level globals
    (`model`, `device`, `transform`) only after everything is ready, so
    endpoints that check `model is None` never see a half-built model.
    """
    # Download weights from HuggingFace Hub if not present locally
    ensure_weights()
    init_db()

    global model, device, transform
    device_type = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device_type.type == 'cuda':
        # Autotune conv kernels — worthwhile because input sizes repeat.
        torch.backends.cudnn.benchmark = True

    args = Args()
    model_obj = build_model(args)
    model_obj.to(device_type)
    if device_type.type == 'cuda':
        # channels_last memory layout speeds up conv-heavy models on GPU.
        model_obj.to(memory_format=torch.channels_last)

    # Missing weights are tolerated silently here; the model will just be
    # randomly initialised. NOTE(review): consider logging a warning.
    weight_path = os.path.join(BASE_DIR, 'weights', 'SHTechA.pth')
    if os.path.exists(weight_path):
        checkpoint = torch.load(weight_path, map_location=device_type)
        model_obj.load_state_dict(checkpoint['model'])

    model_obj.eval()

    # Standard ImageNet normalisation, matching the VGG16 backbone's training.
    transform_obj = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # Publish to module globals last (see docstring).
    model = model_obj
    device = device_type
    transform = transform_obj
|
| 95 |
+
|
| 96 |
+
def score_aware_merge(predictions, radius, orig_width, orig_height):
    """Greedy duplicate suppression for point detections.

    Predictions are visited in descending confidence order; a point is kept
    only when it lies inside the original image bounds and no already-kept
    point sits within `radius` pixels of it (so the highest-scoring point
    in each neighbourhood wins).

    Args:
        predictions: iterable of [x, y, score] triples.
        radius: merge radius in pixels.
        orig_width / orig_height: bounds of the original image.

    Returns:
        List of [x, y] float pairs, ordered by descending confidence.
    """
    if not predictions:
        return []
    kept = []
    r2 = radius * radius
    # Highest-confidence detections claim their neighbourhood first.
    ranked = sorted(predictions, key=lambda pred: pred[2], reverse=True)
    for px, py, _score in ranked:
        inside = 0 <= px < orig_width and 0 <= py < orig_height
        if not inside:
            continue
        # any() over kept points replaces the original flag-and-break loop.
        if any((px - kx) ** 2 + (py - ky) ** 2 <= r2 for kx, ky in kept):
            continue
        kept.append([float(px), float(py)])
    return kept
|
| 111 |
+
|
| 112 |
+
def round_to_stride(value, stride=128):
    """Round `value` up to the nearest multiple of `stride`, floored at one stride."""
    multiples = int(np.ceil(value / stride))
    return max(stride, multiples * stride)
|
| 114 |
+
|
| 115 |
+
def process_frame(img_raw, model, device, transform, threshold, max_dim=3840, magnification=1.5, patch_size=512, nms_radius=8.0, batch_size=8, patch_overlap=0.25, inference_strategy="Auto", full_frame_max_dim=1800, fencing_poly=None):
    """Run P2PNet crowd counting on one PIL image.

    Two inference paths:
      * Single pass — the frame is resized to a stride-aligned size and fed
        to the model once (fast; small/medium frames).
      * Tiled — the magnified frame is padded and split into overlapping
        `patch_size` squares which are batched through the model.

    Fix: the expensive LANCZOS full-frame resize (`img_magnified`) was
    previously computed unconditionally but is only consumed by the tiled
    path; it is now computed inside that branch only, so the single-pass
    path no longer pays for an unused resize.

    Args:
        img_raw: PIL.Image (RGB); returned unchanged as the first element.
        model / device / transform: loaded P2PNet, torch device, preprocessing.
        threshold: softmax confidence cut-off for keeping a predicted point.
        max_dim: cap on the magnified working resolution (None disables).
        magnification: upscale factor so tiny drone targets become visible.
        patch_size / patch_overlap / batch_size: tiling parameters.
        nms_radius: duplicate-merge radius in original-image pixels.
        inference_strategy: "Auto" | "Single Pass" | "Tiled".
        full_frame_max_dim: Auto-mode cut-over between the two paths.
        fencing_poly: optional list of {'x','y'} normalised vertices; points
            outside the polygon are discarded.

    Returns:
        (img_raw, count, points) with points as [x, y] pairs in
        original-image coordinates.
    """
    orig_width, orig_height = img_raw.size
    work_width = int(orig_width * magnification)
    work_height = int(orig_height * magnification)

    # Clamp the working resolution and recompute the *effective* magnification
    # so later coordinate un-scaling stays consistent with the actual resize.
    if max_dim is not None and (work_width > max_dim or work_height > max_dim):
        scale = max_dim / float(max(work_width, work_height))
        work_width = int(work_width * scale)
        work_height = int(work_height * scale)
        magnification = work_width / float(orig_width)

    # Pillow >= 9.1 exposes filters via Image.Resampling; fall back for old versions.
    resample_filter = getattr(Image, 'Resampling', Image).LANCZOS if hasattr(Image, 'Resampling') else getattr(Image, 'ANTIALIAS', 1)

    use_single_pass = inference_strategy == "Single Pass" or (inference_strategy == "Auto" and max(work_width, work_height) <= full_frame_max_dim)

    final_points = []
    if use_single_pass:
        # The backbone needs dimensions divisible by its stride.
        model_width = round_to_stride(work_width)
        model_height = round_to_stride(work_height)
        scale_x = model_width / float(orig_width)
        scale_y = model_height / float(orig_height)
        model_img = img_raw.resize((model_width, model_height), resample_filter)
        samples = transform(model_img).unsqueeze(0).to(device, non_blocking=True)
        if device.type == 'cuda':
            samples = samples.contiguous(memory_format=torch.channels_last)

        with torch.inference_mode():
            if device.type == 'cuda':
                with torch.cuda.amp.autocast():
                    outputs = model(samples)
            else:
                outputs = model(samples)

        # Class index 1 is "person"; take its softmax probability per point.
        scores = torch.nn.functional.softmax(outputs['pred_logits'].float(), -1)[:, :, 1][0]
        points = outputs['pred_points'][0].float()
        mask = scores > threshold
        selected_points = points[mask].detach().cpu().numpy()
        selected_scores = scores[mask].detach().cpu().numpy()
        predictions = []
        for point, score in zip(selected_points, selected_scores):
            # Map from model-input space back to original-image space.
            predictions.append([point[0] / scale_x, point[1] / scale_y, float(score)])
        final_points = score_aware_merge(predictions, nms_radius, orig_width, orig_height)
    else:
        # The magnified full frame is only needed on the tiled path.
        img_magnified = img_raw.resize((work_width, work_height), resample_filter)

        # Pad with a black border so detections at frame edges get full patches.
        pad_border = 256
        new_width = ((work_width + (pad_border * 2) + patch_size - 1) // patch_size) * patch_size
        new_height = ((work_height + (pad_border * 2) + patch_size - 1) // patch_size) * patch_size

        img_padded = Image.new('RGB', (new_width, new_height), (0, 0, 0))
        img_padded.paste(img_magnified, (pad_border, pad_border))

        all_predictions = []
        patch_overlap = min(max(float(patch_overlap), 0.0), 0.75)
        stride = max(64, int(patch_size * (1.0 - patch_overlap)))
        patch_jobs = []

        # Collect every full patch; partial patches at the far edge are skipped
        # (padding guarantees the content area is still covered).
        for y in range(0, new_height - stride + 1, stride):
            for x in range(0, new_width - stride + 1, stride):
                if y + patch_size > new_height or x + patch_size > new_width:
                    continue
                patch = img_padded.crop((x, y, x + patch_size, y + patch_size))
                patch_jobs.append((x, y, patch))

        total_patches = len(patch_jobs)
        batch_size = max(1, int(batch_size))

        for start_idx in range(0, total_patches, batch_size):
            batch_jobs = patch_jobs[start_idx:start_idx + batch_size]
            patch_tensors = [transform(patch) for _, _, patch in batch_jobs]
            samples = torch.stack(patch_tensors, dim=0).to(device, non_blocking=True)
            if device.type == 'cuda':
                samples = samples.contiguous(memory_format=torch.channels_last)

            with torch.inference_mode():
                if device.type == 'cuda':
                    with torch.cuda.amp.autocast():
                        outputs = model(samples)
                else:
                    outputs = model(samples)

            outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'].float(), -1)[:, :, 1]
            outputs_points = outputs['pred_points'].float()

            for batch_idx, (x, y, _) in enumerate(batch_jobs):
                mask = outputs_scores[batch_idx] > threshold
                points = outputs_points[batch_idx][mask].detach().cpu().numpy()
                scores = outputs_scores[batch_idx][mask].detach().cpu().numpy()

                if len(points) > 0:
                    # Patch-local -> padded -> magnified -> original coordinates.
                    points[:, 0] += (x - pad_border)
                    points[:, 1] += (y - pad_border)
                    points = points / float(magnification)
                    for point, score in zip(points, scores):
                        all_predictions.append([point[0], point[1], float(score)])

        final_points = score_aware_merge(all_predictions, nms_radius, orig_width, orig_height)

    # Smart Zone Fencing filter: keep only points inside the user polygon
    # (vertices arrive normalised to [0, 1] and are scaled to pixels here).
    if fencing_poly and len(fencing_poly) > 2:
        poly_arr = np.array([[p['x']*orig_width, p['y']*orig_height] for p in fencing_poly], dtype=np.int32)
        filtered_pts = []
        for pt in final_points:
            # pointPolygonTest >= 0 means inside or on the boundary.
            if cv2.pointPolygonTest(poly_arr, (pt[0], pt[1]), False) >= 0:
                filtered_pts.append(pt)
        final_points = filtered_pts

    return img_raw, len(final_points), final_points
|
| 216 |
+
|
| 217 |
+
def process_frame_with_oom_recovery(*args, batch_size=8, **kwargs):
    """Run process_frame, halving the patch batch size on CUDA OOM.

    Returns:
        (process_frame_result, batch_size_actually_used).

    Raises:
        RuntimeError: propagated unchanged when it is not an out-of-memory
            error, or when OOM still occurs at batch size 1.
    """
    attempt_size = max(1, int(batch_size))
    while True:
        try:
            result = process_frame(*args, batch_size=attempt_size, **kwargs)
            return result, attempt_size
        except RuntimeError as err:
            if "out of memory" not in str(err).lower():
                raise
            # Release cached allocations before retrying (or giving up).
            if torch.cuda.is_available():
                torch.cuda.empty_cache()
            if attempt_size == 1:
                raise
            attempt_size //= 2
|
| 227 |
+
|
| 228 |
+
def generate_colors(n):
    """Return n visually distinct BGR colour tuples for cluster rendering.

    Hues cycle through six fixed values and are converted via OpenCV's
    HSV->BGR path. NOTE(review): OpenCV's 8-bit hue range is 0..179, so the
    entries above 179 depend on OpenCV's out-of-range handling — behaviour
    preserved exactly from the original.
    """
    hue_cycle = (30, 90, 150, 210, 270, 330)

    def hue_to_bgr(hue):
        pixel = np.uint8([[[hue, 255, 255]]])
        b, g, r = cv2.cvtColor(pixel, cv2.COLOR_HSV2BGR)[0][0]
        return (int(b), int(g), int(r))

    return [hue_to_bgr(hue_cycle[i % len(hue_cycle)]) for i in range(n)]
|
| 237 |
+
|
| 238 |
+
def draw_points(img, points, use_heatmap=False, use_clustering=False, use_motion_vectors=False, prev_points=None):
    """Render detections onto a frame and return it as a BGR numpy image.

    Exactly one base style is applied (heatmap > clustering > plain dots),
    then motion-vector arrows are optionally drawn on top.

    Args:
        img: source image (PIL or array accepted by np.array), RGB order.
        points: list of [x, y] detection coordinates in image pixels.
        use_heatmap: blend a Gaussian density heatmap over the frame.
        use_clustering: colour points by KMeans cluster and outline hulls.
        use_motion_vectors: draw arrows from prev_points to nearest current.
        prev_points: detections from the previous processed frame, or None.

    Returns:
        numpy.ndarray (H, W, 3) in BGR order, ready for cv2.imencode.
    """
    img_bgr = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)

    if use_heatmap:
        h, w = img_bgr.shape[:2]
        heatmap = np.zeros((h, w), dtype=np.float32)
        for p in points:
            px, py = int(p[0]), int(p[1])
            if 0 <= px < w and 0 <= py < h:
                # Stamp a 31x31 Gaussian (sigma=5) around each detection,
                # clipped to the image bounds.
                r = 15
                x_min, x_max = max(0, px-r), min(w, px+r+1)
                y_min, y_max = max(0, py-r), min(h, py+r+1)
                y, x = np.ogrid[y_min:y_max, x_min:x_max]
                mask = np.exp(-((x - px)**2 + (y - py)**2) / (2 * 5**2))
                heatmap[y_min:y_max, x_min:x_max] += mask
        heatmap = np.clip(heatmap * 100, 0, 255).astype(np.uint8)
        color_map = cv2.applyColorMap(heatmap, cv2.COLORMAP_INFERNO)
        # Alpha-blend (70%) the colormap only where density is non-trivial.
        mask = (heatmap > 10).astype(np.float32)[:, :, np.newaxis]
        img_bgr = (img_bgr * (1 - mask * 0.7) + color_map * (mask * 0.7)).astype(np.uint8)

    elif use_clustering and len(points) >= 3:
        # Roughly one cluster per 10 points, capped at 5.
        num_clusters = min(len(points) // 10 + 1, 5)
        # Guard: KMeans requires n_clusters <= n_samples
        num_clusters = max(1, min(num_clusters, len(points)))
        if num_clusters > 1:
            pts_array = np.array([[p[0], p[1]] for p in points])
            try:
                kmeans = KMeans(n_clusters=num_clusters, n_init='auto', random_state=42).fit(pts_array)
                labels = kmeans.labels_
                colors = generate_colors(num_clusters)
                for i, p in enumerate(points):
                    cv2.circle(img_bgr, (int(p[0]), int(p[1])), 3, colors[labels[i]], -1)
                # Outline each cluster with its convex hull.
                for c in range(num_clusters):
                    cluster_pts = pts_array[labels == c].astype(np.int32)
                    if len(cluster_pts) >= 3:
                        hull = cv2.convexHull(cluster_pts)
                        cv2.polylines(img_bgr, [hull], True, colors[c], 2)
            except Exception:
                # Fallback to plain dots if clustering fails for any reason
                for p in points: cv2.circle(img_bgr, (int(p[0]), int(p[1])), 2, (184, 230, 0), -1)
        else:
            for p in points: cv2.circle(img_bgr, (int(p[0]), int(p[1])), 2, (184, 230, 0), -1)
    else:
        # Default rendering: small filled dots (BGR 184,230,0).
        for p in points:
            cv2.circle(img_bgr, (int(p[0]), int(p[1])), 2, (184, 230, 0), -1)

    # GAP 5: Motion Vectors — draw arrows from prev positions to current
    if use_motion_vectors and prev_points and len(prev_points) > 0 and len(points) > 0:
        cur_arr = np.array([[p[0], p[1]] for p in points], dtype=np.float32)
        prev_arr = np.array([[p[0], p[1]] for p in prev_points], dtype=np.float32)
        # Match nearest neighbours between prev and current
        for pp in prev_arr:
            dists = np.sum((cur_arr - pp) ** 2, axis=1)
            nearest_idx = int(np.argmin(dists))
            if dists[nearest_idx] < 2500: # max 50px movement
                cp = cur_arr[nearest_idx]
                dx, dy = float(cp[0] - pp[0]), float(cp[1] - pp[1])
                if abs(dx) > 1 or abs(dy) > 1: # only draw if actually moved
                    speed = (dx**2 + dy**2) ** 0.5
                    # Color from green (slow) to amber (fast)
                    t = min(speed / 30.0, 1.0)
                    color = (
                        int(11 * (1 - t) + 11 * t),
                        int(230 * (1 - t) + 158 * t),
                        int(184 * (1 - t) + 245 * t)
                    )
                    cv2.arrowedLine(
                        img_bgr,
                        (int(pp[0]), int(pp[1])),
                        (int(cp[0]), int(cp[1])),
                        color, 1, tipLength=0.3
                    )

    return img_bgr
|
| 312 |
+
|
| 313 |
+
@app.post("/api/upload-video")
async def upload_video(file: UploadFile = File(...)):
    """Stage an uploaded video in TEMP_DIR and return its server-side id.

    The returned `file_id` is later passed to the /api/stream-video/{file_id}
    WebSocket endpoint, which also deletes the file when done.
    """
    # Sanitize filename to remove spaces/special chars that break WebSocket URLs
    safe_name = "".join(c if c.isalnum() or c in '._-' else '_' for c in (file.filename or 'video'))
    # Timestamp prefix keeps ids unique across uploads (second resolution).
    file_id = f"vid_{int(time.time())}_{safe_name}"
    file_path = os.path.join(TEMP_DIR, file_id)
    # Stream-write in chunks to avoid loading entire video into RAM
    try:
        with open(file_path, "wb") as out_f:
            while True:
                chunk = await file.read(1024 * 1024) # 1 MB chunks
                if not chunk:
                    break
                out_f.write(chunk)
    except Exception as exc:
        # Clean up the partial file before reporting failure.
        if os.path.exists(file_path):
            os.remove(file_path)
        raise HTTPException(status_code=500, detail=f"Video upload failed: {exc}")
    return {"file_id": file_id, "size": os.path.getsize(file_path)}
|
| 332 |
+
|
| 333 |
+
@app.websocket("/api/stream-video/{file_id}")
async def stream_video(websocket: WebSocket, file_id: str):
    """Process an uploaded video frame-by-frame and stream results to the client.

    Protocol: after accept, the client sends one JSON message with a
    "settings" object; the server then emits per-frame messages with
    status "playing" (annotated JPEG as base64 plus counts/progress),
    and finally a "done" message. Errors are reported with status "error".
    The staged video file is deleted in the finally block regardless of
    how the stream ends.
    """
    await websocket.accept()
    file_path = os.path.join(TEMP_DIR, file_id)

    if not os.path.exists(file_path):
        await websocket.send_json({"status": "error", "message": "Video file not found on server. Please upload again."})
        await websocket.close()
        return

    # Guard: model must be loaded
    if model is None:
        await websocket.send_json({"status": "error", "message": "AI model not loaded yet. Please wait and retry."})
        await websocket.close()
        return

    cap = None
    try:
        data = await websocket.receive_json()
        settings_payload = data.get("settings", {})

        # Coerce every setting defensively; the frontend sends JSON values
        # whose types are not guaranteed.
        confidence_threshold = float(settings_payload.get("confidenceThresh", 0.35))
        magnification = float(settings_payload.get("magnification", 1.5))
        nms_radius = float(settings_payload.get("nmsRadius", 9.0))
        use_heatmap = bool(settings_payload.get("useHeatmap", False))
        use_clustering = bool(settings_payload.get("useClustering", False))
        use_motion_vecs = bool(settings_payload.get("useMotionVecs", False))
        fencing_poly = settings_payload.get("fencingPolygon", [])
        frame_skip = max(1, int(settings_payload.get("frameSkip", 3)))
        patch_overlap = 0.25
        capacity_limit = int(settings_payload.get("capacityLimit", 150))

        cap = cv2.VideoCapture(file_path)
        if not cap.isOpened():
            await websocket.send_json({"status": "error", "message": f"Cannot open video file. Format may not be supported by OpenCV."})
            return

        # `or 1` guards against CAP_PROP_FRAME_COUNT returning 0 (unknown),
        # which would otherwise divide by zero in the progress calculation.
        total_video_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) or 1
        tracker = Tracker(max_distance=50.0, max_age=5)

        frames_processed = 0
        total_unique = 0
        peak_crowd = 0
        total_anomalies = 0
        capacity_breached = False
        prev_raw_points = []

        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break

            # Only every frame_skip-th frame is run through the model.
            if frames_processed % frame_skip == 0:
                try:
                    img_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    pil_img = Image.fromarray(img_rgb)

                    # Fixed small batch (4) and capped resolution (1920) keep
                    # per-frame latency predictable for streaming.
                    (_, count, raw_points), _ = process_frame_with_oom_recovery(
                        pil_img, model, device, transform, confidence_threshold,
                        max_dim=1920, magnification=magnification, nms_radius=nms_radius,
                        batch_size=4, patch_overlap=patch_overlap, inference_strategy="Auto",
                        fencing_poly=fencing_poly
                    )

                    if count > peak_crowd: peak_crowd = count
                    if count > capacity_limit: capacity_breached = True

                    img_bgr = draw_points(pil_img, raw_points, use_heatmap, use_clustering,
                                          use_motion_vecs, prev_raw_points)
                    prev_raw_points = raw_points[:]

                    active_tracks, cumulative_unique, anomaly = tracker.update(img_bgr, raw_points)
                    total_unique = cumulative_unique
                    if anomaly:
                        total_anomalies += 1

                    # Highlight fast-moving tracks during an anomaly event.
                    for t in active_tracks:
                        color = (11, 158, 245) if (anomaly and hasattr(t, 'velocity') and t.velocity > 35) else (0, 255, 255)
                        cv2.circle(img_bgr, (int(t.pt[0]), int(t.pt[1])), 4, color, -1)

                    _, buffer = cv2.imencode('.jpg', img_bgr, [cv2.IMWRITE_JPEG_QUALITY, 80])
                    encoded_img = base64.b64encode(buffer).decode('utf-8')

                    progress = round(frames_processed / total_video_frames * 100)
                    await websocket.send_json({
                        "status": "playing",
                        "frame": frames_processed,
                        "count": count,
                        "total_unique": total_unique,
                        "anomalyEvent": anomaly,
                        "progress": progress,
                        "imageB64": encoded_img
                    })

                except Exception as frame_err:
                    print(f"[Frame {frames_processed} error]: {frame_err}")
                    # Skip this frame and continue rather than crashing the whole stream

            frames_processed += 1
            # Yield to the event loop so the socket stays responsive.
            await asyncio.sleep(0.001)

        # Release BEFORE any file operations so Windows unlocks it
        cap.release()
        cap = None

        # Log to DB
        try:
            with Session(engine) as db_session:
                record = FlightReport(
                    filename=file_id,
                    max_capacity_breached=capacity_breached,
                    peak_crowd_count=peak_crowd,
                    duration_frames=frames_processed,
                    chaos_anomalies=total_anomalies
                )
                db_session.add(record)
                db_session.commit()
        except Exception as db_err:
            # DB persistence is best-effort; streaming results already reached the client.
            print(f"[DB error]: {db_err}")

        await websocket.send_json({"status": "done", "total_unique": total_unique})

    except WebSocketDisconnect:
        print("[WebSocket] client disconnected")
    except Exception as e:
        print(f"[Stream error]: {e}")
        try:
            await websocket.send_json({"status": "error", "message": str(e)})
        except Exception:
            pass
    finally:
        # Make sure cap is released before file deletion
        if cap is not None:
            cap.release()
        # Retry deletion — Windows may keep handle briefly after cap.release()
        for _attempt in range(5):
            try:
                if os.path.exists(file_path):
                    os.remove(file_path)
                break
            except PermissionError:
                import time as _t
                _t.sleep(0.3)
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
@app.post("/api/process-image")
async def process_image_api(
    file: UploadFile = File(...),
    confidence_threshold: float = Form(0.35),
    magnification: float = Form(1.5),
    nms_radius: float = Form(9.0),
    # Boolean toggles arrive as FormData strings ("true"/"false") from the
    # frontend and are compared case-insensitively below.
    use_heatmap: str = Form("false"),
    use_clustering: str = Form("false"),
    use_motion_vectors: str = Form("false"),
    # JSON-encoded list of {'x','y'} normalised polygon vertices.
    fencing_polygon: str = Form("[]"),
    inference_batch_size: int = Form(8),
    patch_overlap: float = Form(0.25),
    max_resolution: int = Form(3840),
    inference_strategy: str = Form("Auto")
):
    """Count people in a single uploaded image.

    Returns JSON with the crowd count, wall-clock inference time, the batch
    size actually used after any OOM back-off, and the annotated image as a
    base64 JPEG. Any failure is surfaced as HTTP 500 with the error text.
    """
    try:
        fencing_poly = json.loads(fencing_polygon) if fencing_polygon else []
        contents = await file.read()
        image = Image.open(io.BytesIO(contents)).convert('RGB')

        start_time = time.perf_counter()

        (processed_img, count, points), used_batch_size = process_frame_with_oom_recovery(
            image, model, device, transform, confidence_threshold,
            max_dim=max_resolution, magnification=magnification,
            nms_radius=nms_radius, batch_size=inference_batch_size,
            patch_overlap=patch_overlap, inference_strategy=inference_strategy,
            fencing_poly=fencing_poly
        )

        elapsed = time.perf_counter() - start_time
        img_bgr = draw_points(
            image, points,
            use_heatmap.lower() == 'true',
            use_clustering.lower() == 'true',
            use_motion_vectors.lower() == 'true',
            None # no prev_points for single image
        )

        _, buffer = cv2.imencode('.jpg', img_bgr)
        encoded_img = base64.b64encode(buffer).decode('utf-8')

        return {
            "count": count, "elapsed": elapsed,
            "usedBatchSize": used_batch_size, "imageB64": encoded_img
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=str(e))
|
| 526 |
+
|
| 527 |
+
# Local dev entry point; in deployment the app is served by the container's
# own uvicorn/gunicorn command instead.
if __name__ == "__main__":
    import uvicorn
    uvicorn.run("api:app", host="0.0.0.0", port=8000, reload=False)
|
app.py
ADDED
|
@@ -0,0 +1,606 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import streamlit as st
|
| 2 |
+
import cv2
|
| 3 |
+
import torch
|
| 4 |
+
import torchvision.transforms as standard_transforms
|
| 5 |
+
import numpy as np
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import os
|
| 8 |
+
import tempfile
|
| 9 |
+
import time
|
| 10 |
+
import warnings
|
| 11 |
+
warnings.filterwarnings('ignore')
|
| 12 |
+
|
| 13 |
+
from tracker import Tracker
|
| 14 |
+
from report_generator import ReportGenerator
|
| 15 |
+
from alert_system import render_alert
|
| 16 |
+
|
| 17 |
+
from app_enhancements import confidence_interval, load_config, save_config
|
| 18 |
+
from models import build_model
|
| 19 |
+
|
| 20 |
+
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# User settings persisted between Streamlit sessions via save_config/load_config.
CONFIG_PATH = os.path.join(BASE_DIR, "civic_pulse_config.json")
# Detection presets selectable in the sidebar. Fast trades accuracy for
# throughput (no patch overlap, skips frames); Accurate does the opposite.
MODE_SETTINGS = {
    "Fast": {
        "patch_overlap": 0.0,
        "frame_skip": 5,
        "magnification": 1.25,
        "confidence_threshold": 0.5,
        "nms_radius": 10.0,
        "tracker_max_distance": 65.0,
        "inference_batch_size": 16,
    },
    "Balanced": {
        "patch_overlap": 0.25,
        "frame_skip": 2,
        "magnification": 1.5,
        "confidence_threshold": 0.35,
        "nms_radius": 9.0,
        "tracker_max_distance": 55.0,
        "inference_batch_size": 8,
    },
    "Accurate": {
        "patch_overlap": 0.5,
        "frame_skip": 1,
        "magnification": 2.0,
        "confidence_threshold": 0.25,
        "nms_radius": 7.0,
        "tracker_max_distance": 45.0,
        "inference_batch_size": 4,
    },
}
# Fallback values when no saved config exists yet (mirrors "Balanced").
DEFAULT_CONFIG = {
    "processing_mode": "Balanced",
    "max_resolution": 3840,
    "magnification": 1.5,
    "confidence_threshold": 0.35,
    "nms_radius": 9.0,
    "tracker_max_distance": 55.0,
    "inference_batch_size": 8,
    "inference_strategy": "Auto",
    "venue_capacity": 15000,
}
saved_config = load_config(CONFIG_PATH, DEFAULT_CONFIG)
|
| 63 |
+
|
| 64 |
+
# Custom wrapper to provide args to the model builder
|
| 65 |
+
class Args:
    """Namespace shim supplying the options build_model() reads.

    Attributes:
        backbone: feature-extractor name ('vgg16_bn').
        row / line: P2PNet anchor-point grid dimensions (2x2).
    """

    def __init__(self):
        # Fixed P2PNet configuration used throughout this app.
        self.backbone, self.row, self.line = 'vgg16_bn', 2, 2
|
| 70 |
+
|
| 71 |
+
# Page Configuration
|
| 72 |
+
st.set_page_config(page_title="Civic Pulse Dashboard", page_icon="🚁", layout="wide")

st.title("🚁 Civic Pulse: Drone Crowd Monitor")
st.markdown("Upload drone imagery or video for AI-based crowd counting using P2PNet.")

# Sidebar Configuration — defaults come from the persisted saved_config,
# falling back to the active MODE_SETTINGS preset.
st.sidebar.header("Processing Settings")
processing_mode = st.sidebar.selectbox(
    "Processing Mode",
    ("Balanced", "Fast", "Accurate"),
    index=("Balanced", "Fast", "Accurate").index(saved_config.get("processing_mode", "Balanced")),
    help="Fast uses fewer overlapping patches. Accurate uses more overlap for better boundary coverage. Balanced is recommended."
)
preset_values = MODE_SETTINGS[processing_mode]
use_preset_values = st.sidebar.checkbox(
    "Use Recommended Detection Preset",
    value=True,
    help="Recommended for drone/top-down images. Turn off only if you want to manually tune every setting."
)
max_resolution = st.sidebar.slider("Max GPU Resolution Bounds", min_value=720, max_value=8000, value=int(saved_config.get("max_resolution", 3840)), step=120, help="Prevents RAM crashes. Patches are generated within this bound.")
inference_strategy = st.sidebar.selectbox(
    "Inference Strategy",
    ("Auto", "Single Pass", "Tiled"),
    index=("Auto", "Single Pass", "Tiled").index(saved_config.get("inference_strategy", "Auto")),
    help="Single Pass is much faster for normal images. Tiled is only for very large images. Auto chooses for you."
)
# Either take every tuning value from the preset, or expose individual sliders.
if use_preset_values:
    magnification = preset_values["magnification"]
    confidence_threshold = preset_values["confidence_threshold"]
    nms_radius = preset_values["nms_radius"]
    tracker_max_distance = preset_values["tracker_max_distance"]
    inference_batch_size = preset_values["inference_batch_size"]
    st.sidebar.info(
        f"Preset active: magnification {magnification}x, confidence {confidence_threshold}, "
        f"merge radius {nms_radius}px, batch {inference_batch_size}."
    )
else:
    magnification = st.sidebar.slider("Micro-Target Magnification", min_value=1.0, max_value=3.0, value=float(saved_config.get("magnification", preset_values["magnification"])), step=0.1, help="Scales up tiny drone targets so the AI can physically see them.")
    confidence_threshold = st.sidebar.slider("Confidence Threshold", min_value=0.05, max_value=1.0, value=float(saved_config.get("confidence_threshold", preset_values["confidence_threshold"])), step=0.05, help="Decrease to catch missed people, increase to reduce false positives.")
    nms_radius = st.sidebar.slider("Duplicate Merge Radius (px)", min_value=2.0, max_value=30.0, value=float(saved_config.get("nms_radius", preset_values["nms_radius"])), step=1.0, help="Merges overlapping patch detections. Increase if duplicate dots appear.")
    tracker_max_distance = st.sidebar.slider("Tracker Match Radius (px)", min_value=10.0, max_value=150.0, value=float(saved_config.get("tracker_max_distance", preset_values["tracker_max_distance"])), step=5.0, help="Maximum motion allowed when matching people between processed video frames.")
    inference_batch_size = st.sidebar.slider("P2PNet Patch Batch Size", min_value=1, max_value=32, value=int(saved_config.get("inference_batch_size", preset_values["inference_batch_size"])), step=1, help="Processes tiled patches in batches for faster P2PNet inference. Lower it if memory is limited.")
input_type = st.sidebar.radio("Select Input Type", ("Image", "Video"))
uploaded_file = st.sidebar.file_uploader(f"Upload {input_type}", type=["png", "jpg", "jpeg"] if input_type == "Image" else ["mp4", "avi", "mov"])

st.sidebar.markdown("---")
st.sidebar.header("Alert Settings")
venue_capacity = st.sidebar.slider("Venue Max Capacity", min_value=100, max_value=50000, value=int(saved_config.get("venue_capacity", 15000)), step=100)
# Persist the current sidebar values so they become the defaults next session.
if st.sidebar.button("Save Current Settings"):
    save_config(CONFIG_PATH, {
        "processing_mode": processing_mode,
        "max_resolution": max_resolution,
        "magnification": magnification,
        "confidence_threshold": confidence_threshold,
        "nms_radius": nms_radius,
        "tracker_max_distance": tracker_max_distance,
        "inference_batch_size": inference_batch_size,
        "inference_strategy": inference_strategy,
        "venue_capacity": venue_capacity,
    })
    st.sidebar.success("Settings saved.")
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
@st.cache_resource
def load_model():
    """Load the P2PNet model into GPU if available.

    Returns:
        (model, device, transform): the eval-mode P2PNet, the torch device it
        lives on, and the normalization transform used to turn PIL images
        into model inputs.
    """
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    if device.type == 'cuda':
        # Let cuDNN auto-tune conv algorithms for repeated input sizes.
        torch.backends.cudnn.benchmark = True

    args = Args()
    model = build_model(args)
    model.to(device)
    if device.type == 'cuda':
        # channels_last memory layout typically improves conv throughput on GPU.
        model.to(memory_format=torch.channels_last)

    # Load weights
    weight_path = os.path.join(BASE_DIR, 'weights', 'SHTechA.pth')
    if os.path.exists(weight_path):
        checkpoint = torch.load(weight_path, map_location=device)
        model.load_state_dict(checkpoint['model'])
    else:
        # Deliberately non-fatal: the app still runs with random weights.
        st.sidebar.error(f"Weights not found at {weight_path}. Model will perform poorly.")

    model.eval()

    # ImageNet mean/std normalization (standard for VGG-backbone models).
    transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    return model, device, transform

# Cached across Streamlit reruns by st.cache_resource.
model, device, transform = load_model()
|
| 166 |
+
|
| 167 |
+
def format_elapsed(seconds):
    """Render a duration in seconds as a compact string like '5.0s' or '2m 5.5s'."""
    if seconds >= 60:
        whole_minutes, leftover = divmod(seconds, 60)
        return f"{int(whole_minutes)}m {leftover:.1f}s"
    return f"{seconds:.1f}s"
|
| 172 |
+
|
| 173 |
+
|
| 174 |
+
def score_aware_merge(predictions, radius, orig_width, orig_height):
    """Greedy score-ordered de-duplication of detections.

    Points are visited from highest to lowest confidence; a point is kept only
    if it lies inside the original frame and is farther than `radius` from
    every already-kept point.

    Args:
        predictions: iterable of [x, y, score] entries.
        radius: merge radius in pixels.
        orig_width / orig_height: bounds of the original frame.

    Returns:
        List of kept [x, y] pairs as floats.
    """
    if not predictions:
        return []
    ranked = sorted(predictions, key=lambda entry: entry[2], reverse=True)
    kept = []
    r2 = radius * radius
    for px, py, _score in ranked:
        # Discard anything that fell outside the frame after rescaling.
        if not (0 <= px < orig_width and 0 <= py < orig_height):
            continue
        is_duplicate = any((px - kx) ** 2 + (py - ky) ** 2 <= r2 for kx, ky in kept)
        if not is_duplicate:
            kept.append([float(px), float(py)])
    return kept
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def round_to_stride(value, stride=128):
    """Round `value` up to the nearest multiple of `stride` (never below `stride`)."""
    multiples = np.ceil(value / stride)
    return max(stride, int(multiples * stride))
|
| 195 |
+
|
| 196 |
+
|
| 197 |
+
def process_frame(
    img_raw,
    model,
    device,
    transform,
    threshold,
    max_dim=3840,
    magnification=1.5,
    patch_size=512,
    nms_radius=8.0,
    batch_size=8,
    patch_overlap=0.25,
    inference_strategy="Auto",
    full_frame_max_dim=1800,
    progress_callback=None,
):
    """Process a PIL Image using Multi-Scale Tiling Inference.

    The frame is magnified, then either fed to the model in one pass (small
    frames, or inference_strategy == "Single Pass") or tiled into overlapping
    patches that are batched through the model. Detections are mapped back to
    original-image coordinates and de-duplicated with score_aware_merge.

    Args:
        img_raw: input PIL.Image (RGB).
        model, device, transform: as returned by load_model().
        threshold: softmax confidence cut-off for keeping a point.
        max_dim: cap on the magnified working resolution (None disables it).
        magnification: upscale factor applied before inference.
        patch_size: square tile edge in pixels for the tiling path.
        nms_radius: duplicate-merge radius in original-image pixels.
        batch_size: tiles per forward pass (coerced to >= 1).
        patch_overlap: fraction of tile overlap, clamped to [0, 0.75].
        inference_strategy: "Auto", "Single Pass", or any other value (tiled).
        full_frame_max_dim: "Auto" uses single-pass at or below this size.
        progress_callback: optional fn(ratio, done, total) progress hook.

    Returns:
        (img_raw, count, points) — the unmodified input image, the number of
        merged detections, and their [x, y] coordinates in original space.
    """
    orig_width, orig_height = img_raw.size

    # 1. Magnify
    work_width = int(orig_width * magnification)
    work_height = int(orig_height * magnification)

    # Clamp the working resolution and recompute the effective magnification
    # so coordinates can be mapped back exactly.
    if max_dim is not None and (work_width > max_dim or work_height > max_dim):
        scale = max_dim / float(max(work_width, work_height))
        work_width = int(work_width * scale)
        work_height = int(work_height * scale)
        magnification = work_width / float(orig_width)

    # Pillow >= 9.1 moved LANCZOS under Image.Resampling; fall back for older versions.
    resample_filter = getattr(Image, 'Resampling', Image).LANCZOS if hasattr(Image, 'Resampling') else getattr(Image, 'ANTIALIAS', 1)
    img_magnified = img_raw.resize((work_width, work_height), resample_filter)

    use_single_pass = inference_strategy == "Single Pass" or (
        inference_strategy == "Auto" and max(work_width, work_height) <= full_frame_max_dim
    )

    if use_single_pass:
        # Resize to a stride-aligned resolution the backbone accepts.
        model_width = round_to_stride(work_width)
        model_height = round_to_stride(work_height)
        scale_x = model_width / float(orig_width)
        scale_y = model_height / float(orig_height)
        model_img = img_raw.resize((model_width, model_height), resample_filter)
        samples = transform(model_img).unsqueeze(0).to(device, non_blocking=True)
        if device.type == 'cuda':
            samples = samples.contiguous(memory_format=torch.channels_last)

        with torch.inference_mode():
            if device.type == 'cuda':
                # Mixed precision on GPU for speed; outputs are cast back below.
                with torch.cuda.amp.autocast():
                    outputs = model(samples)
            else:
                outputs = model(samples)

        # Class index 1 is the "person" probability — confirm against models.p2pnet.
        scores = torch.nn.functional.softmax(outputs['pred_logits'].float(), -1)[:, :, 1][0]
        points = outputs['pred_points'][0].float()
        mask = scores > threshold
        selected_points = points[mask].detach().cpu().numpy()
        selected_scores = scores[mask].detach().cpu().numpy()
        predictions = []
        for point, score in zip(selected_points, selected_scores):
            # Map from model resolution back to original-image coordinates.
            predictions.append([point[0] / scale_x, point[1] / scale_y, float(score)])
        if progress_callback is not None:
            progress_callback(1.0, 1, 1)
        final_points = score_aware_merge(predictions, nms_radius, orig_width, orig_height)
        return img_raw, len(final_points), final_points

    # 2. Symmetrical Boundary Padding to eliminate corner-blindness
    pad_border = 256
    new_width = ((work_width + (pad_border * 2) + patch_size - 1) // patch_size) * patch_size
    new_height = ((work_height + (pad_border * 2) + patch_size - 1) // patch_size) * patch_size

    img_padded = Image.new('RGB', (new_width, new_height), (0, 0, 0))
    img_padded.paste(img_magnified, (pad_border, pad_border))

    all_predictions = []
    patch_overlap = min(max(float(patch_overlap), 0.0), 0.75)
    stride = max(64, int(patch_size * (1.0 - patch_overlap)))
    patch_jobs = []

    # 3. Patch Gridding Inference
    for y in range(0, new_height - stride + 1, stride):
        for x in range(0, new_width - stride + 1, stride):
            # Only full-size tiles are processed; padding guarantees coverage.
            if y + patch_size > new_height or x + patch_size > new_width: continue
            patch = img_padded.crop((x, y, x + patch_size, y + patch_size))
            patch_jobs.append((x, y, patch))

    total_patches = len(patch_jobs)
    batch_size = max(1, int(batch_size))

    for start_idx in range(0, total_patches, batch_size):
        batch_jobs = patch_jobs[start_idx:start_idx + batch_size]
        patch_tensors = [transform(patch) for _, _, patch in batch_jobs]
        samples = torch.stack(patch_tensors, dim=0).to(device, non_blocking=True)
        if device.type == 'cuda':
            samples = samples.contiguous(memory_format=torch.channels_last)

        with torch.inference_mode():
            if device.type == 'cuda':
                with torch.cuda.amp.autocast():
                    outputs = model(samples)
            else:
                outputs = model(samples)

        outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'].float(), -1)[:, :, 1]
        outputs_points = outputs['pred_points'].float()

        for batch_idx, (x, y, _) in enumerate(batch_jobs):
            mask = outputs_scores[batch_idx] > threshold
            points = outputs_points[batch_idx][mask].detach().cpu().numpy()
            scores = outputs_scores[batch_idx][mask].detach().cpu().numpy()

            if len(points) > 0:
                # Subtract the padding offset to map correctly back to original frame
                points[:, 0] += (x - pad_border)
                points[:, 1] += (y - pad_border)
                # Undo the magnification to return to original-image pixels.
                points = points / float(magnification)
                for point, score in zip(points, scores):
                    all_predictions.append([point[0], point[1], float(score)])

        if progress_callback is not None and total_patches > 0:
            done = min(start_idx + len(batch_jobs), total_patches)
            progress_callback(done / total_patches, done, total_patches)

    # Overlapping tiles detect the same head multiple times; merge them here.
    final_points = score_aware_merge(all_predictions, nms_radius, orig_width, orig_height)

    return img_raw, len(final_points), final_points
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
def process_frame_with_oom_recovery(*args, batch_size=8, **kwargs):
    """Run process_frame, retrying with progressively smaller batches on CUDA OOM.

    All positional and keyword arguments are forwarded to process_frame. When
    a RuntimeError whose message contains "out of memory" is raised, the CUDA
    cache is cleared and the batch size is halved; any other RuntimeError
    propagates immediately.

    Args:
        batch_size: initial patch batch size (coerced to >= 1).

    Returns:
        (result, batch_size) where result is process_frame's return value and
        batch_size is the batch size that finally succeeded.

    Raises:
        RuntimeError: when the OOM persists even at batch size 1, or when the
            error is not an out-of-memory condition.
    """
    current_batch_size = max(1, int(batch_size))
    while current_batch_size >= 1:
        try:
            result = process_frame(*args, batch_size=current_batch_size, **kwargs)
            return result, current_batch_size
        except RuntimeError as exc:
            if "out of memory" not in str(exc).lower():
                raise
            if torch.cuda.is_available():
                # Free cached allocations so the smaller retry has headroom.
                torch.cuda.empty_cache()
            if current_batch_size == 1:
                raise
            current_batch_size = max(1, current_batch_size // 2)
    # Defensive: the loop above always returns or raises, but keep an explicit
    # failure path for consistency with run_with_oom_recovery in
    # app_enhancements.py instead of silently returning None.
    raise RuntimeError("CUDA OOM recovery failed.")
|
| 339 |
+
|
| 340 |
+
|
| 341 |
+
# Main Interface
# Image path: one-shot detection + annotated still. Video path: per-frame
# detection, tracking, alerting, report export, and re-encoded playback.
if uploaded_file is not None:
    if input_type == "Image":
        col1, col2 = st.columns(2)

        # Original Image
        image = Image.open(uploaded_file).convert('RGB')
        col1.subheader("Original Image")
        col1.image(image, use_container_width=True)

        # Processing
        if st.sidebar.button("Process Image"):
            image_progress = st.progress(0)
            image_status = st.empty()
            image_start = time.perf_counter()

            # Progress hook passed down into process_frame's patch loop.
            def update_image_progress(ratio, done, total):
                elapsed = time.perf_counter() - image_start
                image_progress.progress(ratio)
                image_status.text(f"Processing image patches: {done}/{total} ({ratio * 100:.1f}%) | Elapsed: {format_elapsed(elapsed)}")

            (processed_img, count, points), used_batch_size = process_frame_with_oom_recovery(
                image,
                model,
                device,
                transform,
                confidence_threshold,
                max_dim=max_resolution,
                magnification=magnification,
                nms_radius=nms_radius,
                batch_size=inference_batch_size,
                patch_overlap=MODE_SETTINGS[processing_mode]["patch_overlap"],
                inference_strategy=inference_strategy,
                progress_callback=update_image_progress,
            )
            image_elapsed = time.perf_counter() - image_start
            image_progress.progress(1.0)
            image_status.text(f"Image processing complete: 100.0% | Processed time: {format_elapsed(image_elapsed)}")

            # Draw a red dot per detection (OpenCV works in BGR).
            img_draw = cv2.cvtColor(np.array(processed_img), cv2.COLOR_RGB2BGR)
            for p in points:
                cv2.circle(img_draw, (int(p[0]), int(p[1])), 2, (0, 0, 255), -1)

            img_draw_rgb = cv2.cvtColor(img_draw, cv2.COLOR_BGR2RGB)

            col2.subheader("Processed Analysis")
            col2.image(img_draw_rgb, use_container_width=True)
            st.sidebar.metric(label="Detected Crowd Count", value=count)
            st.sidebar.metric(label="Processed Time", value=format_elapsed(image_elapsed))
            st.sidebar.metric(label="Used Batch Size", value=used_batch_size)
            if count < 10:
                st.warning(
                    "Low detection count. For top-down drone/crosswalk images, use Processing Mode = Accurate. "
                    "If still low, turn off recommended preset and reduce Confidence Threshold toward 0.05."
                )

    elif input_type == "Video":
        st.subheader("Video Processing")
        st.caption("Upload any supported video duration. Processing progress is calculated from the video's total frame count, not a fixed 30-second limit.")

        frame_skip = st.sidebar.slider(
            "Frame Skip (Process Every Nth Frame)",
            min_value=1,
            max_value=30,
            value=MODE_SETTINGS[processing_mode]["frame_skip"],
            step=1,
            help="Higher values process video faster by analyzing fewer frames. Processing Mode sets a good starting value."
        )

        if st.sidebar.button("Process Video"):
            # Save temp file because cv2.VideoCapture requires a file path
            tfile = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
            tfile.write(uploaded_file.getvalue())
            tfile.close() # Fixes Windows [WinError 32] lock

            cap = cv2.VideoCapture(tfile.name)
            fps = int(cap.get(cv2.CAP_PROP_FPS))
            # Some containers report 0 FPS; assume a sane default.
            if fps == 0:
                fps = 30
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
            if width <= 0 or height <= 0:
                cap.release()
                os.remove(tfile.name)
                st.error("Could not read video dimensions. Please upload a valid video file.")
                st.stop()
            video_duration = total_frames / float(fps) if total_frames > 0 else 0
            st.info(
                f"Video loaded: {width}x{height}, {fps} FPS, "
                f"{total_frames if total_frames > 0 else 'unknown'} frames"
                f"{f', duration {format_elapsed(video_duration)}' if video_duration > 0 else ''}."
            )

            # Ensure multiples of 2 for codecs
            new_width = width if width % 2 == 0 else width - 1
            new_height = height if height % 2 == 0 else height - 1

            tfile_out = tempfile.NamedTemporaryFile(delete=False, suffix='.mp4')
            tfile_out.close() # Fixes Windows [WinError 32] lock
            # mp4v is a safe fallback codec for cv2.VideoWriter
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            out = cv2.VideoWriter(tfile_out.name, fourcc, fps, (new_width, new_height))

            progress_bar = st.progress(0)
            status_text = st.empty()
            elapsed_text = st.empty()
            video_start = time.perf_counter()

            peak_count = 0
            frames_processed = 0
            frames_analyzed = 0
            crowd_timeline = []
            analyzed_counts = []
            live_count = 0
            used_batch_size = inference_batch_size
            # Placeholder written out for frames skipped before the first analysis.
            last_out_frame = np.zeros((new_height, new_width, 3), dtype=np.uint8)

            # --- Analytics Inits ---
            tracker = Tracker(max_distance=tracker_max_distance, max_age=5)
            report = ReportGenerator()
            total_unique = 0

            # Helper for consistent coloring based on unique ID
            def get_color(track_id):
                np.random.seed(track_id)
                return tuple(int(x) for x in np.random.randint(0, 255, 3))

            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                # Check for frame skip optimization
                if frames_processed % frame_skip == 0:
                    # Convert cv2 frame (BGR) to PIL RGB
                    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    pil_img = Image.fromarray(frame_rgb)

                    # Process single frame safely
                    (img_out, live_count, raw_points), used_batch_size = process_frame_with_oom_recovery(
                        pil_img,
                        model,
                        device,
                        transform,
                        confidence_threshold,
                        max_dim=max_resolution,
                        magnification=magnification,
                        nms_radius=nms_radius,
                        batch_size=inference_batch_size,
                        patch_overlap=MODE_SETTINGS[processing_mode]["patch_overlap"],
                        inference_strategy=inference_strategy,
                    )
                    frames_analyzed += 1
                    analyzed_counts.append(live_count)

                    img_out = img_out.resize((new_width, new_height))
                    img_out_bgr = cv2.cvtColor(np.array(img_out), cv2.COLOR_RGB2BGR)

                    # Track
                    active_tracks, cumulative_unique, anomaly = tracker.update(img_out_bgr, raw_points)
                    total_unique = cumulative_unique

                    # Draw faintly all raw points
                    for p in raw_points:
                        cv2.circle(img_out_bgr, (int(p[0]), int(p[1])), 2, (0, 0, 100), -1)

                    # Draw strongly unique IDs
                    for t in active_tracks:
                        color = get_color(t.id)
                        cv2.circle(img_out_bgr, (int(t.pt[0]), int(t.pt[1])), 4, color, -1)
                    if anomaly:
                        st.warning("High motion anomaly detected in the current processed segment.")

                    last_out_frame = img_out_bgr

                    if live_count > peak_count:
                        peak_count = live_count

                    current_time_sec = frames_processed / float(fps)
                    report.add_frame_data(frames_processed, current_time_sec, live_count, total_unique)
                else:
                    # For skipped frames, duplicate the last drawn frame for continuous smooth playback
                    pass

                crowd_timeline.append(live_count)
                out.write(last_out_frame)

                frames_processed += 1
                if total_frames > 0:
                    encoded_status = min(frames_processed / total_frames, 1.0)
                    progress_bar.progress(encoded_status)
                    elapsed = time.perf_counter() - video_start
                    status_text.text(f"Processing video: {encoded_status * 100:.1f}% | Frame {frames_processed}/{total_frames} | Unique Targets: {total_unique}")
                    elapsed_text.text(f"Elapsed processing time: {format_elapsed(elapsed)}")
                else:
                    # Frame count unknown (some streams): show an indeterminate status.
                    elapsed = time.perf_counter() - video_start
                    status_text.text(f"Processing frame {frames_processed}... (Unique Targets: {total_unique})")
                    elapsed_text.text(f"Elapsed processing time: {format_elapsed(elapsed)}")

            cap.release()
            out.release()
            os.remove(tfile.name)

            video_elapsed = time.perf_counter() - video_start
            progress_bar.progress(1.0)
            status_text.text(f"Processing Complete: 100.0% | Finalizing video output...")
            elapsed_text.text(f"Total processed time: {format_elapsed(video_elapsed)}")

            # Try to encode the resulting MP4 in H264 for web browser compatibility using ffmpeg (if available)
            web_friendly_mp4 = tfile_out.name.replace('.mp4', '_web.mp4')
            result_video_path = tfile_out.name

            try:
                # Suppress output to avoid clutter, run synchronously
                # NOTE(review): ">nul" is Windows shell redirection — on POSIX this
                # creates a file named "nul"; confirm target platform.
                exit_code = os.system(f'ffmpeg -y -i "{tfile_out.name}" -vcodec libx264 -f mp4 "{web_friendly_mp4}" >nul 2>&1')
                if exit_code == 0 and os.path.exists(web_friendly_mp4):
                    result_video_path = web_friendly_mp4
            except Exception:
                pass

            st.success("Video Analytics Compilation Completed.")

            # Subsystem Alerts
            render_alert(total_unique, venue_capacity)

            # Telemetry Metrics
            m1, m2 = st.columns(2)
            m1.metric(label="Peak Current Frame Count", value=peak_count)
            m2.metric(label="Total Unique Individuals Tracked", value=total_unique)
            st.metric(label="Total Processed Time", value=format_elapsed(video_elapsed))
            ci_low, ci_high = confidence_interval(analyzed_counts)
            s1, s2, s3 = st.columns(3)
            s1.metric(label="Effective FPS", value=f"{frames_processed / video_elapsed:.2f}" if video_elapsed > 0 else "0.00")
            s2.metric(label="Analyzed FPS", value=f"{frames_analyzed / video_elapsed:.2f}" if video_elapsed > 0 else "0.00")
            s3.metric(label="95% Count CI", value=f"{ci_low:.1f} - {ci_high:.1f}")
            st.caption(f"CUDA OOM recovery used batch size: {used_batch_size}")
            if peak_count < 10:
                st.warning(
                    "Low detection count. For top-down drone/crosswalk videos, use Processing Mode = Accurate. "
                    "If still low, turn off recommended preset and reduce Confidence Threshold toward 0.05."
                )

            st.subheader("Population History Dynamics")
            st.line_chart(crowd_timeline)

            st.subheader("Data Exports")
            e1, e2 = st.columns(2)
            if total_unique > 0:
                e1.download_button(label="Download CSV Report", data=report.get_csv(), file_name="drone_report.csv", mime="text/csv")
                e2.download_button(label="Download JSON Report", data=report.get_json(), file_name="drone_report.json", mime="application/json")
            else:
                st.info("No crowd targets detected to export.")

            st.subheader("Simulated Telemetry Video Pipeline")
            try:
                video_bytes = open(result_video_path, 'rb').read()
                st.video(video_bytes)
                st.download_button(label="Download Analytics Video", data=video_bytes, file_name="analytics_overlay_output.mp4", mime="video/mp4")
            except Exception as e:
                # Playback failure (codec/browser) is non-fatal; still offer the raw file.
                st.error("Could not load the generated video for playback in Streamlit, but you can download it anyway.")
                st.download_button(label="Download Analytics Video", data=open(tfile_out.name, 'rb').read(), file_name="analytics_overlay_output.mp4", mime="video/mp4")


else:
    st.info("Please upload an image or video from the sidebar to begin.")
|
app_enhancements.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import math
|
| 3 |
+
import os
|
| 4 |
+
import time
|
| 5 |
+
|
| 6 |
+
import torch
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Tuned parameter bundles for the three speed/quality trade-off modes.
# Each preset seeds the sidebar sliders: lower magnification/overlap and a
# higher frame skip trade accuracy for throughput.
PRESETS = {
    "Fast": {
        "magnification": 1.0,
        "confidence_threshold": 0.55,
        "nms_radius": 10.0,
        "tracker_max_distance": 65.0,
        "inference_batch_size": 16,
        "frame_skip": 5,
        "patch_overlap": 0.0,
    },
    "Balanced": {
        "magnification": 1.5,
        "confidence_threshold": 0.5,
        "nms_radius": 8.0,
        "tracker_max_distance": 50.0,
        "inference_batch_size": 8,
        "frame_skip": 2,
        "patch_overlap": 0.25,
    },
    "Accurate": {
        "magnification": 2.0,
        "confidence_threshold": 0.45,
        "nms_radius": 6.0,
        "tracker_max_distance": 40.0,
        "inference_batch_size": 4,
        "frame_skip": 1,
        "patch_overlap": 0.5,
    },
}
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def load_config(config_path, defaults):
    """Read persisted settings from `config_path`, overlaying them on `defaults`.

    Missing file -> `defaults` is returned unchanged. Stored keys win over
    default keys; defaults fill in anything the file does not mention.
    """
    if not os.path.exists(config_path):
        return defaults
    with open(config_path, "r", encoding="utf-8") as handle:
        stored = json.load(handle)
    return {**defaults, **stored}
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def save_config(config_path, config):
    """Persist `config` as pretty-printed JSON at `config_path` (overwrites)."""
    serialized = json.dumps(config, indent=2)
    with open(config_path, "w", encoding="utf-8") as handle:
        handle.write(serialized)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def run_with_oom_recovery(process_fn, *args, batch_size=8, min_batch_size=1, **kwargs):
    """Invoke `process_fn`, halving the batch size whenever a CUDA OOM occurs.

    Non-OOM RuntimeErrors propagate untouched; OOM at `min_batch_size` is
    re-raised since no smaller retry is possible.

    Returns:
        (result, batch_size_used) on the first successful call.
    """
    attempt_batch = max(min_batch_size, int(batch_size))
    while attempt_batch >= min_batch_size:
        try:
            outcome = process_fn(*args, batch_size=attempt_batch, **kwargs)
        except RuntimeError as err:
            if "out of memory" not in str(err).lower():
                raise
            if torch.cuda.is_available():
                # Release cached allocations before the smaller retry.
                torch.cuda.empty_cache()
            if attempt_batch == min_batch_size:
                raise
            attempt_batch = max(min_batch_size, attempt_batch // 2)
        else:
            return outcome, attempt_batch
    raise RuntimeError("CUDA OOM recovery failed.")
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
def confidence_interval(values, z=1.96):
    """Two-sided normal-approximation confidence interval for the mean.

    Uses the sample (n-1) variance. Empty input yields (0.0, 0.0); a single
    value yields a degenerate interval at that value.
    """
    if not values:
        return 0.0, 0.0
    count = len(values)
    if count == 1:
        only = float(values[0])
        return only, only
    mean = sum(values) / count
    sample_var = sum((v - mean) ** 2 for v in values) / (count - 1)
    half_width = z * math.sqrt(sample_var) / math.sqrt(count)
    return mean - half_width, mean + half_width
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def result_summary(start_time, frames_read, frames_analyzed, counts):
    """Assemble a throughput/count summary dict for a finished processing run.

    `start_time` is a time.perf_counter() origin; elapsed time is floored at
    1e-9 s so the FPS divisions can never hit zero.
    """
    elapsed = max(time.perf_counter() - start_time, 1e-9)
    ci_low, ci_high = confidence_interval(counts)
    has_counts = bool(counts)
    average = round(sum(counts) / len(counts), 3) if has_counts else 0
    peak = max(counts) if has_counts else 0
    return {
        "elapsed_sec": round(elapsed, 3),
        "frames_read": frames_read,
        "frames_analyzed": frames_analyzed,
        "effective_fps": round(frames_read / elapsed, 3),
        "analysis_fps": round(frames_analyzed / elapsed, 3),
        "avg_count": average,
        "peak_count": peak,
        "count_95ci_low": round(ci_low, 3),
        "count_95ci_high": round(ci_high, 3),
    }
|
| 93 |
+
|
| 94 |
+
|
| 95 |
+
APP_PASTE_SECTION = r'''
|
| 96 |
+
CONFIG_PATH = os.path.join(BASE_DIR, "civic_pulse_config.json")
|
| 97 |
+
PRESETS = {
|
| 98 |
+
"Fast": {"magnification": 1.0, "confidence_threshold": 0.55, "nms_radius": 10.0, "tracker_max_distance": 65.0, "inference_batch_size": 16, "frame_skip": 5, "patch_overlap": 0.0},
|
| 99 |
+
"Balanced": {"magnification": 1.5, "confidence_threshold": 0.5, "nms_radius": 8.0, "tracker_max_distance": 50.0, "inference_batch_size": 8, "frame_skip": 2, "patch_overlap": 0.25},
|
| 100 |
+
"Accurate": {"magnification": 2.0, "confidence_threshold": 0.45, "nms_radius": 6.0, "tracker_max_distance": 40.0, "inference_batch_size": 4, "frame_skip": 1, "patch_overlap": 0.5},
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
def process_with_oom_recovery(*args, batch_size, **kwargs):
|
| 104 |
+
while batch_size >= 1:
|
| 105 |
+
try:
|
| 106 |
+
return process_frame(*args, batch_size=batch_size, **kwargs), batch_size
|
| 107 |
+
except RuntimeError as exc:
|
| 108 |
+
if "out of memory" not in str(exc).lower():
|
| 109 |
+
raise
|
| 110 |
+
if torch.cuda.is_available():
|
| 111 |
+
torch.cuda.empty_cache()
|
| 112 |
+
if batch_size == 1:
|
| 113 |
+
raise
|
| 114 |
+
batch_size = max(1, batch_size // 2)
|
| 115 |
+
'''
|
benchmark.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import csv
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import time
|
| 6 |
+
from itertools import product
|
| 7 |
+
|
| 8 |
+
import cv2
|
| 9 |
+
import numpy as np
|
| 10 |
+
import torch
|
| 11 |
+
import torchvision.transforms as standard_transforms
|
| 12 |
+
from PIL import Image
|
| 13 |
+
from scipy.spatial import cKDTree
|
| 14 |
+
|
| 15 |
+
from models import build_model
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Args:
    """Minimal stand-in for the argparse namespace build_model() expects."""
    # Feature-extractor backbone identifier.
    backbone = "vgg16_bn"
    # row/line: P2PNet point-proposal grid parameters — confirm their exact
    # semantics against models/p2pnet.py.
    row = 2
    line = 2
|
| 23 |
+
|
| 24 |
+
def load_model(weight_path):
    """Build the P2PNet model for benchmarking and load weights if present.

    Returns:
        (model, device, transform): eval-mode model on the best available
        device, plus the input normalization transform.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    if device.type == "cuda":
        # Auto-tune cuDNN conv algorithms for repeated input shapes.
        torch.backends.cudnn.benchmark = True
    model = build_model(Args()).to(device).eval()
    # Missing weights are tolerated silently here; counts will be meaningless.
    if os.path.exists(weight_path):
        checkpoint = torch.load(weight_path, map_location=device)
        model.load_state_dict(checkpoint["model"])
    # ImageNet normalization matching the backbone's pretraining.
    transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225]),
    ])
    return model, device, transform
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def merge_points(points, radius=8.0):
    """Collapse detections closer than `radius`, keeping one point per pair.

    A KD-tree finds all pairs within `radius`; for each pair whose members are
    both still alive, the higher-index member is suppressed.
    """
    if not points:
        return []
    coords = np.array(points, dtype=np.float32)
    neighbor_pairs = cKDTree(coords).query_pairs(r=radius)
    dropped = set()
    for keep_idx, drop_idx in neighbor_pairs:
        if keep_idx not in dropped and drop_idx not in dropped:
            dropped.add(drop_idx)
    survivors = [coords[idx].tolist() for idx in range(len(coords)) if idx not in dropped]
    return survivors
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
def infer_frame(image, model, device, transform, confidence, magnification, batch_size, patch_overlap):
    """Run tiled P2PNet inference on one PIL frame and return merged points.

    Mirrors app.py's tiling path: magnify (capped at 3840 px on the long
    side), pad symmetrically, crop overlapping 512-px tiles, batch them
    through the model, map detections back to original-image coordinates,
    and de-duplicate with merge_points.

    Returns:
        List of [x, y] detection coordinates in original-image space.
    """
    orig_w, orig_h = image.size
    patch_size = 512
    pad = 256
    work_w, work_h = int(orig_w * magnification), int(orig_h * magnification)
    # Cap the working resolution; recompute the effective magnification so
    # coordinates can be mapped back exactly.
    scale = min(1.0, 3840 / float(max(work_w, work_h)))
    work_w, work_h = int(work_w * scale), int(work_h * scale)
    magnification = work_w / float(orig_w)
    # Pillow >= 9.1 moved LANCZOS under Image.Resampling; fall back for older versions.
    resample_filter = getattr(Image, "Resampling", Image).LANCZOS if hasattr(Image, "Resampling") else getattr(Image, "ANTIALIAS", 1)
    image = image.resize((work_w, work_h), resample_filter)
    # Pad up to a whole number of tiles so border regions are fully covered.
    padded_w = ((work_w + pad * 2 + patch_size - 1) // patch_size) * patch_size
    padded_h = ((work_h + pad * 2 + patch_size - 1) // patch_size) * patch_size
    padded = Image.new("RGB", (padded_w, padded_h), (0, 0, 0))
    padded.paste(image, (pad, pad))
    stride = max(64, int(patch_size * (1.0 - patch_overlap)))
    jobs = []
    for y in range(0, padded_h - stride + 1, stride):
        for x in range(0, padded_w - stride + 1, stride):
            # Only full-size tiles are processed.
            if x + patch_size <= padded_w and y + patch_size <= padded_h:
                jobs.append((x, y, padded.crop((x, y, x + patch_size, y + patch_size))))

    all_points = []
    for start in range(0, len(jobs), batch_size):
        batch = jobs[start:start + batch_size]
        samples = torch.stack([transform(patch) for _, _, patch in batch]).to(device)
        with torch.inference_mode():
            if device.type == "cuda":
                # Mixed precision on GPU for speed.
                with torch.cuda.amp.autocast():
                    out = model(samples)
            else:
                out = model(samples)
        # Class index 1 is the positive ("person") probability.
        scores = torch.nn.functional.softmax(out["pred_logits"].float(), -1)[:, :, 1]
        points = out["pred_points"].float()
        for idx, (x, y, _) in enumerate(batch):
            selected = points[idx][scores[idx] > confidence].detach().cpu().numpy()
            if len(selected):
                # Undo the padding offset, then the magnification, to land in
                # original-image coordinates; drop anything out of bounds.
                selected[:, 0] += x - pad
                selected[:, 1] += y - pad
                selected /= float(magnification)
                all_points.extend([
                    p.tolist() for p in selected
                    if 0 <= p[0] < orig_w and 0 <= p[1] < orig_h
                ])
    return merge_points(all_points)
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def run_config(video, model, device, transform, cfg, max_frames):
    """Run crowd-count inference over one video with a single config.

    Args:
        video: path to the input video file.
        model, device, transform: inference bundle from load_model().
        cfg: dict with keys frame_skip, magnification, batch_size,
            confidence, patch_overlap.
        max_frames: stop after reading this many frames (0/None = all).

    Returns:
        dict merging cfg with frame/timing/count statistics.
    """
    cap = cv2.VideoCapture(video)
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # BUGFIX: frame_skip == 0 previously raised ZeroDivisionError in the
    # modulo below; clamp to 1 so it means "analyze every frame".
    frame_skip = max(1, int(cfg["frame_skip"]))
    frames_read = 0
    frames_analyzed = 0
    counts = []
    start = time.perf_counter()
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret or (max_frames and frames_read >= max_frames):
            break
        if frames_read % frame_skip == 0:
            # OpenCV decodes BGR; the model pipeline expects an RGB PIL image.
            image = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
            points = infer_frame(image, model, device, transform, cfg["confidence"], cfg["magnification"], cfg["batch_size"], cfg["patch_overlap"])
            counts.append(len(points))
            frames_analyzed += 1
        frames_read += 1
    cap.release()
    elapsed = time.perf_counter() - start
    return {
        **cfg,
        "video_frames": total_frames,
        "frames_read": frames_read,
        "frames_analyzed": frames_analyzed,
        "elapsed_sec": round(elapsed, 4),
        "effective_fps": round(frames_read / elapsed, 4) if elapsed else 0,
        "analysis_fps": round(frames_analyzed / elapsed, 4) if elapsed else 0,
        "avg_count": round(float(np.mean(counts)), 4) if counts else 0,
        "max_count": int(max(counts)) if counts else 0,
        "std_count": round(float(np.std(counts)), 4) if counts else 0,
    }
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def recommendations(rows):
    """Pick three representative configs from benchmark result rows.

    "fast" maximizes throughput; "balanced" minimizes count jitter with
    ties broken toward higher throughput; "accurate" favors the most
    thorough settings (more overlap, more magnification, fewer skips).
    """
    fastest = max(rows, key=lambda r: r["effective_fps"])
    steadiest = min(rows, key=lambda r: (r["std_count"], -r["effective_fps"]))
    thorough = max(rows, key=lambda r: (r["patch_overlap"], r["magnification"], -r["frame_skip"]))
    return {"fast": fastest, "balanced": steadiest, "accurate": thorough}
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def main():
    """Benchmark every parameter combination on one video and write CSV/JSON reports."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--video", required=True)
    parser.add_argument("--weights", default=os.path.join("weights", "SHTechA.pth"))
    parser.add_argument("--output_dir", default="benchmark_results")
    parser.add_argument("--max_frames", type=int, default=120)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)

    model, device, transform = load_model(args.weights)

    # Sweep the full parameter grid. Patch overlap is tied to frame_skip so
    # the slowest (skip=1) runs also use the most thorough tiling.
    overlap_for_skip = {1: 0.5, 2: 0.25, 5: 0.0}
    configs = [
        {
            "frame_skip": frame_skip,
            "magnification": mag,
            "batch_size": batch,
            "confidence": conf,
            "patch_overlap": overlap_for_skip[frame_skip],
        }
        for frame_skip, mag, batch, conf in product([1, 2, 5], [1.0, 1.5, 2.0], [4, 8, 16], [0.45, 0.5, 0.55])
    ]
    rows = [run_config(args.video, model, device, transform, cfg, args.max_frames) for cfg in configs]
    recs = recommendations(rows)

    csv_path = os.path.join(args.output_dir, "benchmark_results.csv")
    json_path = os.path.join(args.output_dir, "benchmark_results.json")
    with open(csv_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()))
        writer.writeheader()
        writer.writerows(rows)
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump({"results": rows, "recommendations": recs}, f, indent=2)
    print(json.dumps({"csv": csv_path, "json": json_path, "recommendations": recs}, indent=2))
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# Script entry point: run the benchmark sweep when executed directly.
if __name__ == "__main__":
    main()
|
crowd_datasets/SHHA/SHHA.py
ADDED
|
@@ -0,0 +1,158 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import random
|
| 3 |
+
import torch
|
| 4 |
+
import numpy as np
|
| 5 |
+
from torch.utils.data import Dataset
|
| 6 |
+
from PIL import Image
|
| 7 |
+
import cv2
|
| 8 |
+
import glob
|
| 9 |
+
import scipy.io as io
|
| 10 |
+
import re
|
| 11 |
+
|
| 12 |
+
class SHHA(Dataset):
    """ShanghaiTech part-A style point-annotated crowd-counting dataset.

    List files (comma-separated names allowed) map image paths to
    ground-truth point files, one tab- or whitespace-separated pair per
    line. Training mode optionally applies random rescaling, random
    128x128 patch cropping, and horizontal flipping.
    """

    def __init__(
        self,
        data_root,
        transform=None,
        train=False,
        patch=False,
        flip=False,
        train_list="shanghai_tech_part_a_train.list",
        eval_list="shanghai_tech_part_a_test.list",
    ):
        self.root_path = data_root
        self.train_lists = train_list
        self.eval_list = eval_list
        # there may exist multiple (comma-separated) list files
        if train:
            self.img_list_file = self.train_lists.split(',')
        else:
            self.img_list_file = self.eval_list.split(',')

        self.img_map = {}
        self.img_list = []
        # load the image/gt path pairs from each list file
        for list_name in self.img_list_file:
            list_name = list_name.strip()
            with open(os.path.join(self.root_path, list_name)) as fin:
                for line in fin:
                    if len(line) < 2:
                        continue
                    line = line.strip()
                    if "\t" in line:
                        img_path, gt_path = line.split("\t", 1)
                    else:
                        fields = line.split()
                        if len(fields) < 2:
                            continue
                        img_path = fields[0]
                        gt_path = fields[1]
                    img_path = img_path.strip()
                    gt_path = gt_path.strip()
                    # resolve relative paths against the dataset root
                    if not os.path.isabs(img_path):
                        img_path = os.path.join(self.root_path, img_path)
                    if not os.path.isabs(gt_path):
                        gt_path = os.path.join(self.root_path, gt_path)
                    self.img_map[img_path] = gt_path
        self.img_list = sorted(self.img_map.keys())
        # number of samples
        self.nSamples = len(self.img_list)

        self.transform = transform
        self.train = train
        self.patch = patch
        self.flip = flip

    def __len__(self):
        return self.nSamples

    def __getitem__(self, index):
        # BUGFIX: was `index <= len(self)`, which accepted index == len(self)
        # and crashed later with an IndexError instead of a clear assert.
        assert index < len(self), 'index range error'

        img_path = self.img_list[index]
        gt_path = self.img_map[img_path]
        # load image and ground truth points
        img, point = load_data((img_path, gt_path), self.train)
        # apply the preprocessing transform (ToTensor + normalization)
        if self.transform is not None:
            img = self.transform(img)

        if self.train:
            # data augmentation -> random scale
            scale_range = [0.7, 1.3]
            min_size = min(img.shape[1:])
            scale = random.uniform(*scale_range)
            # scale the image and points only if the result stays above
            # 128 px, so that 128x128 crops remain possible
            if scale * min_size > 128:
                # interpolate(..., mode='bilinear', align_corners=True) is
                # the documented replacement for the deprecated
                # F.upsample_bilinear and produces identical output.
                img = torch.nn.functional.interpolate(
                    img.unsqueeze(0), scale_factor=scale,
                    mode='bilinear', align_corners=True).squeeze(0)
                point *= scale
        # random crop augmentation
        if self.train and self.patch:
            img, point = random_crop(img, point)
            for i, _ in enumerate(point):
                point[i] = torch.Tensor(point[i])
        # random horizontal flipping (crops are 128 px wide, hence 128 - x)
        if random.random() > 0.5 and self.train and self.flip:
            img = torch.Tensor(img[:, :, :, ::-1].copy())
            for i, _ in enumerate(point):
                point[i][:, 0] = 128 - point[i][:, 0]

        if not self.train:
            point = [point]

        img = torch.Tensor(img)
        # pack up related infos
        target = [{} for i in range(len(point))]
        for i, _ in enumerate(point):
            target[i]['point'] = torch.Tensor(point[i])
            # derive a numeric image id from the trailing digits of the
            # file name; fall back to the dataset index
            image_stem = os.path.splitext(os.path.basename(img_path))[0]
            digits = re.findall(r'\d+', image_stem)
            image_id = int(digits[-1]) if digits else index
            image_id = torch.Tensor([image_id]).long()
            target[i]['image_id'] = image_id
            target[i]['labels'] = torch.ones([point[i].shape[0]]).long()

        return img, target
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def load_data(img_gt_path, train):
    """Load one (image, points) sample pair.

    Args:
        img_gt_path: (image_path, gt_path) tuple; the ground-truth file
            holds one "x y" coordinate pair per line.
        train: unused here; kept for interface compatibility with callers.

    Returns:
        (PIL RGB image, (N, 2) numpy array of head points).

    Raises:
        FileNotFoundError: if the image cannot be read.
    """
    img_path, gt_path = img_gt_path
    # load the image (cv2.imread returns None on missing/corrupt files;
    # fail loudly instead of crashing later inside cvtColor)
    img = cv2.imread(img_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {img_path}")
    img = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    # load ground truth points; split() tolerates tabs/repeated spaces,
    # unlike the previous split(' ') which also parsed each line twice
    points = []
    with open(gt_path) as f_label:
        for line in f_label:
            parts = line.split()
            if len(parts) < 2:
                continue
            points.append([float(parts[0]), float(parts[1])])

    return img, np.array(points)
|
| 134 |
+
|
| 135 |
+
# random crop augmentation
def random_crop(img, den, num_patch=4):
    """Cut `num_patch` random 128x128 windows out of `img` (a C,H,W tensor)
    and keep, per window, the annotation points falling inside it, shifted
    into window-local coordinates. Returns (patch array, list of point arrays)."""
    crop_h = 128
    crop_w = 128
    patches = np.zeros([num_patch, img.shape[0], crop_h, crop_w])
    kept_points = []
    # crop num_patch windows from this image
    for k in range(num_patch):
        top = random.randint(0, img.size(1) - crop_h)
        left = random.randint(0, img.size(2) - crop_w)
        bottom = top + crop_h
        right = left + crop_w
        # copy the cropped rect
        patches[k] = img[:, top:bottom, left:right]
        # select the points inside this window
        inside = (den[:, 0] >= left) & (den[:, 0] <= right) & (den[:, 1] >= top) & (den[:, 1] <= bottom)
        # shift the coordinates into window space
        shifted = den[inside]
        shifted[:, 0] -= left
        shifted[:, 1] -= top

        kept_points.append(shifted)

    return patches, kept_points
|
crowd_datasets/SHHA/__init__.py
ADDED
|
File without changes
|
crowd_datasets/SHHA/loading_data.py
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torchvision.transforms as standard_transforms
|
| 2 |
+
from .SHHA import SHHA
|
| 3 |
+
|
| 4 |
+
# DeNormalize used to get original images
class DeNormalize(object):
    """Invert torchvision Normalize in place: t = t * std + mean, per channel."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # Walk the channels alongside their per-channel stats and undo
        # the normalization in place; returns the same tensor object.
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.mul_(channel_std).add_(channel_mean)
        return tensor
|
| 14 |
+
|
| 15 |
+
def loading_data(data_root):
    """Build the SHHA (train_set, val_set) pair with ImageNet normalization."""
    # the pre-processing transform: ToTensor + ImageNet mean/std
    preprocess = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225]),
    ])
    # training set uses random-crop patches and horizontal flips
    train_set = SHHA(data_root, train=True, transform=preprocess, patch=True, flip=True)
    # validation set is evaluated on full, un-augmented images
    val_set = SHHA(data_root, train=False, transform=preprocess)
    return train_set, val_set
|
crowd_datasets/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# build dataset according to given 'dataset_file'
def build_dataset(args):
    """Return the data-loading function for args.dataset_file (case-insensitive).

    Only the ShanghaiTech part-A loader ('SHHA') is supported; any other
    value raises ValueError.
    """
    if args.dataset_file.upper() == 'SHHA':
        from crowd_datasets.SHHA.loading_data import loading_data
        return loading_data
    raise ValueError(f"Unsupported dataset_file: {args.dataset_file}. Use SHHA.")
|
database.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from sqlmodel import SQLModel, Field, create_engine, Session
|
| 2 |
+
|
| 3 |
+
class FlightReport(SQLModel, table=True):
    """One per-video analysis summary row persisted to SQLite via SQLModel."""
    # Auto-increment surrogate key (assigned by the database on insert).
    id: int | None = Field(default=None, primary_key=True)
    # Name of the analyzed video file.
    filename: str
    # presumably True when the crowd count exceeded the configured capacity
    # at some point — confirm against the writer in the API layer.
    max_capacity_breached: bool
    # Highest crowd count recorded for this video.
    peak_crowd_count: int
    # Length of the analyzed clip, in frames.
    duration_frames: int
    # Number of chaos/anomaly events recorded for this video.
    chaos_anomalies: int
|
| 10 |
+
|
| 11 |
+
engine = create_engine("sqlite:///crowd_data.db")
|
| 12 |
+
|
| 13 |
+
def init_db():
    """Create all SQLModel tables on the SQLite engine (idempotent:
    create_all skips tables that already exist)."""
    SQLModel.metadata.create_all(engine)
|
download_weights.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
download_weights.py
|
| 3 |
+
Auto-downloads the P2PNet model weights from HuggingFace Hub if they are
|
| 4 |
+
not present locally. Called at FastAPI startup so the container always has
|
| 5 |
+
the weights without committing the 82 MB .pth file to Git.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
HF_WEIGHTS_REPO = os.environ.get(
|
| 12 |
+
"HF_WEIGHTS_REPO",
|
| 13 |
+
"YOUR_HF_USERNAME/crowd-counting-weights",
|
| 14 |
+
)
|
| 15 |
+
WEIGHTS_FILENAME = "SHTechA.pth"
|
| 16 |
+
WEIGHTS_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "weights")
|
| 17 |
+
WEIGHTS_PATH = os.path.join(WEIGHTS_DIR, WEIGHTS_FILENAME)
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def ensure_weights() -> None:
    """Download model weights from HuggingFace Hub if not present locally."""
    # Fast path: weights already on disk.
    if os.path.exists(WEIGHTS_PATH):
        print(f"[Weights] Found at {WEIGHTS_PATH} - skipping download.")
        return

    # Refuse to hit the Hub with an empty or template repo id.
    if (not HF_WEIGHTS_REPO
            or "YOUR_HF_USERNAME" in HF_WEIGHTS_REPO
            or "your-username" in HF_WEIGHTS_REPO.lower()):
        print("[Weights] No valid HuggingFace weights repo configured - skipping download.")
        print("[Weights] The model will run without pretrained weights.")
        return

    print(
        f"[Weights] Not found locally. Downloading '{WEIGHTS_FILENAME}' "
        f"from HuggingFace Hub repo '{HF_WEIGHTS_REPO}' ..."
    )
    os.makedirs(WEIGHTS_DIR, exist_ok=True)

    try:
        # Imported lazily so startup still works if huggingface_hub is absent.
        from huggingface_hub import hf_hub_download

        local_path = hf_hub_download(
            repo_id=HF_WEIGHTS_REPO,
            filename=WEIGHTS_FILENAME,
            local_dir=WEIGHTS_DIR,
        )
        print(f"[Weights] Downloaded successfully -> {local_path}")
    except Exception as exc:
        # Best-effort: the model can still run (untrained) without weights.
        print(f"[Weights] WARNING: Could not download weights - {exc}")
        print("[Weights] The model will run without pretrained weights.")
|
engine.py
ADDED
|
@@ -0,0 +1,159 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Train and eval functions used in main.py
|
| 4 |
+
Mostly copy-paste from DETR (https://github.com/facebookresearch/detr).
|
| 5 |
+
"""
|
| 6 |
+
import math
|
| 7 |
+
import os
|
| 8 |
+
import sys
|
| 9 |
+
from typing import Iterable
|
| 10 |
+
|
| 11 |
+
import torch
|
| 12 |
+
|
| 13 |
+
import util.misc as utils
|
| 14 |
+
from util.misc import NestedTensor
|
| 15 |
+
import numpy as np
|
| 16 |
+
import time
|
| 17 |
+
import torchvision.transforms as standard_transforms
|
| 18 |
+
import cv2
|
| 19 |
+
|
| 20 |
+
class DeNormalize(object):
    """Invert torchvision Normalize in place: t = t * std + mean, per channel."""

    def __init__(self, mean, std):
        self.mean = mean
        self.std = std

    def __call__(self, tensor):
        # Undo the normalization channel by channel, in place, and return
        # the same tensor object.
        for channel, channel_mean, channel_std in zip(tensor, self.mean, self.std):
            channel.mul_(channel_std).add_(channel_mean)
        return tensor
|
| 29 |
+
|
| 30 |
+
def vis(samples, targets, pred, vis_dir, des=None):
    '''
    Save per-image ground-truth and prediction overlay images into vis_dir.

    samples -> tensor: [batch, 3, H, W]
    targets -> list of dict: [{'points':[], 'image_id': str}]
    pred -> list: [num_preds, 2]
    '''
    gts = [t['point'].tolist() for t in targets]

    pil_to_tensor = standard_transforms.ToTensor()

    # Undo ImageNet normalization so the saved images are viewable.
    restore_transform = standard_transforms.Compose([
        DeNormalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        standard_transforms.ToPILImage()
    ])
    # draw one by one
    for idx in range(samples.shape[0]):
        sample = restore_transform(samples[idx])
        # back to an HxWx3 uint8 BGR array for OpenCV drawing
        sample = pil_to_tensor(sample.convert('RGB')).numpy() * 255
        sample_gt = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8).copy()
        sample_pred = sample.transpose([1, 2, 0])[:, :, ::-1].astype(np.uint8).copy()

        max_len = np.max(sample_gt.shape)  # NOTE(review): computed but never used

        size = 2
        # draw gt (filled green dots, BGR (0, 255, 0))
        for t in gts[idx]:
            sample_gt = cv2.circle(sample_gt, (int(t[0]), int(t[1])), size, (0, 255, 0), -1)
        # draw predictions (filled red dots, BGR (0, 0, 255))
        for p in pred[idx]:
            sample_pred = cv2.circle(sample_pred, (int(p[0]), int(p[1])), size, (0, 0, 255), -1)

        name = targets[idx]['image_id']
        # save the visualized images; filenames embed GT/pred counts and
        # the optional `des` tag
        if des is not None:
            cv2.imwrite(os.path.join(vis_dir, '{}_{}_gt_{}_pred_{}_gt.jpg'.format(int(name),
                                                des, len(gts[idx]), len(pred[idx]))), sample_gt)
            cv2.imwrite(os.path.join(vis_dir, '{}_{}_gt_{}_pred_{}_pred.jpg'.format(int(name),
                                                des, len(gts[idx]), len(pred[idx]))), sample_pred)
        else:
            cv2.imwrite(
                os.path.join(vis_dir, '{}_gt_{}_pred_{}_gt.jpg'.format(int(name), len(gts[idx]), len(pred[idx]))),
                sample_gt)
            cv2.imwrite(
                os.path.join(vis_dir, '{}_gt_{}_pred_{}_pred.jpg'.format(int(name), len(gts[idx]), len(pred[idx]))),
                sample_pred)
|
| 75 |
+
|
| 76 |
+
# the training routine
def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module,
                    data_loader: Iterable, optimizer: torch.optim.Optimizer,
                    device: torch.device, epoch: int, max_norm: float = 0):
    """Run one training epoch and return the averaged metrics.

    Args:
        model: network to train (switched to train mode here).
        criterion: loss module exposing a `weight_dict` of per-loss weights.
        data_loader: yields (samples, targets) batches.
        optimizer: stepped once per batch.
        device: device to move batches to.
        epoch: current epoch index (not used inside this function).
        max_norm: if > 0, clip the gradient norm to this value.

    Returns:
        dict mapping metric name -> global average over the epoch.
    """
    model.train()
    criterion.train()
    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    # iterate all training samples
    for samples, targets in data_loader:
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]
        # forward
        outputs = model(samples)
        # calc the losses; only losses present in weight_dict contribute
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)

        # reduce all losses across distributed processes (for logging only)
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_unscaled = {f'{k}_unscaled': v
                                      for k, v in loss_dict_reduced.items()}
        loss_dict_reduced_scaled = {k: v * weight_dict[k]
                                    for k, v in loss_dict_reduced.items() if k in weight_dict}
        losses_reduced_scaled = sum(loss_dict_reduced_scaled.values())

        loss_value = losses_reduced_scaled.item()

        # abort on NaN/inf loss rather than keep training on garbage
        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            print(loss_dict_reduced)
            sys.exit(1)
        # backward
        optimizer.zero_grad()
        losses.backward()
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
        optimizer.step()
        # update logger
        metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled)
        metric_logger.update(lr=optimizer.param_groups[0]["lr"])
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
|
| 122 |
+
|
| 123 |
+
# the inference routine
@torch.no_grad()
def evaluate_crowd_no_overlap(model, data_loader, device, vis_dir=None):
    """Evaluate counting accuracy over a data loader.

    Indexes [0] into each batch, so the loader is expected to yield
    batch-size-1 batches. Returns (MAE, RMSE) — note the second value
    is the square root of the mean squared error.
    """
    model.eval()

    metric_logger = utils.MetricLogger(delimiter=" ")
    metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}'))
    # run inference on all images to calc MAE
    maes = []
    mses = []
    for samples, targets in data_loader:
        samples = samples.to(device)

        outputs = model(samples)
        # per-proposal "person" confidence: class-1 softmax of the logits
        outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]

        outputs_points = outputs['pred_points'][0]

        gt_cnt = targets[0]['point'].shape[0]
        # 0.5 is used by default
        threshold = 0.5

        points = outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist()
        predict_cnt = int((outputs_scores > threshold).sum())
        # if specified, save the visualized images
        if vis_dir is not None:
            vis(samples, targets, [points], vis_dir)
        # accumulate MAE, MSE
        mae = abs(predict_cnt - gt_cnt)
        mse = (predict_cnt - gt_cnt) * (predict_cnt - gt_cnt)
        maes.append(float(mae))
        mses.append(float(mse))
    # calc MAE, MSE (mse is reported as its square root, i.e. RMSE)
    mae = np.mean(maes)
    mse = np.sqrt(np.mean(mses))

    return mae, mse
|
evaluate.py
ADDED
|
@@ -0,0 +1,171 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import csv
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
|
| 6 |
+
import cv2
|
| 7 |
+
import numpy as np
|
| 8 |
+
import torch
|
| 9 |
+
import torchvision.transforms as standard_transforms
|
| 10 |
+
from PIL import Image
|
| 11 |
+
from scipy.optimize import linear_sum_assignment
|
| 12 |
+
from scipy.spatial import cKDTree
|
| 13 |
+
|
| 14 |
+
from models import build_model
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class Args:
    """Minimal argument shim passed to models.build_model.

    backbone: feature-extractor name for the model factory.
    row / line: model-factory parameters — presumably the point-proposal
    grid density; confirm in models/p2pnet.py.
    """
    backbone = "vgg16_bn"
    row = 2
    line = 2
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def load_model(weight_path):
    """Build the model, load weights if present, and return (model, device, transform).

    Falls back silently to the freshly initialized model when weight_path
    does not exist. The returned transform applies ImageNet normalization.
    """
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = build_model(Args()).to(device).eval()
    if os.path.exists(weight_path):
        # NOTE(review): torch.load unpickles arbitrary objects; only load
        # checkpoints from trusted sources (weights_only=True would be safer).
        checkpoint = torch.load(weight_path, map_location=device)
        model.load_state_dict(checkpoint["model"])
    transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                      std=[0.229, 0.224, 0.225]),
    ])
    return model, device, transform
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def infer_points(image, model, device, transform, confidence=0.5, magnification=1.5, batch_size=8):
    """Run tiled point inference over one PIL image.

    The image is upscaled by `magnification` (capped so the longest side
    stays <= 3840 px), padded, split into overlapping 512 px tiles, run
    through the model in batches, and the detections are mapped back to
    original-image coordinates. Duplicates from overlapping tiles are
    suppressed with an 8 px KD-tree pass.

    Returns a list of [x, y] points in original-image coordinates.
    """
    orig_w, orig_h = image.size
    patch_size = 512
    pad = 256
    work_w, work_h = int(orig_w * magnification), int(orig_h * magnification)
    # cap the working resolution at 3840 px on the longest side
    scale = min(1.0, 3840 / float(max(work_w, work_h)))
    work_w, work_h = int(work_w * scale), int(work_h * scale)
    # recompute the effective magnification after rounding/capping
    magnification = work_w / float(orig_w)
    # Pillow >= 9.1 moved resampling constants under Image.Resampling
    resample_filter = getattr(Image, "Resampling", Image).LANCZOS if hasattr(Image, "Resampling") else getattr(Image, "ANTIALIAS", 1)
    image = image.resize((work_w, work_h), resample_filter)
    # pad the canvas up to a multiple of patch_size so tiles cover it fully
    padded_w = ((work_w + pad * 2 + patch_size - 1) // patch_size) * patch_size
    padded_h = ((work_h + pad * 2 + patch_size - 1) // patch_size) * patch_size
    padded = Image.new("RGB", (padded_w, padded_h), (0, 0, 0))
    padded.paste(image, (pad, pad))
    stride = patch_size // 2  # 50% overlap between neighboring tiles
    jobs = []
    for y in range(0, padded_h - stride + 1, stride):
        for x in range(0, padded_w - stride + 1, stride):
            if x + patch_size <= padded_w and y + patch_size <= padded_h:
                jobs.append((x, y, padded.crop((x, y, x + patch_size, y + patch_size))))
    all_points = []
    for start in range(0, len(jobs), batch_size):
        batch = jobs[start:start + batch_size]
        samples = torch.stack([transform(patch) for _, _, patch in batch]).to(device)
        with torch.inference_mode():
            if device.type == "cuda":
                # mixed precision on GPU for throughput
                with torch.cuda.amp.autocast():
                    out = model(samples)
            else:
                out = model(samples)
        # class-1 probability per proposal
        scores = torch.nn.functional.softmax(out["pred_logits"].float(), -1)[:, :, 1]
        pred = out["pred_points"].float()
        for idx, (x, y, _) in enumerate(batch):
            pts = pred[idx][scores[idx] > confidence].detach().cpu().numpy()
            if len(pts):
                # shift tile-local coords into the padded canvas, remove
                # the padding offset, then rescale to the original image
                pts[:, 0] += x - pad
                pts[:, 1] += y - pad
                pts /= float(magnification)
                all_points.extend([p.tolist() for p in pts if 0 <= p[0] < orig_w and 0 <= p[1] < orig_h])
    if not all_points:
        return []
    # greedy duplicate suppression: for each pair closer than 8 px, drop one
    pts = np.array(all_points, dtype=np.float32)
    tree = cKDTree(pts)
    suppressed = set()
    for i, j in tree.query_pairs(r=8.0):
        if i not in suppressed and j not in suppressed:
            suppressed.add(j)
    return [pts[i].tolist() for i in range(len(pts)) if i not in suppressed]
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def load_gt(path):
    """Load ground-truth annotations from a JSON file.

    Accepts either a list of {"image", "points"} records or a mapping of
    image name -> points, optionally nested under an "annotations" key.
    Always returns a list of {"image", "points"} dicts.
    """
    with open(path, "r", encoding="utf-8") as f:
        payload = json.load(f)
    if isinstance(payload, dict) and "annotations" in payload:
        payload = payload["annotations"]
    if isinstance(payload, dict):
        payload = [{"image": name, "points": pts} for name, pts in payload.items()]
    return payload
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def precision_recall(pred_points, gt_points, radius):
    """Score predicted points against ground truth via one-to-one matching.

    Uses a Hungarian (min-cost) assignment on pairwise Euclidean distances;
    a matched pair counts as a true positive when its distance is within
    `radius` pixels. Both-empty inputs score as perfect, one-empty as zero.

    Returns:
        (precision, recall, matches, false_positives, false_negatives)
    """
    pred = np.array(pred_points, dtype=np.float32)
    gt = np.array(gt_points, dtype=np.float32)
    if len(pred) == 0 and len(gt) == 0:
        return 1.0, 1.0, 0, 0, 0
    if len(pred) == 0:
        return 0.0, 0.0, 0, 0, len(gt)
    if len(gt) == 0:
        return 0.0, 0.0, 0, len(pred), 0
    # pairwise distances, then a global min-cost one-to-one assignment
    pairwise = np.linalg.norm(pred[:, None, :] - gt[None, :, :], axis=2)
    pred_idx, gt_idx = linear_sum_assignment(pairwise)
    matches = int(np.count_nonzero(pairwise[pred_idx, gt_idx] <= radius))
    fp = len(pred) - matches
    fn = len(gt) - matches
    precision = matches / (matches + fp) if matches + fp else 0.0
    recall = matches / (matches + fn) if matches + fn else 0.0
    return precision, recall, matches, fp, fn
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def draw_visual(image_path, gt_points, pred_points, output_path):
    """Write an overlay image: GT points as filled green dots, predictions
    as red circle outlines.

    Raises:
        FileNotFoundError: if the image cannot be read. cv2.imread returns
            None on missing/unreadable files, which previously surfaced as
            an opaque crash inside cv2.circle.
    """
    img = cv2.imread(image_path)
    if img is None:
        raise FileNotFoundError(f"Could not read image: {image_path}")
    for x, y in gt_points:
        cv2.circle(img, (int(x), int(y)), 4, (0, 255, 0), -1)
    for x, y in pred_points:
        cv2.circle(img, (int(x), int(y)), 3, (0, 0, 255), 1)
    cv2.imwrite(output_path, img)
|
| 123 |
+
|
| 124 |
+
|
| 125 |
+
def main():
    """CLI entry: evaluate the model on a folder of annotated images.

    Writes per-image metrics to CSV, a JSON summary, and per-image overlay
    visualizations, then prints the output locations as JSON.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--images_dir", required=True)
    parser.add_argument("--gt_json", required=True)
    parser.add_argument("--weights", default=os.path.join("weights", "SHTechA.pth"))
    parser.add_argument("--output_dir", default="eval_results")
    parser.add_argument("--confidence", type=float, default=0.5)
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    vis_dir = os.path.join(args.output_dir, "visualizations")
    os.makedirs(vis_dir, exist_ok=True)
    model, device, transform = load_model(args.weights)
    rows = []
    errors = []
    squared_errors = []
    for item in load_gt(args.gt_json):
        image_name = item["image"]
        gt_points = item.get("points", [])
        # GT entries may hold absolute paths or paths relative to --images_dir
        image_path = image_name if os.path.isabs(image_name) else os.path.join(args.images_dir, image_name)
        pred_points = infer_points(Image.open(image_path).convert("RGB"), model, device, transform, args.confidence)
        err = abs(len(pred_points) - len(gt_points))
        errors.append(err)
        squared_errors.append(err ** 2)
        row = {"image": os.path.basename(image_path), "gt_count": len(gt_points), "pred_count": len(pred_points), "abs_error": err, "sq_error": err ** 2}
        # localization quality at several matching radii
        for radius in [5, 10, 15, 20]:
            p, r, m, fp, fn = precision_recall(pred_points, gt_points, radius)
            row[f"precision_{radius}px"] = round(p, 4)
            row[f"recall_{radius}px"] = round(r, 4)
            row[f"matches_{radius}px"] = m
            row[f"fp_{radius}px"] = fp
            row[f"fn_{radius}px"] = fn
        rows.append(row)
        draw_visual(image_path, gt_points, pred_points, os.path.join(vis_dir, os.path.splitext(os.path.basename(image_path))[0] + "_eval.png"))
    # NOTE: "mse" here is the mean of squared errors, not its square root
    summary = {"mae": round(float(np.mean(errors)), 4) if errors else 0, "mse": round(float(np.mean(squared_errors)), 4) if squared_errors else 0, "images": len(rows)}
    csv_path = os.path.join(args.output_dir, "evaluation.csv")
    json_path = os.path.join(args.output_dir, "evaluation_summary.json")
    with open(csv_path, "w", newline="", encoding="utf-8") as f:
        writer = csv.DictWriter(f, fieldnames=list(rows[0].keys()) if rows else ["image"])
        writer.writeheader()
        writer.writerows(rows)
    with open(json_path, "w", encoding="utf-8") as f:
        json.dump({"summary": summary, "rows": rows}, f, indent=2)
    print(json.dumps({"csv": csv_path, "json": json_path, "visualizations": vis_dir, "summary": summary}, indent=2))
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
# Script entry point: run the evaluation when executed directly.
if __name__ == "__main__":
    main()
|
frontend/.env.example
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copy this file to .env.local for local development, or to .env.production for prod builds.
|
| 2 |
+
# The app defaults to http://localhost:8000 if this is not set.
|
| 3 |
+
|
| 4 |
+
# Backend API URL (your HuggingFace Spaces URL or local server)
|
| 5 |
+
VITE_API_URL=http://localhost:8000
|
frontend/.env.production
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Production API URL — points to your HuggingFace Spaces backend
|
| 2 |
+
# Replace with your actual HF Spaces URL after deployment
|
| 3 |
+
VITE_API_URL=
|
frontend/.gitignore
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Logs
|
| 2 |
+
logs
|
| 3 |
+
*.log
|
| 4 |
+
npm-debug.log*
|
| 5 |
+
yarn-debug.log*
|
| 6 |
+
yarn-error.log*
|
| 7 |
+
pnpm-debug.log*
|
| 8 |
+
lerna-debug.log*
|
| 9 |
+
|
| 10 |
+
node_modules
|
| 11 |
+
dist
|
| 12 |
+
dist-ssr
|
| 13 |
+
*.local
|
| 14 |
+
|
| 15 |
+
# Editor directories and files
|
| 16 |
+
.vscode/*
|
| 17 |
+
!.vscode/extensions.json
|
| 18 |
+
.idea
|
| 19 |
+
.DS_Store
|
| 20 |
+
*.suo
|
| 21 |
+
*.ntvs*
|
| 22 |
+
*.njsproj
|
| 23 |
+
*.sln
|
| 24 |
+
*.sw?
|
frontend/README.md
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# React + Vite
|
| 2 |
+
|
| 3 |
+
This template provides a minimal setup to get React working in Vite with HMR and some ESLint rules.
|
| 4 |
+
|
| 5 |
+
Currently, two official plugins are available:
|
| 6 |
+
|
| 7 |
+
- [@vitejs/plugin-react](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react) uses [Oxc](https://oxc.rs)
|
| 8 |
+
- [@vitejs/plugin-react-swc](https://github.com/vitejs/vite-plugin-react/blob/main/packages/plugin-react-swc) uses [SWC](https://swc.rs/)
|
| 9 |
+
|
| 10 |
+
## React Compiler
|
| 11 |
+
|
| 12 |
+
The React Compiler is not enabled on this template because of its impact on dev & build performances. To add it, see [this documentation](https://react.dev/learn/react-compiler/installation).
|
| 13 |
+
|
| 14 |
+
## Expanding the ESLint configuration
|
| 15 |
+
|
| 16 |
+
If you are developing a production application, we recommend using TypeScript with type-aware lint rules enabled. Check out the [TS template](https://github.com/vitejs/vite/tree/main/packages/create-vite/template-react-ts) for information on how to integrate TypeScript and [`typescript-eslint`](https://typescript-eslint.io) in your project.
|
frontend/eslint.config.js
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Flat ESLint config (ESLint 9+) for the Vite + React frontend.
import js from '@eslint/js'
import globals from 'globals'
import reactHooks from 'eslint-plugin-react-hooks'
import reactRefresh from 'eslint-plugin-react-refresh'
import { defineConfig, globalIgnores } from 'eslint/config'

export default defineConfig([
  // Never lint build output.
  globalIgnores(['dist']),
  {
    files: ['**/*.{js,jsx}'],
    extends: [
      js.configs.recommended,
      reactHooks.configs.flat.recommended,
      reactRefresh.configs.vite,
    ],
    languageOptions: {
      ecmaVersion: 2020,
      // Browser globals (window, document, …) are predefined.
      globals: globals.browser,
      parserOptions: {
        ecmaVersion: 'latest',
        ecmaFeatures: { jsx: true },
        sourceType: 'module',
      },
    },
    rules: {
      // Tolerate unused UPPER_CASE / PascalCase identifiers
      // (constants and React components referenced only in JSX).
      'no-unused-vars': ['error', { varsIgnorePattern: '^[A-Z_]' }],
    },
  },
])
|
frontend/index.html
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<!doctype html>
|
| 2 |
+
<html lang="en">
|
| 3 |
+
<head>
|
| 4 |
+
<meta charset="UTF-8" />
|
| 5 |
+
<link rel="icon" type="image/svg+xml" href="/favicon.svg" />
|
| 6 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
|
| 7 |
+
<title>frontend</title>
|
| 8 |
+
</head>
|
| 9 |
+
<body>
|
| 10 |
+
<div id="root"></div>
|
| 11 |
+
<script type="module" src="/src/main.jsx"></script>
|
| 12 |
+
</body>
|
| 13 |
+
</html>
|
frontend/package-lock.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
frontend/package.json
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"name": "frontend",
|
| 3 |
+
"private": true,
|
| 4 |
+
"version": "0.0.0",
|
| 5 |
+
"type": "module",
|
| 6 |
+
"scripts": {
|
| 7 |
+
"dev": "vite",
|
| 8 |
+
"build": "vite build",
|
| 9 |
+
"lint": "eslint .",
|
| 10 |
+
"preview": "vite preview"
|
| 11 |
+
},
|
| 12 |
+
"dependencies": {
|
| 13 |
+
"lucide-react": "^1.8.0",
|
| 14 |
+
"react": "^19.2.4",
|
| 15 |
+
"react-dom": "^19.2.4",
|
| 16 |
+
"recharts": "^3.8.1"
|
| 17 |
+
},
|
| 18 |
+
"devDependencies": {
|
| 19 |
+
"@eslint/js": "^9.39.4",
|
| 20 |
+
"@types/react": "^19.2.14",
|
| 21 |
+
"@types/react-dom": "^19.2.3",
|
| 22 |
+
"@vitejs/plugin-react": "^6.0.1",
|
| 23 |
+
"eslint": "^9.39.4",
|
| 24 |
+
"eslint-plugin-react-hooks": "^7.0.1",
|
| 25 |
+
"eslint-plugin-react-refresh": "^0.5.2",
|
| 26 |
+
"globals": "^17.4.0",
|
| 27 |
+
"vite": "^8.0.4"
|
| 28 |
+
}
|
| 29 |
+
}
|
frontend/public/favicon.svg
ADDED
|
|
frontend/public/icons.svg
ADDED
|
|
frontend/src/App.css
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.counter {
|
| 2 |
+
font-size: 16px;
|
| 3 |
+
padding: 5px 10px;
|
| 4 |
+
border-radius: 5px;
|
| 5 |
+
color: var(--accent);
|
| 6 |
+
background: var(--accent-bg);
|
| 7 |
+
border: 2px solid transparent;
|
| 8 |
+
transition: border-color 0.3s;
|
| 9 |
+
margin-bottom: 24px;
|
| 10 |
+
|
| 11 |
+
&:hover {
|
| 12 |
+
border-color: var(--accent-border);
|
| 13 |
+
}
|
| 14 |
+
&:focus-visible {
|
| 15 |
+
outline: 2px solid var(--accent);
|
| 16 |
+
outline-offset: 2px;
|
| 17 |
+
}
|
| 18 |
+
}
|
| 19 |
+
|
| 20 |
+
.hero {
|
| 21 |
+
position: relative;
|
| 22 |
+
|
| 23 |
+
.base,
|
| 24 |
+
.framework,
|
| 25 |
+
.vite {
|
| 26 |
+
inset-inline: 0;
|
| 27 |
+
margin: 0 auto;
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
.base {
|
| 31 |
+
width: 170px;
|
| 32 |
+
position: relative;
|
| 33 |
+
z-index: 0;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
.framework,
|
| 37 |
+
.vite {
|
| 38 |
+
position: absolute;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
.framework {
|
| 42 |
+
z-index: 1;
|
| 43 |
+
top: 34px;
|
| 44 |
+
height: 28px;
|
| 45 |
+
transform: perspective(2000px) rotateZ(300deg) rotateX(44deg) rotateY(39deg)
|
| 46 |
+
scale(1.4);
|
| 47 |
+
}
|
| 48 |
+
|
| 49 |
+
.vite {
|
| 50 |
+
z-index: 0;
|
| 51 |
+
top: 107px;
|
| 52 |
+
height: 26px;
|
| 53 |
+
width: auto;
|
| 54 |
+
transform: perspective(2000px) rotateZ(300deg) rotateX(40deg) rotateY(39deg)
|
| 55 |
+
scale(0.8);
|
| 56 |
+
}
|
| 57 |
+
}
|
| 58 |
+
|
| 59 |
+
#center {
|
| 60 |
+
display: flex;
|
| 61 |
+
flex-direction: column;
|
| 62 |
+
gap: 25px;
|
| 63 |
+
place-content: center;
|
| 64 |
+
place-items: center;
|
| 65 |
+
flex-grow: 1;
|
| 66 |
+
|
| 67 |
+
@media (max-width: 1024px) {
|
| 68 |
+
padding: 32px 20px 24px;
|
| 69 |
+
gap: 18px;
|
| 70 |
+
}
|
| 71 |
+
}
|
| 72 |
+
|
| 73 |
+
#next-steps {
|
| 74 |
+
display: flex;
|
| 75 |
+
border-top: 1px solid var(--border);
|
| 76 |
+
text-align: left;
|
| 77 |
+
|
| 78 |
+
& > div {
|
| 79 |
+
flex: 1 1 0;
|
| 80 |
+
padding: 32px;
|
| 81 |
+
@media (max-width: 1024px) {
|
| 82 |
+
padding: 24px 20px;
|
| 83 |
+
}
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
.icon {
|
| 87 |
+
margin-bottom: 16px;
|
| 88 |
+
width: 22px;
|
| 89 |
+
height: 22px;
|
| 90 |
+
}
|
| 91 |
+
|
| 92 |
+
@media (max-width: 1024px) {
|
| 93 |
+
flex-direction: column;
|
| 94 |
+
text-align: center;
|
| 95 |
+
}
|
| 96 |
+
}
|
| 97 |
+
|
| 98 |
+
#docs {
|
| 99 |
+
border-right: 1px solid var(--border);
|
| 100 |
+
|
| 101 |
+
@media (max-width: 1024px) {
|
| 102 |
+
border-right: none;
|
| 103 |
+
border-bottom: 1px solid var(--border);
|
| 104 |
+
}
|
| 105 |
+
}
|
| 106 |
+
|
| 107 |
+
#next-steps ul {
|
| 108 |
+
list-style: none;
|
| 109 |
+
padding: 0;
|
| 110 |
+
display: flex;
|
| 111 |
+
gap: 8px;
|
| 112 |
+
margin: 32px 0 0;
|
| 113 |
+
|
| 114 |
+
.logo {
|
| 115 |
+
height: 18px;
|
| 116 |
+
}
|
| 117 |
+
|
| 118 |
+
a {
|
| 119 |
+
color: var(--text-h);
|
| 120 |
+
font-size: 16px;
|
| 121 |
+
border-radius: 6px;
|
| 122 |
+
background: var(--social-bg);
|
| 123 |
+
display: flex;
|
| 124 |
+
padding: 6px 12px;
|
| 125 |
+
align-items: center;
|
| 126 |
+
gap: 8px;
|
| 127 |
+
text-decoration: none;
|
| 128 |
+
transition: box-shadow 0.3s;
|
| 129 |
+
|
| 130 |
+
&:hover {
|
| 131 |
+
box-shadow: var(--shadow);
|
| 132 |
+
}
|
| 133 |
+
.button-icon {
|
| 134 |
+
height: 18px;
|
| 135 |
+
width: 18px;
|
| 136 |
+
}
|
| 137 |
+
}
|
| 138 |
+
|
| 139 |
+
@media (max-width: 1024px) {
|
| 140 |
+
margin-top: 20px;
|
| 141 |
+
flex-wrap: wrap;
|
| 142 |
+
justify-content: center;
|
| 143 |
+
|
| 144 |
+
li {
|
| 145 |
+
flex: 1 1 calc(50% - 8px);
|
| 146 |
+
}
|
| 147 |
+
|
| 148 |
+
a {
|
| 149 |
+
width: 100%;
|
| 150 |
+
justify-content: center;
|
| 151 |
+
box-sizing: border-box;
|
| 152 |
+
}
|
| 153 |
+
}
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
#spacer {
|
| 157 |
+
height: 88px;
|
| 158 |
+
border-top: 1px solid var(--border);
|
| 159 |
+
@media (max-width: 1024px) {
|
| 160 |
+
height: 48px;
|
| 161 |
+
}
|
| 162 |
+
}
|
| 163 |
+
|
| 164 |
+
.ticks {
|
| 165 |
+
position: relative;
|
| 166 |
+
width: 100%;
|
| 167 |
+
|
| 168 |
+
&::before,
|
| 169 |
+
&::after {
|
| 170 |
+
content: '';
|
| 171 |
+
position: absolute;
|
| 172 |
+
top: -4.5px;
|
| 173 |
+
border: 5px solid transparent;
|
| 174 |
+
}
|
| 175 |
+
|
| 176 |
+
&::before {
|
| 177 |
+
left: 0;
|
| 178 |
+
border-left-color: var(--border);
|
| 179 |
+
}
|
| 180 |
+
&::after {
|
| 181 |
+
right: 0;
|
| 182 |
+
border-right-color: var(--border);
|
| 183 |
+
}
|
| 184 |
+
}
|
frontend/src/App.jsx
ADDED
|
@@ -0,0 +1,1067 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import React, { useState, useRef, useEffect, useCallback, useMemo } from 'react';
|
| 2 |
+
import {
|
| 3 |
+
LineChart, Line, XAxis, YAxis, Tooltip,
|
| 4 |
+
ResponsiveContainer, CartesianGrid, ReferenceLine, Area, AreaChart
|
| 5 |
+
} from 'recharts';
|
| 6 |
+
import {
|
| 7 |
+
Shield, Activity, AlertTriangle, Zap, Target,
|
| 8 |
+
Eye, Layers, Radio, Cpu, Users,
|
| 9 |
+
TrendingUp, Download, Crosshair, Map, BarChart2,
|
| 10 |
+
Wifi, WifiOff, Play, Square, RotateCcw, Database,
|
| 11 |
+
Maximize2, Upload, GitBranch, Thermometer, Move,
|
| 12 |
+
ZoomIn, ZoomOut, Wind, Brain, Gauge, Sun, Moon
|
| 13 |
+
} from 'lucide-react';
|
| 14 |
+
import './index.css';
|
| 15 |
+
|
| 16 |
+
// ─── helpers ──────────────────────────────────────────────────
// Current wall-clock time as a 24-hour HH:MM:SS string.
const nowStr = () => new Date().toLocaleTimeString('en-US', { hour12: false });
// Short random id (6 base-36 chars) for alert entries / list keys.
const uid = () => Math.random().toString(36).slice(2, 8);
// Milliseconds → zero-padded "MM:SS" elapsed-time string.
const secStr = (ms) => {
  const totalSeconds = Math.floor(ms / 1000);
  const mm = String(Math.floor(totalSeconds / 60)).padStart(2, '0');
  const ss = String(totalSeconds % 60).padStart(2, '0');
  return `${mm}:${ss}`;
};
|
| 23 |
+
|
| 24 |
+
// ─── API Base URLs (reads from env var, falls back to localhost for dev) ──────
// Raw value from the Vite build env; may be empty or a template placeholder.
const rawApiBase = (import.meta.env.VITE_API_URL || '').trim();
// True when VITE_API_URL is unset or still contains a YOUR_USERNAME-style
// placeholder left over from the example env file.
const hasPlaceholderApiBase =
  !rawApiBase || /YOUR_(HF_USERNAME|USERNAME)|your_hf_username|your_username/i.test(rawApiBase);
// Are we being served from a local dev host? (window is absent during SSR/build.)
const isLocalHost =
  typeof window !== 'undefined' &&
  ['localhost', '127.0.0.1'].includes(window.location.hostname);
// Dev fallback: point at the local FastAPI server when running on localhost
// (or with no window at all); empty string on a deployed host so misconfig is loud.
const fallbackApiBase =
  typeof window === 'undefined'
    ? 'http://127.0.0.1:8000'
    : (isLocalHost ? 'http://127.0.0.1:8000' : '');
// Final REST base URL (trailing slash stripped) and its WebSocket counterpart.
const API_BASE = (hasPlaceholderApiBase ? fallbackApiBase : rawApiBase).replace(/\/$/, '');
const WS_BASE = API_BASE.replace(/^https:\/\//, 'wss://').replace(/^http:\/\//, 'ws://');
const API_CONFIG_ERROR = 'Backend API is not configured. Set VITE_API_URL to your FastAPI server URL.';
|
| 38 |
+
|
| 39 |
+
// Threat tiers shared by the status banner and the alert system.
const THREAT = {
  SAFE: { label: 'SAFE', cls: 'safe' },
  MODERATE: { label: 'MODERATE', cls: 'moderate' },
  DANGER: { label: 'DANGER', cls: 'danger' },
};
// Map a live subject count against the capacity limit to a threat tier:
// ratio >= 1 → DANGER, >= 0.75 → MODERATE, otherwise SAFE.
// A non-positive limit is treated as "no limit" (ratio 0 → always SAFE).
function getThreat(count, limit) {
  const ratio = limit > 0 ? count / limit : 0;
  if (ratio >= 1) return THREAT.DANGER;
  return ratio >= 0.75 ? THREAT.MODERATE : THREAT.SAFE;
}
|
| 50 |
+
|
| 51 |
+
// Density classification per 10k pixel reference area
// Buckets the occupancy ratio (count / limit) into four UI tiers;
// a non-positive limit yields ratio 0 and therefore LOW.
function getDensityLabel(count, limit) {
  const ratio = limit > 0 ? count / limit : 0;
  const tiers = [
    [1.0, { label: 'CRITICAL', cls: 'danger' }],
    [0.75, { label: 'HIGH', cls: 'moderate' }],
    [0.40, { label: 'MEDIUM', cls: 'blue' }],
  ];
  for (const [threshold, tier] of tiers) {
    if (ratio >= threshold) return tier;
  }
  return { label: 'LOW', cls: '' };
}
|
| 59 |
+
|
| 60 |
+
// Simple linear regression on last N data points for predictive alerts.
// Fits count = slope * index + intercept over the most recent `n` history
// entries (objects with a numeric `count` field) and extrapolates 10 frames
// past the last observed one. Returns a non-negative integer prediction,
// or null when there is too little data (< 3 points) or the fit is
// degenerate (zero variance in x — cannot happen with >= 2 points, but
// kept as a guard).
function predictNextFrameCount(history, n = 12) {
  const data = history.slice(-n);
  if (data.length < 3) return null;
  const xs = data.map((_, i) => i);
  const ys = data.map(d => d.count);
  const xMean = xs.reduce((a, b) => a + b, 0) / xs.length;
  const yMean = ys.reduce((a, b) => a + b, 0) / ys.length;
  const num = xs.reduce((s, x, i) => s + (x - xMean) * (ys[i] - yMean), 0);
  const den = xs.reduce((s, x) => s + (x - xMean) ** 2, 0);
  if (den === 0) return null;
  const slope = num / den;
  const intercept = yMean - slope * xMean;
  // Predict 10 frames ahead. The last observed index is xs.length - 1,
  // so "10 ahead" is (xs.length - 1) + 10; the previous code used
  // xs.length + 10, which was an off-by-one (11 frames ahead).
  return Math.max(0, Math.round(slope * (xs.length - 1 + 10) + intercept));
}
|
| 76 |
+
|
| 77 |
+
// ─── Custom Recharts Tooltip ──────────────────────────────────
|
| 78 |
+
function CTooltip({ active, payload, label }) {
|
| 79 |
+
if (!active || !payload?.length) return null;
|
| 80 |
+
return (
|
| 81 |
+
<div className="custom-tooltip">
|
| 82 |
+
<div className="custom-tooltip-label">{label}</div>
|
| 83 |
+
<div className="custom-tooltip-value">{payload[0].value}</div>
|
| 84 |
+
</div>
|
| 85 |
+
);
|
| 86 |
+
}
|
| 87 |
+
|
| 88 |
+
// ─── Alert Item ───────────────────────────────────────────────
|
| 89 |
+
function AlertItem({ type, title, msg, time }) {
|
| 90 |
+
const Icon = type === 'danger' ? AlertTriangle : type === 'warning' ? Zap : Radio;
|
| 91 |
+
return (
|
| 92 |
+
<div className={`alert-item ${type}`}>
|
| 93 |
+
<div className="alert-icon"><Icon /></div>
|
| 94 |
+
<div className="alert-body">
|
| 95 |
+
<div className="alert-title">{title}</div>
|
| 96 |
+
<div className="alert-msg">{msg}</div>
|
| 97 |
+
<div className="alert-time">{time}</div>
|
| 98 |
+
</div>
|
| 99 |
+
</div>
|
| 100 |
+
);
|
| 101 |
+
}
|
| 102 |
+
|
| 103 |
+
// ─── Main App ─────────────────────────────────────────────────
|
| 104 |
+
export default function App() {
|
| 105 |
+
|
| 106 |
+
// ── Theme ──
|
| 107 |
+
const [theme, setTheme] = useState(() => localStorage.getItem('cp_theme') || 'dark');
|
| 108 |
+
useEffect(() => {
|
| 109 |
+
document.documentElement.setAttribute('data-theme', theme);
|
| 110 |
+
localStorage.setItem('cp_theme', theme);
|
| 111 |
+
}, [theme]);
|
| 112 |
+
const toggleTheme = () => setTheme(t => t === 'dark' ? 'light' : 'dark');
|
| 113 |
+
|
| 114 |
+
// ── File / mode ──
|
| 115 |
+
const [file, setFile] = useState(null);
|
| 116 |
+
const [fileType, setFileType] = useState('image');
|
| 117 |
+
const [preview, setPreview] = useState(null); // image preview URL
|
| 118 |
+
const [videoPreview, setVideoPreview] = useState(null); // video preview URL
|
| 119 |
+
const [resultImg, setResultImg] = useState(null);
|
| 120 |
+
const [loading, setLoading] = useState(false);
|
| 121 |
+
const [dragActive, setDragActive] = useState(false);
|
| 122 |
+
const [uploadProgress, setUploadProgress] = useState(0); // 0-100
|
| 123 |
+
|
| 124 |
+
// ── Analytics ──
|
| 125 |
+
const [stats, setStats] = useState({ count: 0, unique: 0, latency: 0, frames: 0 });
|
| 126 |
+
const [history, setHistory] = useState([]);
|
| 127 |
+
const [peakCount, setPeak] = useState(0);
|
| 128 |
+
const [alerts, setAlerts] = useState([]);
|
| 129 |
+
const [sessions, setSessions] = useState(() => {
|
| 130 |
+
try { return JSON.parse(localStorage.getItem('cp_sessions') || '[]'); }
|
| 131 |
+
catch { return []; }
|
| 132 |
+
});
|
| 133 |
+
const sessionStartTs = useRef(null);
|
| 134 |
+
|
| 135 |
+
// ── WebSocket ──
|
| 136 |
+
const wsRef = useRef(null);
|
| 137 |
+
const [wsConnected, setWsConnected] = useState(false);
|
| 138 |
+
|
| 139 |
+
// ── Settings ──
|
| 140 |
+
const [settings, setSettings] = useState({
|
| 141 |
+
heatmap: false,
|
| 142 |
+
clustering: false,
|
| 143 |
+
showPoints: true,
|
| 144 |
+
motionVecs: false,
|
| 145 |
+
zoning: false,
|
| 146 |
+
mode: 'Balanced',
|
| 147 |
+
capacity: 150,
|
| 148 |
+
magnification: 1.5,
|
| 149 |
+
nmsRadius: 9.0,
|
| 150 |
+
frameSkip: 3,
|
| 151 |
+
overlayOpacity: 100, // GAP 2: opacity slider
|
| 152 |
+
});
|
| 153 |
+
|
| 154 |
+
// ── Zoom / Pan (GAP 4) ──
|
| 155 |
+
const [zoom, setZoom] = useState(1);
|
| 156 |
+
const [pan, setPan] = useState({ x: 0, y: 0 });
|
| 157 |
+
const isPanning = useRef(false);
|
| 158 |
+
const panStart = useRef({ x: 0, y: 0 });
|
| 159 |
+
const panOrigin = useRef({ x: 0, y: 0 });
|
| 160 |
+
|
| 161 |
+
// ── Zone fencing ──
|
| 162 |
+
const [fencePoints, setFencePoints] = useState([]);
|
| 163 |
+
const [drawingFence, setDrawingFence] = useState(false);
|
| 164 |
+
const viewerRef = useRef(null);
|
| 165 |
+
const fileInputRef = useRef(null);
|
| 166 |
+
|
| 167 |
+
// ── Derived ──
|
| 168 |
+
const threat = getThreat(stats.count, settings.capacity);
|
| 169 |
+
const density = getDensityLabel(stats.count, settings.capacity);
|
| 170 |
+
const anomalyActive = alerts.some(a => a.type === 'danger' && Date.now() - a._ts < 5000);
|
| 171 |
+
const predicted = useMemo(() => predictNextFrameCount(history), [history]);
|
| 172 |
+
const predictAlert = predicted !== null && predicted > settings.capacity;
|
| 173 |
+
|
| 174 |
+
// ── Clock ──
|
| 175 |
+
const [clock, setClock] = useState(nowStr());
|
| 176 |
+
useEffect(() => {
|
| 177 |
+
const t = setInterval(() => setClock(nowStr()), 1000);
|
| 178 |
+
return () => clearInterval(t);
|
| 179 |
+
}, []);
|
| 180 |
+
|
| 181 |
+
// ── FPS ──
|
| 182 |
+
const lastHistLen = useRef(0);
|
| 183 |
+
const [fps, setFps] = useState(0);
|
| 184 |
+
useEffect(() => {
|
| 185 |
+
const t = setInterval(() => {
|
| 186 |
+
const delta = history.length - lastHistLen.current;
|
| 187 |
+
setFps(Math.max(0, Math.round(delta / Math.max(1, settings.frameSkip) * settings.frameSkip)));
|
| 188 |
+
lastHistLen.current = history.length;
|
| 189 |
+
}, 1000);
|
| 190 |
+
return () => clearInterval(t);
|
| 191 |
+
}, [history, settings.frameSkip]);
|
| 192 |
+
|
| 193 |
+
// ── Average density from history ──
|
| 194 |
+
const avgCount = history.length > 0
|
| 195 |
+
? Math.round(history.reduce((s, h) => s + h.count, 0) / history.length)
|
| 196 |
+
: 0;
|
| 197 |
+
|
| 198 |
+
// ── Alert helper ──
|
| 199 |
+
const addAlert = useCallback((type, title, msg) => {
|
| 200 |
+
const entry = { id: uid(), type, title, msg, time: nowStr(), _ts: Date.now() };
|
| 201 |
+
setAlerts(prev => [entry, ...prev].slice(0, 60));
|
| 202 |
+
}, []);
|
| 203 |
+
|
| 204 |
+
useEffect(() => {
|
| 205 |
+
if (hasPlaceholderApiBase) {
|
| 206 |
+
addAlert(
|
| 207 |
+
'warning',
|
| 208 |
+
'API Fallback Active',
|
| 209 |
+
API_BASE
|
| 210 |
+
? `Using fallback backend: ${API_BASE}. Set VITE_API_URL for deployed builds.`
|
| 211 |
+
: API_CONFIG_ERROR
|
| 212 |
+
);
|
| 213 |
+
}
|
| 214 |
+
}, [addAlert]);
|
| 215 |
+
|
| 216 |
+
// ── Predictive alert (GAP optional-7) ──
|
| 217 |
+
useEffect(() => {
|
| 218 |
+
if (predictAlert && history.length % 15 === 0 && history.length > 0) {
|
| 219 |
+
addAlert('warning', '🔮 Predictive Alert', `Model predicts ~${predicted} subjects in ~10 frames — limit ${settings.capacity}`);
|
| 220 |
+
}
|
| 221 |
+
}, [predictAlert, predicted, history.length, settings.capacity, addAlert]);
|
| 222 |
+
|
| 223 |
+
// ── File handling ──
|
| 224 |
+
const handleFile = useCallback((f) => {
|
| 225 |
+
setFile(f);
|
| 226 |
+
setResultImg(null);
|
| 227 |
+
setHistory([]);
|
| 228 |
+
setPeak(0);
|
| 229 |
+
setFencePoints([]);
|
| 230 |
+
setZoom(1);
|
| 231 |
+
setPan({ x: 0, y: 0 });
|
| 232 |
+
setUploadProgress(0);
|
| 233 |
+
setStats({ count: 0, unique: 0, latency: 0, frames: 0 });
|
| 234 |
+
sessionStartTs.current = Date.now();
|
| 235 |
+
// Revoke any old preview URLs to avoid memory leaks
|
| 236 |
+
if (preview) URL.revokeObjectURL(preview);
|
| 237 |
+
if (videoPreview) URL.revokeObjectURL(videoPreview);
|
| 238 |
+
if (f.type.startsWith('video')) {
|
| 239 |
+
setFileType('video');
|
| 240 |
+
setPreview(null);
|
| 241 |
+
setVideoPreview(URL.createObjectURL(f));
|
| 242 |
+
} else {
|
| 243 |
+
setFileType('image');
|
| 244 |
+
setPreview(URL.createObjectURL(f));
|
| 245 |
+
setVideoPreview(null);
|
| 246 |
+
}
|
| 247 |
+
addAlert('info', 'Feed Loaded', `${f.name.slice(0, 28)} (${(f.size/1024/1024).toFixed(1)} MB)`);
|
| 248 |
+
// eslint-disable-next-line react-hooks/exhaustive-deps
|
| 249 |
+
}, [addAlert]);
|
| 250 |
+
|
| 251 |
+
const onDrop = (e) => {
|
| 252 |
+
e.preventDefault(); setDragActive(false);
|
| 253 |
+
if (e.dataTransfer.files[0]) handleFile(e.dataTransfer.files[0]);
|
| 254 |
+
};
|
| 255 |
+
const onDrag = (e) => {
|
| 256 |
+
e.preventDefault();
|
| 257 |
+
setDragActive(e.type === 'dragenter' || e.type === 'dragover');
|
| 258 |
+
};
|
| 259 |
+
|
| 260 |
+
// ── Zoom via scroll wheel (GAP 4) ──
|
| 261 |
+
const onWheel = (e) => {
|
| 262 |
+
e.preventDefault();
|
| 263 |
+
setZoom(z => Math.min(5, Math.max(1, z - e.deltaY * 0.002)));
|
| 264 |
+
};
|
| 265 |
+
|
| 266 |
+
// ── Pan via mouse drag (GAP 4) ──
|
| 267 |
+
const onMouseDown = (e) => {
|
| 268 |
+
if (zoom <= 1) return;
|
| 269 |
+
isPanning.current = true;
|
| 270 |
+
panStart.current = { x: e.clientX, y: e.clientY };
|
| 271 |
+
panOrigin.current = { ...pan };
|
| 272 |
+
e.currentTarget.style.cursor = 'grabbing';
|
| 273 |
+
};
|
| 274 |
+
const onMouseMove = (e) => {
|
| 275 |
+
if (!isPanning.current) return;
|
| 276 |
+
setPan({
|
| 277 |
+
x: panOrigin.current.x + (e.clientX - panStart.current.x),
|
| 278 |
+
y: panOrigin.current.y + (e.clientY - panStart.current.y),
|
| 279 |
+
});
|
| 280 |
+
};
|
| 281 |
+
const onMouseUp = (e) => {
|
| 282 |
+
isPanning.current = false;
|
| 283 |
+
if (e.currentTarget) e.currentTarget.style.cursor = zoom > 1 ? 'grab' : 'crosshair';
|
| 284 |
+
};
|
| 285 |
+
|
| 286 |
+
// ── Zone fencing click ──
|
| 287 |
+
const onViewerClick = (e) => {
|
| 288 |
+
if (!drawingFence || !viewerRef.current || isPanning.current) return;
|
| 289 |
+
const r = viewerRef.current.getBoundingClientRect();
|
| 290 |
+
const xr = (e.clientX - r.left) / r.width;
|
| 291 |
+
const yr = (e.clientY - r.top) / r.height;
|
| 292 |
+
setFencePoints(fp => [...fp, { x: xr, y: yr }]);
|
| 293 |
+
};
|
| 294 |
+
|
| 295 |
+
// ── Wheel listener (must be non-passive) ──
|
| 296 |
+
useEffect(() => {
|
| 297 |
+
const el = viewerRef.current;
|
| 298 |
+
if (!el) return;
|
| 299 |
+
el.addEventListener('wheel', onWheel, { passive: false });
|
| 300 |
+
return () => el.removeEventListener('wheel', onWheel);
|
| 301 |
+
});
|
| 302 |
+
|
| 303 |
+
// ── Image scan ──
|
| 304 |
+
// Run a single-image count: POST the file plus current tuning parameters
// to the backend, then publish count / latency results and threat alerts.
// All FormData values are appended as strings for FastAPI form parsing.
const executeImageScan = async () => {
  if (!file) return;
  setLoading(true);
  addAlert('info', 'Scan Initiated', 'Neural engine processing frame...');

  // Build the multipart payload. Mode presets map to threshold / overlap.
  const form = new FormData();
  form.append('file', file);
  form.append('confidence_threshold',
    settings.mode === 'Performance' ? '0.45' : settings.mode === 'Accuracy' ? '0.25' : '0.35');
  // toFixed guards against float precision drift from the sliders.
  form.append('magnification', parseFloat(settings.magnification).toFixed(2));
  form.append('nms_radius', parseFloat(settings.nmsRadius).toFixed(2));
  form.append('use_heatmap', String(settings.heatmap));
  form.append('use_clustering', String(settings.clustering));
  form.append('use_motion_vectors', String(settings.motionVecs));
  form.append('fencing_polygon', JSON.stringify(fencePoints));
  form.append('inference_batch_size', '8');
  form.append('patch_overlap',
    settings.mode === 'Performance' ? '0.0' : settings.mode === 'Accuracy' ? '0.5' : '0.25');
  form.append('inference_strategy', 'Auto');
  form.append('max_resolution', '3840');

  try {
    if (!API_BASE) throw new Error(API_CONFIG_ERROR);
    const res = await fetch(`${API_BASE}/api/process-image`, { method: 'POST', body: form });
    // Read the body as text first so a non-JSON error page still yields
    // a usable message instead of a JSON.parse crash.
    const responseText = await res.text();
    let data;
    try {
      data = responseText ? JSON.parse(responseText) : {};
    } catch {
      data = { detail: responseText || `HTTP ${res.status}` };
    }
    if (!res.ok) throw new Error(data.detail || `Image scan failed (HTTP ${res.status})`);
    if (data.detail) throw new Error(data.detail);

    // Success path: show the annotated frame and update all metrics.
    setResultImg(`data:image/jpeg;base64,${data.imageB64}`);
    const c = data.count;
    const ts = nowStr();
    // Single image: unique count equals the frame count.
    setStats(s => ({ ...s, count: c, unique: c, latency: data.elapsed }));
    setPeak(p => Math.max(p, c));
    setHistory([{ label: ts, count: c }]);

    // Raise a severity-appropriate alert based on the capacity threshold.
    const t = getThreat(c, settings.capacity);
    if (t === THREAT.DANGER) addAlert('danger', '⚠ Capacity Breach', `${c} subjects — limit ${settings.capacity}`);
    else if (t === THREAT.MODERATE) addAlert('warning', 'Elevated Density', `Zone at ${Math.round(c / settings.capacity * 100)}%`);
    else addAlert('info', 'Scan Complete', `${c} subjects in ${data.elapsed.toFixed(2)}s`);

  } catch (err) {
    addAlert('danger', 'Scan Failed', err?.message || 'Unknown image scan error');
  } finally {
    setLoading(false);
  }
};
|
| 356 |
+
|
| 357 |
+
// ── Video stream ──
|
| 358 |
+
// Video pipeline: upload the file via XHR (for progress events), then open
// a WebSocket to stream per-frame counts, overlays and alerts in real time.
const streamVideo = async () => {
  if (!file) return;
  setLoading(true);
  setHistory([]);
  setPeak(0);
  setUploadProgress(0);
  addAlert('info', 'Uploading Video', `${file.name.slice(0,28)} — ${(file.size/1024/1024).toFixed(1)} MB`);

  try {
    if (!API_BASE) throw new Error(API_CONFIG_ERROR);
    // Use XMLHttpRequest so we can track upload progress
    // (fetch has no standard upload-progress events).
    const file_id = await new Promise((resolve, reject) => {
      const xhr = new XMLHttpRequest();
      const form = new FormData();
      form.append('file', file);

      xhr.open('POST', `${API_BASE}/api/upload-video`, true);

      xhr.upload.onprogress = (e) => {
        if (e.lengthComputable) {
          setUploadProgress(Math.round((e.loaded / e.total) * 100));
        }
      };

      xhr.onload = () => {
        if (xhr.status === 200) {
          try {
            const data = JSON.parse(xhr.responseText);
            resolve(data.file_id);
          } catch {
            reject(new Error('Invalid server response'));
          }
        } else {
          // Error path: prefer the server's detail message when parseable.
          try {
            const err = JSON.parse(xhr.responseText);
            reject(new Error(err.detail || `Upload failed (HTTP ${xhr.status})`));
          } catch {
            reject(new Error(`Upload failed (HTTP ${xhr.status})`));
          }
        }
      };
      xhr.onerror = () => reject(new Error('Network error during upload'));
      xhr.send(form);
    });

    setUploadProgress(100);
    addAlert('info', 'Upload Complete', 'Connecting to inference engine...');

    // Open the streaming socket keyed by the uploaded file's id.
    const ws = new WebSocket(`${WS_BASE}/api/stream-video/${file_id}`);
    wsRef.current = ws;
    // Debounce repeated capacity alerts: start far in the past.
    let lastCapacityAlertFrame = -999;

    ws.onopen = () => {
      setWsConnected(true);
      addAlert('info', 'WebSocket Live', 'Real-time telemetry stream established');
      // First message carries the full settings snapshot for this stream.
      // Coercions (toFixed / Boolean / Math.round) normalise slider state.
      ws.send(JSON.stringify({
        settings: {
          confidenceThresh:
            settings.mode === 'Performance' ? 0.45 : settings.mode === 'Accuracy' ? 0.25 : 0.35,
          magnification: parseFloat(parseFloat(settings.magnification).toFixed(2)),
          nmsRadius: parseFloat(parseFloat(settings.nmsRadius).toFixed(2)),
          useHeatmap: Boolean(settings.heatmap),
          useClustering: Boolean(settings.clustering),
          useMotionVecs: Boolean(settings.motionVecs),
          frameSkip: Math.round(settings.frameSkip),
          fencingPolygon: fencePoints,
          capacityLimit: Math.round(settings.capacity),
        }
      }));
    };

    ws.onmessage = (e) => {
      const payload = JSON.parse(e.data);
      if (payload.status === 'playing') {
        // Per-frame update: annotated frame + running metrics.
        setResultImg(`data:image/jpeg;base64,${payload.imageB64}`);
        const c = payload.count;
        const ts = nowStr();
        setStats(s => ({ ...s, count: c, unique: payload.total_unique, frames: payload.frame }));
        setPeak(p => Math.max(p, c));
        setHistory(h => [...h, { label: ts, count: c }]);

        if (payload.anomalyEvent)
          addAlert('danger', '⚠ CHAOS DETECTED', `Rapid movement at frame ${payload.frame}`);

        // Only re-alert on overcrowding every 30+ frames to avoid spam.
        const t = getThreat(c, settings.capacity);
        if (t === THREAT.DANGER && payload.frame - lastCapacityAlertFrame > 30) {
          lastCapacityAlertFrame = payload.frame;
          addAlert('danger', 'Zone Overcrowding', `${c} subjects — ${Math.round(c / settings.capacity * 100)}%`);
        }
      } else if (payload.status === 'done') {
        ws.close();
        addAlert('info', 'Stream Complete', `${payload.total_unique ?? '?'} unique subjects archived`);
        saveSession(file.name);
      } else if (payload.status === 'error') {
        ws.close();
        addAlert('danger', 'Engine Error', payload.message || 'Unknown stream error');
      }
    };

    ws.onerror = () => addAlert('danger', 'Stream Error', 'WebSocket connection failed');
    // onclose fires for every termination path (done/error/manual),
    // so all teardown lives here.
    ws.onclose = () => {
      setWsConnected(false);
      setLoading(false);
      wsRef.current = null;
    };

  } catch (err) {
    addAlert('danger', 'Upload Failed', err.message);
    setLoading(false);
  }
};
|
| 469 |
+
|
| 470 |
+
// Operator-initiated shutdown of a live stream; the socket's onclose
// handler performs the remaining state teardown.
const terminateStream = () => {
  const ws = wsRef.current;
  if (ws) ws.close();
  setLoading(false);
  addAlert('warning', 'Stream Terminated', 'Operator manually terminated');
};
|
| 475 |
+
|
| 476 |
+
// ── Session save ──
|
| 477 |
+
// Persist a summary of the completed session to state and localStorage.
const saveSession = (name) => {
  const startedAt = sessionStartTs.current;
  const elapsed = startedAt ? secStr(Date.now() - startedAt) : '—';

  const entry = {
    id: uid(),
    name,
    peak: peakCount,
    avg: avgCount,
    alerts: alerts.length,
    elapsed,
    time: new Date().toLocaleString(),
    history: history.slice(-300), // cap stored timeline at 300 points
  };

  setSessions(prev => {
    const next = [entry, ...prev].slice(0, 20); // newest first, keep 20
    localStorage.setItem('cp_sessions', JSON.stringify(next));
    return next;
  });
};
|
| 491 |
+
|
| 492 |
+
// ── Export ──
|
| 493 |
+
// Serialise the current analytics into a JSON file and trigger a download.
const exportReport = () => {
  const payload = {
    generated: new Date().toISOString(),
    file: file?.name,
    peakCount,
    avgCount,
    alertCount: alerts.length,
    settings,
    history,
    // Strip alert objects down to their displayable fields.
    alerts: alerts.map(({ time, type, title, msg }) => ({ time, type, title, msg })),
  };

  const blob = new Blob([JSON.stringify(payload, null, 2)], { type: 'application/json' });
  const url = URL.createObjectURL(blob);
  const anchor = document.createElement('a');
  anchor.href = url;
  anchor.download = `civic_pulse_${Date.now()}.json`;
  anchor.click();
  URL.revokeObjectURL(url); // release the blob URL immediately

  addAlert('info', 'Report Exported', 'Analytics report downloaded');
};
|
| 507 |
+
|
| 508 |
+
// ── Fence SVG ──
|
| 509 |
+
// ── Fence SVG ──
// Pre-compute the polygon point string in viewer pixel coordinates;
// stays null until the viewer is mounted and at least one anchor exists.
let fenceSvg = null;
if (viewerRef.current && fencePoints.length > 0) {
  const { offsetWidth: w, offsetHeight: h } = viewerRef.current;
  const pts = fencePoints.map(p => `${p.x * w},${p.y * h}`).join(' ');
  fenceSvg = { pts, w, h };
}
|
| 513 |
+
|
| 514 |
+
// Dispatch the correct analysis pipeline for the loaded media type.
const handleExecute = () => fileType === 'video' ? streamVideo() : executeImageScan();
// Flip a boolean settings flag by key.
const toggleSetting = (key) => setSettings(s => ({ ...s, [key]: !s[key] }));
// Overwrite a single settings value by key.
const setSetting = (key, val) => setSettings(s => ({ ...s, [key]: val }));
// CSS modifier class for the population readout, driven by threat level.
const countClass = threat === THREAT.DANGER ? 'danger' : threat === THREAT.MODERATE ? 'moderate' : '';
|
| 518 |
+
|
| 519 |
+
// ──────────────────────────────────────────────
|
| 520 |
+
return (
|
| 521 |
+
<div className="dashboard">
|
| 522 |
+
|
| 523 |
+
{/* ════════ TOP NAVBAR ════════ */}
|
| 524 |
+
<nav className="navbar">
|
| 525 |
+
<div className="navbar-brand">
|
| 526 |
+
<div className="navbar-logo"><Shield /></div>
|
| 527 |
+
<div>
|
| 528 |
+
<div className="navbar-title">CIVIC PULSE</div>
|
| 529 |
+
<div className="navbar-subtitle">Tactical Crowd Intelligence</div>
|
| 530 |
+
</div>
|
| 531 |
+
</div>
|
| 532 |
+
|
| 533 |
+
<div className="navbar-center">
|
| 534 |
+
<div className={`threat-level ${threat.cls}`}>
|
| 535 |
+
<span className="threat-dot" />
|
| 536 |
+
THREAT: {threat.label}
|
| 537 |
+
</div>
|
| 538 |
+
{predictAlert && (
|
| 539 |
+
<div className="threat-level moderate" style={{ fontSize: '0.6rem' }}>
|
| 540 |
+
<Brain size={10} />
|
| 541 |
+
PREDICT: ~{predicted} SOON
|
| 542 |
+
</div>
|
| 543 |
+
)}
|
| 544 |
+
<div className={`ws-status ${wsConnected ? 'connected' : 'disconnected'}`}>
|
| 545 |
+
{wsConnected ? <Wifi size={12} /> : <WifiOff size={12} />}
|
| 546 |
+
{wsConnected ? 'STREAM LIVE' : 'STANDBY'}
|
| 547 |
+
</div>
|
| 548 |
+
</div>
|
| 549 |
+
|
| 550 |
+
<div className="navbar-stats">
|
| 551 |
+
<div className="nav-stat"><span className="nav-stat-label">FPS</span><span className="nav-stat-value">{fps}</span></div>
|
| 552 |
+
<div className="nav-stat"><span className="nav-stat-label">PEAK</span><span className="nav-stat-value">{peakCount}</span></div>
|
| 553 |
+
<div className="nav-stat"><span className="nav-stat-label">AVG</span><span className="nav-stat-value">{avgCount}</span></div>
|
| 554 |
+
<div className="nav-stat">
|
| 555 |
+
<span className="nav-stat-label">ALERTS</span>
|
| 556 |
+
<span className="nav-stat-value" style={{ color: alerts[0]?.type === 'danger' ? 'var(--danger)' : undefined }}>{alerts.length}</span>
|
| 557 |
+
</div>
|
| 558 |
+
<div className="nav-stat"><span className="nav-stat-label">TIME</span><span className="nav-stat-value">{clock}</span></div>
|
| 559 |
+
|
| 560 |
+
{/* ── Theme Toggle ── */}
|
| 561 |
+
<div style={{ display: 'flex', flexDirection: 'column', alignItems: 'center', gap: 3, marginLeft: 8 }}>
|
| 562 |
+
<span className="theme-label">{theme === 'dark' ? 'Dark' : 'Light'}</span>
|
| 563 |
+
<div
|
| 564 |
+
className="theme-toggle"
|
| 565 |
+
onClick={toggleTheme}
|
| 566 |
+
title={`Switch to ${theme === 'dark' ? 'Light' : 'Dark'} mode`}
|
| 567 |
+
>
|
| 568 |
+
<div className="theme-toggle-thumb">
|
| 569 |
+
{theme === 'dark' ? <Moon size={10} style={{ color: '#94a3b8' }} /> : <Sun size={10} style={{ color: '#fff' }} />}
|
| 570 |
+
</div>
|
| 571 |
+
</div>
|
| 572 |
+
</div>
|
| 573 |
+
</div>
|
| 574 |
+
</nav>
|
| 575 |
+
|
| 576 |
+
{/* ════════ LEFT PANEL ════════ */}
|
| 577 |
+
<aside className="left-panel panel">
|
| 578 |
+
|
| 579 |
+
{/* Upload */}
|
| 580 |
+
<div className="panel-section">
|
| 581 |
+
<div className="section-header">
|
| 582 |
+
<Upload size={16} className="section-icon" />
|
| 583 |
+
<span className="section-title">Ingest Data</span>
|
| 584 |
+
</div>
|
| 585 |
+
<div
|
| 586 |
+
className={`upload-zone ${dragActive ? 'drag-active' : ''}`}
|
| 587 |
+
onDragEnter={onDrag} onDragLeave={onDrag} onDragOver={onDrag} onDrop={onDrop}
|
| 588 |
+
onClick={() => fileInputRef.current?.click()}
|
| 589 |
+
>
|
| 590 |
+
{/* Explicit MIME types — critical for Windows file dialog */}
|
| 591 |
+
<input ref={fileInputRef} type="file"
|
| 592 |
+
accept="image/jpeg,image/png,image/gif,image/bmp,image/webp,image/tiff,video/mp4,video/avi,video/quicktime,video/x-matroska,video/webm,video/x-msvideo,video/*,image/*"
|
| 593 |
+
style={{ display: 'none' }}
|
| 594 |
+
onChange={e => e.target.files[0] && handleFile(e.target.files[0])} />
|
| 595 |
+
{/* Video preview thumbnail */}
|
| 596 |
+
{videoPreview ? (
|
| 597 |
+
<div style={{ width: '100%', marginBottom: 8 }}>
|
| 598 |
+
<video
|
| 599 |
+
src={videoPreview}
|
| 600 |
+
style={{ width: '100%', maxHeight: 80, borderRadius: 6, objectFit: 'cover', display: 'block' }}
|
| 601 |
+
muted preload="metadata"
|
| 602 |
+
/>
|
| 603 |
+
{/* Upload progress bar */}
|
| 604 |
+
{loading && uploadProgress < 100 && (
|
| 605 |
+
<div style={{ marginTop: 6, background: 'var(--bg-input)', borderRadius: 4, overflow: 'hidden', height: 4 }}>
|
| 606 |
+
<div style={{
|
| 607 |
+
height: '100%', borderRadius: 4,
|
| 608 |
+
background: 'linear-gradient(90deg, var(--primary), var(--blue))',
|
| 609 |
+
width: `${uploadProgress}%`,
|
| 610 |
+
transition: 'width 0.3s ease',
|
| 611 |
+
boxShadow: '0 0 6px var(--primary-glow)'
|
| 612 |
+
}} />
|
| 613 |
+
</div>
|
| 614 |
+
)}
|
| 615 |
+
</div>
|
| 616 |
+
) : (
|
| 617 |
+
<span className="upload-icon">🛰️</span>
|
| 618 |
+
)}
|
| 619 |
+
<div className="upload-title">
|
| 620 |
+
{file
|
| 621 |
+
? file.name.slice(0, 22)
|
| 622 |
+
: 'Load Aerial Feed'}
|
| 623 |
+
</div>
|
| 624 |
+
<div className="upload-sub">
|
| 625 |
+
{file
|
| 626 |
+
? `${(file.size/1024/1024).toFixed(1)} MB · ${fileType}${
|
| 627 |
+
loading && uploadProgress > 0 && uploadProgress < 100
|
| 628 |
+
? ` · uploading ${uploadProgress}%` : ''}`
|
| 629 |
+
: 'Image or Video (drag & drop)'}
|
| 630 |
+
</div>
|
| 631 |
+
</div>
|
| 632 |
+
</div>
|
| 633 |
+
|
| 634 |
+
{/* Overlay Toggles */}
|
| 635 |
+
<div className="panel-section">
|
| 636 |
+
<div className="section-header">
|
| 637 |
+
<Layers size={16} className="section-icon" />
|
| 638 |
+
<span className="section-title">Overlay Layers</span>
|
| 639 |
+
</div>
|
| 640 |
+
{[
|
| 641 |
+
{ key: 'heatmap', label: 'Heatmap Density', Icon: Thermometer },
|
| 642 |
+
{ key: 'clustering', label: 'AI Pod Clustering', Icon: GitBranch },
|
| 643 |
+
{ key: 'showPoints', label: 'Head Points', Icon: Crosshair },
|
| 644 |
+
{ key: 'motionVecs', label: 'Motion Vectors', Icon: Wind }, // GAP 5
|
| 645 |
+
].map(({ key, label, Icon }) => (
|
| 646 |
+
<div key={key} className="toggle-row" onClick={() => toggleSetting(key)}>
|
| 647 |
+
<span className="toggle-label"><Icon size={13} />{label}</span>
|
| 648 |
+
<div className={`toggle-switch ${settings[key] ? 'on' : ''}`} />
|
| 649 |
+
</div>
|
| 650 |
+
))}
|
| 651 |
+
|
| 652 |
+
{/* GAP 2: Overlay Opacity Slider */}
|
| 653 |
+
<div className="slider-group" style={{ marginTop: 10 }}>
|
| 654 |
+
<div className="slider-header">
|
| 655 |
+
<span>Overlay Opacity</span>
|
| 656 |
+
<span className="slider-value">{settings.overlayOpacity}%</span>
|
| 657 |
+
</div>
|
| 658 |
+
<input type="range" min={20} max={100} step={5}
|
| 659 |
+
value={settings.overlayOpacity}
|
| 660 |
+
onChange={e => setSetting('overlayOpacity', +e.target.value)} />
|
| 661 |
+
</div>
|
| 662 |
+
</div>
|
| 663 |
+
|
| 664 |
+
{/* Engine Mode */}
|
| 665 |
+
<div className="panel-section">
|
| 666 |
+
<div className="section-header">
|
| 667 |
+
<Cpu size={16} className="section-icon" />
|
| 668 |
+
<span className="section-title">Engine Mode</span>
|
| 669 |
+
</div>
|
| 670 |
+
<select className="mode-select" value={settings.mode}
|
| 671 |
+
onChange={e => setSetting('mode', e.target.value)}>
|
| 672 |
+
<option value="Performance">Performance (Fast)</option>
|
| 673 |
+
<option value="Balanced">Balanced</option>
|
| 674 |
+
<option value="Accuracy">Accuracy (Slow)</option>
|
| 675 |
+
</select>
|
| 676 |
+
</div>
|
| 677 |
+
|
| 678 |
+
{/* Parameters */}
|
| 679 |
+
<div className="panel-section">
|
| 680 |
+
<div className="section-header">
|
| 681 |
+
<BarChart2 size={16} className="section-icon" />
|
| 682 |
+
<span className="section-title">Parameters</span>
|
| 683 |
+
</div>
|
| 684 |
+
{[
|
| 685 |
+
{ key: 'capacity', label: 'Capacity Limit', suffix: '', min: 10, max: 1000, step: 5, isInt: true },
|
| 686 |
+
{ key: 'magnification', label: 'Magnification', suffix: '×', min: 1.0, max: 3.0, step: 0.1, isInt: false },
|
| 687 |
+
{ key: 'nmsRadius', label: 'NMS Radius', suffix: 'px', min: 3.0, max: 20.0, step: 0.5, isInt: false },
|
| 688 |
+
{ key: 'frameSkip', label: 'Frame Skip', suffix: '', min: 1, max: 15, step: 1, isInt: true },
|
| 689 |
+
].map(({ key, label, suffix, min, max, step, isInt }) => (
|
| 690 |
+
<div className="slider-group" key={key} style={{ marginTop: 10 }}>
|
| 691 |
+
<div className="slider-header">
|
| 692 |
+
<span>{label}</span>
|
| 693 |
+
<span className="slider-value">
|
| 694 |
+
{isInt
|
| 695 |
+
? Math.round(settings[key])
|
| 696 |
+
: parseFloat(settings[key]).toFixed(1)}{suffix}
|
| 697 |
+
</span>
|
| 698 |
+
</div>
|
| 699 |
+
<input type="range" min={min} max={max} step={step}
|
| 700 |
+
value={settings[key]}
|
| 701 |
+
onChange={e => setSetting(key, isInt ? Math.round(+e.target.value) : parseFloat((+e.target.value).toFixed(2)))} />
|
| 702 |
+
</div>
|
| 703 |
+
))}
|
| 704 |
+
</div>
|
| 705 |
+
|
| 706 |
+
{/* Zone Fencing */}
|
| 707 |
+
<div className="panel-section">
|
| 708 |
+
<div className="section-header">
|
| 709 |
+
<Map size={16} className="section-icon" />
|
| 710 |
+
<span className="section-title">Zone Fencing</span>
|
| 711 |
+
</div>
|
| 712 |
+
<div className="toggle-row" onClick={() => setDrawingFence(d => !d)}>
|
| 713 |
+
<span className="toggle-label"><Target size={13} />Draw Zone Polygon</span>
|
| 714 |
+
<div className={`toggle-switch ${drawingFence ? 'on' : ''}`} />
|
| 715 |
+
</div>
|
| 716 |
+
<div className="fencing-controls">
|
| 717 |
+
{fencePoints.length > 0
|
| 718 |
+
? <div className="fencing-info">{fencePoints.length} anchor point{fencePoints.length !== 1 ? 's' : ''} — AI counts only inside zone.</div>
|
| 719 |
+
: drawingFence && <div className="fencing-info">Click on the feed to drop anchor points.</div>
|
| 720 |
+
}
|
| 721 |
+
{fencePoints.length > 0 && (
|
| 722 |
+
<button className="btn-clear-fence" onClick={() => setFencePoints([])}>Clear Zone Fence</button>
|
| 723 |
+
)}
|
| 724 |
+
</div>
|
| 725 |
+
</div>
|
| 726 |
+
|
| 727 |
+
{/* Zoom / Pan Controls (GAP 4) */}
|
| 728 |
+
<div className="panel-section">
|
| 729 |
+
<div className="section-header">
|
| 730 |
+
<Move size={16} className="section-icon" />
|
| 731 |
+
<span className="section-title">Viewport ({zoom.toFixed(1)}×)</span>
|
| 732 |
+
</div>
|
| 733 |
+
<div style={{ display: 'flex', gap: 6 }}>
|
| 734 |
+
<button className="btn-secondary" style={{ flex: 1 }}
|
| 735 |
+
onClick={() => setZoom(z => Math.min(5, +(z + 0.5).toFixed(1)))}>
|
| 736 |
+
<ZoomIn size={12} style={{ marginRight: 4, verticalAlign: 'middle' }} />Zoom In
|
| 737 |
+
</button>
|
| 738 |
+
<button className="btn-secondary" style={{ flex: 1 }}
|
| 739 |
+
onClick={() => { setZoom(z => Math.max(1, +(z - 0.5).toFixed(1))); setPan({ x: 0, y: 0 }); }}>
|
| 740 |
+
<ZoomOut size={12} style={{ marginRight: 4, verticalAlign: 'middle' }} />Zoom Out
|
| 741 |
+
</button>
|
| 742 |
+
</div>
|
| 743 |
+
{zoom > 1 && (
|
| 744 |
+
<button className="btn-secondary" style={{ marginTop: 6 }}
|
| 745 |
+
onClick={() => { setZoom(1); setPan({ x: 0, y: 0 }); }}>
|
| 746 |
+
<RotateCcw size={12} style={{ marginRight: 4, verticalAlign: 'middle' }} />Reset View
|
| 747 |
+
</button>
|
| 748 |
+
)}
|
| 749 |
+
</div>
|
| 750 |
+
|
| 751 |
+
{/* Execute & Export */}
|
| 752 |
+
<div className="panel-section" style={{ marginTop: 'auto' }}>
|
| 753 |
+
{!loading ? (
|
| 754 |
+
<>
|
| 755 |
+
<button className="btn-execute" onClick={handleExecute} disabled={!file}>
|
| 756 |
+
<Play size={14} style={{ marginRight: 6, verticalAlign: 'middle' }} />
|
| 757 |
+
{fileType === 'video' ? 'Initialize Stream' : 'Execute Scan'}
|
| 758 |
+
</button>
|
| 759 |
+
<button className="btn-secondary" onClick={exportReport} disabled={history.length === 0}>
|
| 760 |
+
<Download size={12} style={{ marginRight: 4, verticalAlign: 'middle' }} />
|
| 761 |
+
Export Report
|
| 762 |
+
</button>
|
| 763 |
+
</>
|
| 764 |
+
) : (
|
| 765 |
+
<button className="btn-execute terminate" onClick={terminateStream}>
|
| 766 |
+
<Square size={14} style={{ marginRight: 6, verticalAlign: 'middle' }} />
|
| 767 |
+
Terminate Stream
|
| 768 |
+
</button>
|
| 769 |
+
)}
|
| 770 |
+
</div>
|
| 771 |
+
</aside>
|
| 772 |
+
|
| 773 |
+
{/* ════════ CENTER PANEL ════════ */}
|
| 774 |
+
<main
|
| 775 |
+
className="center-panel"
|
| 776 |
+
ref={viewerRef}
|
| 777 |
+
onClick={onViewerClick}
|
| 778 |
+
onMouseDown={onMouseDown}
|
| 779 |
+
onMouseMove={onMouseMove}
|
| 780 |
+
onMouseUp={onMouseUp}
|
| 781 |
+
onMouseLeave={onMouseUp}
|
| 782 |
+
style={{ cursor: zoom > 1 ? (isPanning.current ? 'grabbing' : 'grab') : drawingFence ? 'crosshair' : 'default' }}
|
| 783 |
+
>
|
| 784 |
+
<div className="scan-lines" />
|
| 785 |
+
|
| 786 |
+
{/* Topbar */}
|
| 787 |
+
<div className="panel-topbar">
|
| 788 |
+
<div className="panel-topbar-title">
|
| 789 |
+
<Eye size={14} />
|
| 790 |
+
AERIAL FEED
|
| 791 |
+
{(loading || wsConnected) && <span className="live-badge">LIVE</span>}
|
| 792 |
+
{zoom > 1 && <span style={{ fontSize: '0.6rem', color: 'var(--blue)', marginLeft: 6 }}>{zoom.toFixed(1)}×</span>}
|
| 793 |
+
</div>
|
| 794 |
+
<div className="video-overlay-controls">
|
| 795 |
+
{[
|
| 796 |
+
{ key: 'heatmap', Icon: Thermometer, title: 'Heatmap' },
|
| 797 |
+
{ key: 'clustering', Icon: GitBranch, title: 'Clustering' },
|
| 798 |
+
{ key: 'motionVecs', Icon: Wind, title: 'Motion Vecs'},
|
| 799 |
+
{ key: 'drawFence', Icon: Target, title: 'Draw Zone' },
|
| 800 |
+
].map(({ key, Icon, title }) => (
|
| 801 |
+
<div key={key}
|
| 802 |
+
className={`overlay-btn ${(key === 'drawFence' ? drawingFence : settings[key]) ? 'active' : ''}`}
|
| 803 |
+
title={title}
|
| 804 |
+
onClick={e => {
|
| 805 |
+
e.stopPropagation();
|
| 806 |
+
if (key === 'drawFence') setDrawingFence(d => !d);
|
| 807 |
+
else toggleSetting(key);
|
| 808 |
+
}}>
|
| 809 |
+
<Icon size={13} />
|
| 810 |
+
</div>
|
| 811 |
+
))}
|
| 812 |
+
<div className="overlay-btn" title="Zoom In"
|
| 813 |
+
onClick={e => { e.stopPropagation(); setZoom(z => Math.min(5, +(z + 0.5).toFixed(1))); }}>
|
| 814 |
+
<ZoomIn size={13} />
|
| 815 |
+
</div>
|
| 816 |
+
<div className="overlay-btn" title="Fullscreen"
|
| 817 |
+
onClick={e => { e.stopPropagation(); viewerRef.current?.requestFullscreen?.(); }}>
|
| 818 |
+
<Maximize2 size={13} />
|
| 819 |
+
</div>
|
| 820 |
+
</div>
|
| 821 |
+
</div>
|
| 822 |
+
|
| 823 |
+
{/* Loading state */}
|
| 824 |
+
{loading && !resultImg && (
|
| 825 |
+
<div className="loader-overlay">
|
| 826 |
+
<div className="spinner" />
|
| 827 |
+
<div className="loader-text">
|
| 828 |
+
{fileType === 'video' ? 'Initializing Telemetry...' : 'Neural Engine Processing...'}
|
| 829 |
+
</div>
|
| 830 |
+
</div>
|
| 831 |
+
)}
|
| 832 |
+
|
| 833 |
+
{/* Image / video feed with zoom+pan+opacity (GAP 2, 4) */}
|
| 834 |
+
<div style={{
|
| 835 |
+
position: 'absolute', inset: 0,
|
| 836 |
+
transform: `scale(${zoom}) translate(${pan.x / zoom}px, ${pan.y / zoom}px)`,
|
| 837 |
+
transformOrigin: 'center center',
|
| 838 |
+
transition: isPanning.current ? 'none' : 'transform 0.15s ease',
|
| 839 |
+
display: 'flex', alignItems: 'center', justifyContent: 'center',
|
| 840 |
+
}}>
|
| 841 |
+
{resultImg && (
|
| 842 |
+
<img src={resultImg} className="main-feed fade-up" alt="Analysis feed"
|
| 843 |
+
style={{ opacity: settings.overlayOpacity / 100 }}
|
| 844 |
+
/>
|
| 845 |
+
)}
|
| 846 |
+
{!resultImg && preview && !loading && (
|
| 847 |
+
<img src={preview} className="main-feed" alt="Preview"
|
| 848 |
+
style={{ opacity: (settings.overlayOpacity / 100) * 0.6, filter: 'grayscale(20%)' }} />
|
| 849 |
+
)}
|
| 850 |
+
</div>
|
| 851 |
+
|
| 852 |
+
{/* Empty state */}
|
| 853 |
+
{!resultImg && !preview && !loading && (
|
| 854 |
+
<div className="empty-feed">
|
| 855 |
+
<div className="empty-feed-icon"><Radio size={36} /></div>
|
| 856 |
+
<div className="empty-feed-text">No Feed Detected</div>
|
| 857 |
+
<div className="empty-feed-sub">Upload aerial imagery or video to begin analysis</div>
|
| 858 |
+
</div>
|
| 859 |
+
)}
|
| 860 |
+
|
| 861 |
+
{/* Fencing SVG */}
|
| 862 |
+
{drawingFence && viewerRef.current && (
|
| 863 |
+
<svg className="fencing-svg-overlay"
|
| 864 |
+
width={viewerRef.current.offsetWidth}
|
| 865 |
+
height={viewerRef.current.offsetHeight}>
|
| 866 |
+
{fenceSvg && (
|
| 867 |
+
<>
|
| 868 |
+
<polygon
|
| 869 |
+
points={fenceSvg.pts}
|
| 870 |
+
fill="rgba(56,189,248,0.12)"
|
| 871 |
+
stroke="#38bdf8" strokeWidth={1.5} strokeDasharray="6,4"
|
| 872 |
+
/>
|
| 873 |
+
{fencePoints.map((p, i) => (
|
| 874 |
+
<circle key={i}
|
| 875 |
+
cx={p.x * fenceSvg.w} cy={p.y * fenceSvg.h}
|
| 876 |
+
r={5} fill="var(--teal)"
|
| 877 |
+
stroke="rgba(0,230,184,0.4)" strokeWidth={2}
|
| 878 |
+
/>
|
| 879 |
+
))}
|
| 880 |
+
</>
|
| 881 |
+
)}
|
| 882 |
+
</svg>
|
| 883 |
+
)}
|
| 884 |
+
|
| 885 |
+
{/* Count overlay */}
|
| 886 |
+
{(resultImg || stats.count > 0) && (
|
| 887 |
+
<div className="count-overlay">
|
| 888 |
+
<div className="count-overlay-label">Zone Population</div>
|
| 889 |
+
<div className={`count-overlay-value ${countClass}`}>{stats.count}</div>
|
| 890 |
+
</div>
|
| 891 |
+
)}
|
| 892 |
+
|
| 893 |
+
{/* Anomaly banner */}
|
| 894 |
+
{anomalyActive && (
|
| 895 |
+
<div className="anomaly-banner">
|
| 896 |
+
<AlertTriangle size={18} />
|
| 897 |
+
ANOMALY DETECTED — CHAOS / COUNTERFLOW
|
| 898 |
+
</div>
|
| 899 |
+
)}
|
| 900 |
+
|
| 901 |
+
{/* Predictive warning overlay */}
|
| 902 |
+
{predictAlert && !anomalyActive && (
|
| 903 |
+
<div className="anomaly-banner" style={{
|
| 904 |
+
background: 'rgba(245,158,11,0.12)',
|
| 905 |
+
border: '1.5px solid var(--moderate)',
|
| 906 |
+
color: 'var(--moderate)',
|
| 907 |
+
boxShadow: '0 0 30px var(--moderate-glow)',
|
| 908 |
+
}}>
|
| 909 |
+
<Brain size={18} />
|
| 910 |
+
PREDICTIVE ALERT — CAPACITY BREACH IMMINENT (~{predicted} subjects)
|
| 911 |
+
</div>
|
| 912 |
+
)}
|
| 913 |
+
</main>
|
| 914 |
+
|
| 915 |
+
{/* ════════ RIGHT INTELLIGENCE PANEL ════════ */}
|
| 916 |
+
<aside className="right-panel panel">
|
| 917 |
+
{/* Alert Feed */}
|
| 918 |
+
<div className="right-panel-section" style={{ flex: '3 1 0' }}>
|
| 919 |
+
<div className="panel-header">
|
| 920 |
+
<div className="panel-header-title">
|
| 921 |
+
<AlertTriangle size={13} />Alert Feed
|
| 922 |
+
</div>
|
| 923 |
+
{alerts.length > 0 && <div className="panel-header-count">{alerts.length}</div>}
|
| 924 |
+
</div>
|
| 925 |
+
<div className="alert-feed">
|
| 926 |
+
{alerts.length === 0
|
| 927 |
+
? <div className="alert-empty"><Shield size={28} /><p>All systems nominal</p></div>
|
| 928 |
+
: alerts.map(a => <AlertItem key={a.id} type={a.type} title={a.title} msg={a.msg} time={a.time} />)
|
| 929 |
+
}
|
| 930 |
+
</div>
|
| 931 |
+
</div>
|
| 932 |
+
|
| 933 |
+
{/* Session History */}
|
| 934 |
+
<div className="right-panel-section" style={{ flex: '2 1 0' }}>
|
| 935 |
+
<div className="panel-header">
|
| 936 |
+
<div className="panel-header-title"><Database size={13} />Session History</div>
|
| 937 |
+
{sessions.length > 0 && (
|
| 938 |
+
<button className="btn-export" style={{ fontSize: '0.6rem', padding: '2px 8px' }}
|
| 939 |
+
onClick={() => { setSessions([]); localStorage.removeItem('cp_sessions'); }}>
|
| 940 |
+
Clear
|
| 941 |
+
</button>
|
| 942 |
+
)}
|
| 943 |
+
</div>
|
| 944 |
+
<div className="session-list">
|
| 945 |
+
{sessions.length === 0
|
| 946 |
+
? <div className="session-empty"><Database size={24} /><span>No sessions recorded</span></div>
|
| 947 |
+
: sessions.map(s => (
|
| 948 |
+
<div key={s.id} className="session-item"
|
| 949 |
+
onClick={() => s.history?.length && setHistory(s.history)}>
|
| 950 |
+
<div className="session-name">{s.name.slice(0, 28)}</div>
|
| 951 |
+
<div className="session-meta">
|
| 952 |
+
<span><TrendingUp size={10} />{s.peak}</span>
|
| 953 |
+
<span><Gauge size={10} />{s.avg}</span>
|
| 954 |
+
<span><AlertTriangle size={10} />{s.alerts}</span>
|
| 955 |
+
<span style={{ marginLeft: 'auto' }}>{s.elapsed}</span>
|
| 956 |
+
</div>
|
| 957 |
+
</div>
|
| 958 |
+
))
|
| 959 |
+
}
|
| 960 |
+
</div>
|
| 961 |
+
</div>
|
| 962 |
+
</aside>
|
| 963 |
+
|
| 964 |
+
{/* ════════ BOTTOM ANALYTICS PANEL ════════ */}
|
| 965 |
+
<section className="bottom-panel panel">
|
| 966 |
+
|
| 967 |
+
{/* Metric: Zone Population */}
|
| 968 |
+
<div className="metric-card">
|
| 969 |
+
<div className="metric-label"><Users size={11} />Zone Pop.</div>
|
| 970 |
+
<div>
|
| 971 |
+
<div className={`metric-value ${countClass}`}>{stats.count}</div>
|
| 972 |
+
<div className="metric-sub">Current frame</div>
|
| 973 |
+
</div>
|
| 974 |
+
</div>
|
| 975 |
+
|
| 976 |
+
{/* Metric: Unique Subjects */}
|
| 977 |
+
<div className="metric-card">
|
| 978 |
+
<div className="metric-label"><Crosshair size={11} />Unique</div>
|
| 979 |
+
<div>
|
| 980 |
+
<div className="metric-value blue">{stats.unique}</div>
|
| 981 |
+
<div className="metric-sub">Tracked subjects</div>
|
| 982 |
+
</div>
|
| 983 |
+
</div>
|
| 984 |
+
|
| 985 |
+
{/* Metric: Avg Density (GAP 3) */}
|
| 986 |
+
<div className="metric-card">
|
| 987 |
+
<div className="metric-label"><Gauge size={11} />Density</div>
|
| 988 |
+
<div>
|
| 989 |
+
<div className={`metric-value ${density.cls}`} style={{ fontSize: '1.5rem', letterSpacing: '-0.5px' }}>
|
| 990 |
+
{density.label}
|
| 991 |
+
</div>
|
| 992 |
+
<div className="metric-sub">Avg {avgCount} · Peak {peakCount}</div>
|
| 993 |
+
</div>
|
| 994 |
+
</div>
|
| 995 |
+
|
| 996 |
+
{/* Metric: Latency / Frames */}
|
| 997 |
+
<div className="metric-card">
|
| 998 |
+
<div className="metric-label"><Activity size={11} />Latency</div>
|
| 999 |
+
<div>
|
| 1000 |
+
<div className="metric-value" style={{ fontSize: '1.6rem' }}>
|
| 1001 |
+
{fileType === 'video'
|
| 1002 |
+
? (stats.frames || 0)
|
| 1003 |
+
: (stats.latency > 0 ? stats.latency.toFixed(2) : '—')}
|
| 1004 |
+
</div>
|
| 1005 |
+
<div className="metric-sub">{fileType === 'video' ? 'frames processed' : 'seconds'}</div>
|
| 1006 |
+
</div>
|
| 1007 |
+
</div>
|
| 1008 |
+
|
| 1009 |
+
{/* Chart (GAP 1: time-formatted X-axis) */}
|
| 1010 |
+
<div className="chart-area">
|
| 1011 |
+
<div className="chart-header">
|
| 1012 |
+
<div className="chart-title">Population Dynamics Timeline</div>
|
| 1013 |
+
<div className="export-actions">
|
| 1014 |
+
<button className="btn-export" onClick={exportReport} disabled={history.length === 0}>
|
| 1015 |
+
<Download size={11} />Report
|
| 1016 |
+
</button>
|
| 1017 |
+
<button className="btn-export"
|
| 1018 |
+
style={{ borderColor: 'rgba(167,139,250,0.3)', color: 'var(--purple)', background: 'rgba(167,139,250,0.06)' }}
|
| 1019 |
+
onClick={() => setHistory([])} disabled={history.length === 0}>
|
| 1020 |
+
<RotateCcw size={11} />Reset
|
| 1021 |
+
</button>
|
| 1022 |
+
</div>
|
| 1023 |
+
</div>
|
| 1024 |
+
<div className="chart-wrapper">
|
| 1025 |
+
{history.length > 0 ? (
|
| 1026 |
+
<ResponsiveContainer width="100%" height="100%">
|
| 1027 |
+
<AreaChart data={history} margin={{ top: 4, right: 8, left: -20, bottom: 0 }}>
|
| 1028 |
+
<defs>
|
| 1029 |
+
<linearGradient id="tealGrad" x1="0" y1="0" x2="0" y2="1">
|
| 1030 |
+
<stop offset="5%" stopColor="var(--teal)" stopOpacity={0.25} />
|
| 1031 |
+
<stop offset="95%" stopColor="var(--teal)" stopOpacity={0} />
|
| 1032 |
+
</linearGradient>
|
| 1033 |
+
</defs>
|
| 1034 |
+
<CartesianGrid strokeDasharray="2 4" stroke="rgba(255,255,255,0.04)" />
|
| 1035 |
+
{/* GAP 1: X-axis shows HH:MM:SS time label */}
|
| 1036 |
+
<XAxis dataKey="label"
|
| 1037 |
+
stroke="rgba(255,255,255,0.12)"
|
| 1038 |
+
tick={{ fontSize: 8, fill: 'var(--text-muted)', fontFamily: 'Orbitron' }}
|
| 1039 |
+
interval="preserveStartEnd"
|
| 1040 |
+
/>
|
| 1041 |
+
<YAxis stroke="rgba(255,255,255,0.12)"
|
| 1042 |
+
tick={{ fontSize: 9, fill: 'var(--text-muted)' }} />
|
| 1043 |
+
<Tooltip content={<CTooltip />} />
|
| 1044 |
+
{settings.capacity > 0 && (
|
| 1045 |
+
<ReferenceLine y={settings.capacity}
|
| 1046 |
+
stroke="var(--danger)" strokeDasharray="4 4" strokeOpacity={0.5}
|
| 1047 |
+
label={{ value: 'LIMIT', fill: 'var(--danger)', fontSize: 9, fontFamily: 'Orbitron' }}
|
| 1048 |
+
/>
|
| 1049 |
+
)}
|
| 1050 |
+
<Area type="monotone" dataKey="count"
|
| 1051 |
+
stroke="var(--teal)" strokeWidth={2.5}
|
| 1052 |
+
fill="url(#tealGrad)"
|
| 1053 |
+
dot={false}
|
| 1054 |
+
activeDot={{ r: 5, fill: 'var(--bg-base)', stroke: 'var(--teal)', strokeWidth: 2 }}
|
| 1055 |
+
isAnimationActive={false}
|
| 1056 |
+
/>
|
| 1057 |
+
</AreaChart>
|
| 1058 |
+
</ResponsiveContainer>
|
| 1059 |
+
) : (
|
| 1060 |
+
<div className="chart-empty-state">Run an image scan or video stream to populate analytics.</div>
|
| 1061 |
+
)}
|
| 1062 |
+
</div>
|
| 1063 |
+
</div>
|
| 1064 |
+
</section>
|
| 1065 |
+
</div>
|
| 1066 |
+
);
|
| 1067 |
+
}
|
frontend/src/index.css
ADDED
|
@@ -0,0 +1,607 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
@import url('https://fonts.googleapis.com/css2?family=Inter:wght@300;400;500;600;700;800&family=Orbitron:wght@400;500;600;700;800;900&display=swap');
|
| 2 |
+
|
| 3 |
+
/* ============================================
|
| 4 |
+
DARK THEME (default)
|
| 5 |
+
============================================ */
|
| 6 |
+
:root,
|
| 7 |
+
[data-theme="dark"] {
|
| 8 |
+
--bg-base: #080c14;
|
| 9 |
+
--bg-surface: #0d1117;
|
| 10 |
+
--bg-panel: rgba(13, 20, 35, 0.88);
|
| 11 |
+
--bg-input: rgba(255, 255, 255, 0.05);
|
| 12 |
+
--bg-hover: rgba(255, 255, 255, 0.08);
|
| 13 |
+
|
| 14 |
+
--primary: #00e6b8;
|
| 15 |
+
--primary-dim: #00b891;
|
| 16 |
+
--primary-glow: rgba(0, 230, 184, 0.28);
|
| 17 |
+
--primary-border: rgba(0, 230, 184, 0.22);
|
| 18 |
+
|
| 19 |
+
--blue: #38bdf8;
|
| 20 |
+
--blue-glow: rgba(56, 189, 248, 0.22);
|
| 21 |
+
|
| 22 |
+
--purple: #a78bfa;
|
| 23 |
+
--purple-glow: rgba(167, 139, 250, 0.22);
|
| 24 |
+
|
| 25 |
+
--safe: #22c55e;
|
| 26 |
+
--safe-glow: rgba(34, 197, 94, 0.30);
|
| 27 |
+
--moderate: #f59e0b;
|
| 28 |
+
--moderate-glow: rgba(245, 158, 11, 0.30);
|
| 29 |
+
--danger: #ef4444;
|
| 30 |
+
--danger-glow: rgba(239, 68, 68, 0.35);
|
| 31 |
+
|
| 32 |
+
--text-primary: #f0f4ff;
|
| 33 |
+
--text-secondary: #8899b4;
|
| 34 |
+
--text-muted: #4a5568;
|
| 35 |
+
|
| 36 |
+
--border-glass: rgba(255, 255, 255, 0.07);
|
| 37 |
+
--border-active: rgba(0, 230, 184, 0.42);
|
| 38 |
+
|
| 39 |
+
--shadow-panel: 0 8px 32px rgba(0,0,0,0.6), 0 0 0 1px rgba(255,255,255,0.04);
|
| 40 |
+
|
| 41 |
+
--scan-line: rgba(0,0,0,0.04);
|
| 42 |
+
--upload-bg: rgba(0, 230, 184, 0.03);
|
| 43 |
+
--chart-gradient: rgba(0, 230, 184, 0.25);
|
| 44 |
+
--chart-gradient-end: rgba(0, 230, 184, 0);
|
| 45 |
+
|
| 46 |
+
--metric-bg: transparent;
|
| 47 |
+
--toggle-bg: rgba(0,0,0,0.5);
|
| 48 |
+
|
| 49 |
+
--bg-ambient-1: rgba(59, 130, 246, 0.10);
|
| 50 |
+
--bg-ambient-2: rgba(0, 230, 184, 0.08);
|
| 51 |
+
}
|
| 52 |
+
|
| 53 |
+
/* ============================================
|
| 54 |
+
LIGHT THEME
|
| 55 |
+
============================================ */
|
| 56 |
+
[data-theme="light"] {
|
| 57 |
+
--bg-base: #f1f5f9;
|
| 58 |
+
--bg-surface: #ffffff;
|
| 59 |
+
--bg-panel: rgba(255, 255, 255, 0.92);
|
| 60 |
+
--bg-input: rgba(0, 0, 0, 0.04);
|
| 61 |
+
--bg-hover: rgba(0, 0, 0, 0.06);
|
| 62 |
+
|
| 63 |
+
--primary: #0891b2;
|
| 64 |
+
--primary-dim: #0e7490;
|
| 65 |
+
--primary-glow: rgba(8, 145, 178, 0.20);
|
| 66 |
+
--primary-border: rgba(8, 145, 178, 0.25);
|
| 67 |
+
|
| 68 |
+
--blue: #2563eb;
|
| 69 |
+
--blue-glow: rgba(37, 99, 235, 0.18);
|
| 70 |
+
|
| 71 |
+
--purple: #7c3aed;
|
| 72 |
+
--purple-glow: rgba(124, 58, 237, 0.18);
|
| 73 |
+
|
| 74 |
+
--safe: #16a34a;
|
| 75 |
+
--safe-glow: rgba(22, 163, 74, 0.20);
|
| 76 |
+
--moderate: #d97706;
|
| 77 |
+
--moderate-glow: rgba(217, 119, 6, 0.22);
|
| 78 |
+
--danger: #dc2626;
|
| 79 |
+
--danger-glow: rgba(220, 38, 38, 0.22);
|
| 80 |
+
|
| 81 |
+
--text-primary: #0f172a;
|
| 82 |
+
--text-secondary: #475569;
|
| 83 |
+
--text-muted: #94a3b8;
|
| 84 |
+
|
| 85 |
+
--border-glass: rgba(0, 0, 0, 0.09);
|
| 86 |
+
--border-active: rgba(8, 145, 178, 0.45);
|
| 87 |
+
|
| 88 |
+
--shadow-panel: 0 4px 24px rgba(0,0,0,0.08), 0 0 0 1px rgba(0,0,0,0.05);
|
| 89 |
+
|
| 90 |
+
--scan-line: rgba(0,0,0,0.015);
|
| 91 |
+
--upload-bg: rgba(8, 145, 178, 0.04);
|
| 92 |
+
--chart-gradient: rgba(8, 145, 178, 0.20);
|
| 93 |
+
--chart-gradient-end: rgba(8, 145, 178, 0);
|
| 94 |
+
|
| 95 |
+
--metric-bg: rgba(248, 250, 252, 0.9);
|
| 96 |
+
--toggle-bg: rgba(255,255,255,0.8);
|
| 97 |
+
|
| 98 |
+
--bg-ambient-1: rgba(8, 145, 178, 0.06);
|
| 99 |
+
--bg-ambient-2: rgba(124, 58, 237, 0.05);
|
| 100 |
+
}
|
| 101 |
+
|
| 102 |
+
/* ============================================
|
| 103 |
+
RESET & BASE
|
| 104 |
+
============================================ */
|
| 105 |
+
*, *::before, *::after { box-sizing: border-box; margin: 0; padding: 0; }
|
| 106 |
+
html, body, #root { height: 100%; overflow: hidden; }
|
| 107 |
+
|
| 108 |
+
body {
|
| 109 |
+
font-family: 'Inter', sans-serif;
|
| 110 |
+
background: var(--bg-base);
|
| 111 |
+
color: var(--text-primary);
|
| 112 |
+
font-size: 13px;
|
| 113 |
+
line-height: 1.5;
|
| 114 |
+
background-image:
|
| 115 |
+
radial-gradient(ellipse at 15% 50%, var(--bg-ambient-1) 0%, transparent 45%),
|
| 116 |
+
radial-gradient(ellipse at 85% 20%, var(--bg-ambient-2) 0%, transparent 45%);
|
| 117 |
+
background-attachment: fixed;
|
| 118 |
+
transition: background 0.35s ease, color 0.35s ease;
|
| 119 |
+
}
|
| 120 |
+
|
| 121 |
+
/* ============================================
|
| 122 |
+
THEME TOGGLE BUTTON
|
| 123 |
+
============================================ */
|
| 124 |
+
.theme-toggle {
|
| 125 |
+
width: 48px;
|
| 126 |
+
height: 26px;
|
| 127 |
+
border-radius: 13px;
|
| 128 |
+
background: var(--bg-input);
|
| 129 |
+
border: 1px solid var(--border-glass);
|
| 130 |
+
cursor: pointer;
|
| 131 |
+
position: relative;
|
| 132 |
+
transition: all 0.3s ease;
|
| 133 |
+
flex-shrink: 0;
|
| 134 |
+
}
|
| 135 |
+
[data-theme="dark"] .theme-toggle { background: rgba(255,255,255,0.08); }
|
| 136 |
+
[data-theme="light"] .theme-toggle { background: rgba(8,145,178,0.12); border-color: var(--primary-border); }
|
| 137 |
+
|
| 138 |
+
.theme-toggle-thumb {
|
| 139 |
+
position: absolute;
|
| 140 |
+
width: 20px; height: 20px;
|
| 141 |
+
border-radius: 50%;
|
| 142 |
+
top: 2px; left: 2px;
|
| 143 |
+
background: var(--text-muted);
|
| 144 |
+
transition: all 0.35s cubic-bezier(0.4, 0, 0.2, 1);
|
| 145 |
+
display: flex; align-items: center; justify-content: center;
|
| 146 |
+
font-size: 10px;
|
| 147 |
+
box-shadow: 0 1px 4px rgba(0,0,0,0.2);
|
| 148 |
+
}
|
| 149 |
+
[data-theme="light"] .theme-toggle-thumb {
|
| 150 |
+
transform: translateX(22px);
|
| 151 |
+
background: var(--primary);
|
| 152 |
+
box-shadow: 0 0 8px var(--primary-glow);
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
.theme-label {
|
| 156 |
+
font-size: 0.6rem;
|
| 157 |
+
font-weight: 600;
|
| 158 |
+
letter-spacing: 0.5px;
|
| 159 |
+
color: var(--text-muted);
|
| 160 |
+
text-transform: uppercase;
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
/* ============================================
|
| 164 |
+
DASHBOARD GRID
|
| 165 |
+
============================================ */
|
| 166 |
+
.dashboard {
|
| 167 |
+
display: grid;
|
| 168 |
+
height: 100vh;
|
| 169 |
+
grid-template-rows: 56px 1fr 220px;
|
| 170 |
+
grid-template-columns: 280px 1fr 300px;
|
| 171 |
+
grid-template-areas:
|
| 172 |
+
"navbar navbar navbar"
|
| 173 |
+
"left center right"
|
| 174 |
+
"left bottom right";
|
| 175 |
+
overflow: hidden;
|
| 176 |
+
}
|
| 177 |
+
|
| 178 |
+
/* ============================================
|
| 179 |
+
TOP NAVBAR
|
| 180 |
+
============================================ */
|
| 181 |
+
.navbar {
|
| 182 |
+
grid-area: navbar;
|
| 183 |
+
display: flex;
|
| 184 |
+
align-items: center;
|
| 185 |
+
justify-content: space-between;
|
| 186 |
+
padding: 0 20px;
|
| 187 |
+
background: var(--bg-panel);
|
| 188 |
+
border-bottom: 1px solid var(--border-glass);
|
| 189 |
+
backdrop-filter: blur(20px);
|
| 190 |
+
-webkit-backdrop-filter: blur(20px);
|
| 191 |
+
z-index: 100;
|
| 192 |
+
gap: 16px;
|
| 193 |
+
transition: background 0.35s ease, border-color 0.35s ease;
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
[data-theme="light"] .navbar {
|
| 197 |
+
box-shadow: 0 1px 12px rgba(0,0,0,0.07);
|
| 198 |
+
}
|
| 199 |
+
|
| 200 |
+
.navbar-brand { display: flex; align-items: center; gap: 10px; flex-shrink: 0; }
|
| 201 |
+
.navbar-logo {
|
| 202 |
+
width: 32px; height: 32px; border-radius: 8px;
|
| 203 |
+
background: linear-gradient(135deg, var(--primary), var(--blue));
|
| 204 |
+
display: flex; align-items: center; justify-content: center;
|
| 205 |
+
box-shadow: 0 0 16px var(--primary-glow);
|
| 206 |
+
}
|
| 207 |
+
.navbar-logo svg { width: 16px; height: 16px; color: #fff; }
|
| 208 |
+
|
| 209 |
+
.navbar-title {
|
| 210 |
+
font-family: 'Orbitron', sans-serif;
|
| 211 |
+
font-size: 1.05rem; font-weight: 700;
|
| 212 |
+
background: linear-gradient(135deg, var(--primary), var(--blue));
|
| 213 |
+
-webkit-background-clip: text; -webkit-text-fill-color: transparent;
|
| 214 |
+
background-clip: text; letter-spacing: 2px;
|
| 215 |
+
}
|
| 216 |
+
.navbar-subtitle {
|
| 217 |
+
font-size: 0.62rem; color: var(--text-muted);
|
| 218 |
+
letter-spacing: 1.5px; text-transform: uppercase; font-weight: 500;
|
| 219 |
+
}
|
| 220 |
+
|
| 221 |
+
.navbar-center { display: flex; align-items: center; gap: 16px; flex: 1; justify-content: center; }
|
| 222 |
+
|
| 223 |
+
.threat-level {
|
| 224 |
+
display: flex; align-items: center; gap: 8px;
|
| 225 |
+
padding: 5px 14px; border-radius: 100px; border: 1px solid;
|
| 226 |
+
font-family: 'Orbitron', sans-serif;
|
| 227 |
+
font-size: 0.65rem; font-weight: 700; letter-spacing: 1.5px; text-transform: uppercase;
|
| 228 |
+
transition: all 0.4s ease;
|
| 229 |
+
}
|
| 230 |
+
.threat-level.safe { background: rgba(34,197,94,0.08); border-color: rgba(34,197,94,0.3); color: var(--safe); box-shadow: 0 0 16px var(--safe-glow); }
|
| 231 |
+
.threat-level.moderate { background: rgba(245,158,11,0.08); border-color: rgba(245,158,11,0.3); color: var(--moderate); box-shadow: 0 0 16px var(--moderate-glow); }
|
| 232 |
+
.threat-level.danger { background: rgba(239,68,68,0.08); border-color: rgba(239,68,68,0.3); color: var(--danger); box-shadow: 0 0 16px var(--danger-glow); animation: danger-pulse 1.5s infinite; }
|
| 233 |
+
@keyframes danger-pulse { 0%,100%{box-shadow:0 0 16px var(--danger-glow)}50%{box-shadow:0 0 36px var(--danger-glow),0 0 60px rgba(239,68,68,0.1)} }
|
| 234 |
+
|
| 235 |
+
.threat-dot { width: 7px; height: 7px; border-radius: 50%; flex-shrink: 0; }
|
| 236 |
+
.safe .threat-dot { background: var(--safe); animation: blink 2s infinite; }
|
| 237 |
+
.moderate .threat-dot { background: var(--moderate); animation: blink 1s infinite; }
|
| 238 |
+
.danger .threat-dot { background: var(--danger); animation: blink 0.5s infinite; }
|
| 239 |
+
@keyframes blink { 0%,100%{opacity:1}50%{opacity:0.2} }
|
| 240 |
+
|
| 241 |
+
.navbar-stats { display: flex; align-items: center; gap: 16px; flex-shrink: 0; }
|
| 242 |
+
.nav-stat { display: flex; flex-direction: column; align-items: flex-end; gap: 1px; }
|
| 243 |
+
.nav-stat-label { font-size: 0.58rem; text-transform: uppercase; letter-spacing: 1px; color: var(--text-muted); font-weight: 600; }
|
| 244 |
+
.nav-stat-value { font-family: 'Orbitron', sans-serif; font-size: 0.78rem; font-weight: 600; color: var(--primary); }
|
| 245 |
+
|
| 246 |
+
.ws-status {
|
| 247 |
+
display: flex; align-items: center; gap: 6px;
|
| 248 |
+
padding: 4px 10px; border-radius: 100px;
|
| 249 |
+
font-size: 0.62rem; font-weight: 600; letter-spacing: 0.5px; text-transform: uppercase;
|
| 250 |
+
background: var(--bg-input); border: 1px solid var(--border-glass); transition: all 0.3s;
|
| 251 |
+
}
|
| 252 |
+
.ws-status.connected { color: var(--safe); border-color: rgba(34,197,94,0.25); }
|
| 253 |
+
.ws-status.disconnected { color: var(--text-muted); }
|
| 254 |
+
|
| 255 |
+
/* ============================================
|
| 256 |
+
GLASS PANEL BASE
|
| 257 |
+
============================================ */
|
| 258 |
+
.panel {
|
| 259 |
+
background: var(--bg-panel);
|
| 260 |
+
backdrop-filter: blur(24px); -webkit-backdrop-filter: blur(24px);
|
| 261 |
+
border: 1px solid var(--border-glass);
|
| 262 |
+
overflow: hidden;
|
| 263 |
+
transition: border-color 0.3s ease, background 0.35s ease, box-shadow 0.3s ease;
|
| 264 |
+
}
|
| 265 |
+
.panel:hover { border-color: var(--primary-border); }
|
| 266 |
+
[data-theme="light"] .panel { box-shadow: 0 2px 16px rgba(0,0,0,0.06); }
|
| 267 |
+
|
| 268 |
+
/* ============================================
|
| 269 |
+
LEFT PANEL
|
| 270 |
+
============================================ */
|
| 271 |
+
.left-panel { grid-area: left; border-right: 1px solid var(--border-glass); display: flex; flex-direction: column; overflow-y: auto; }
|
| 272 |
+
.left-panel::-webkit-scrollbar { width: 3px; }
|
| 273 |
+
.left-panel::-webkit-scrollbar-thumb { background: var(--primary-border); border-radius: 2px; }
|
| 274 |
+
|
| 275 |
+
.panel-section { padding: 14px 16px; border-bottom: 1px solid var(--border-glass); }
|
| 276 |
+
.panel-section:last-child { border-bottom: none; }
|
| 277 |
+
|
| 278 |
+
.section-header { display: flex; align-items: center; gap: 8px; margin-bottom: 12px; }
|
| 279 |
+
.section-title { font-size: 0.63rem; font-weight: 700; text-transform: uppercase; letter-spacing: 2px; color: var(--text-muted); }
|
| 280 |
+
.section-icon { width: 15px; height: 15px; color: var(--primary); flex-shrink: 0; }
|
| 281 |
+
|
| 282 |
+
.toggle-row { display: flex; align-items: center; justify-content: space-between; padding: 7px 0; cursor: pointer; }
|
| 283 |
+
.toggle-row:hover { opacity: 0.82; }
|
| 284 |
+
.toggle-label { display: flex; align-items: center; gap: 8px; font-size: 0.8rem; font-weight: 500; color: var(--text-secondary); }
|
| 285 |
+
.toggle-label svg { width: 12px; height: 12px; color: var(--primary); flex-shrink: 0; }
|
| 286 |
+
|
| 287 |
+
.toggle-switch {
|
| 288 |
+
width: 36px; height: 20px; border-radius: 10px;
|
| 289 |
+
background: var(--bg-input); border: 1px solid var(--border-glass);
|
| 290 |
+
position: relative; transition: all 0.3s cubic-bezier(0.4,0,0.2,1); flex-shrink: 0; cursor: pointer;
|
| 291 |
+
}
|
| 292 |
+
.toggle-switch.on {
|
| 293 |
+
background: linear-gradient(135deg, var(--primary), var(--blue));
|
| 294 |
+
border-color: var(--primary); box-shadow: 0 0 10px var(--primary-glow);
|
| 295 |
+
}
|
| 296 |
+
.toggle-switch::after {
|
| 297 |
+
content: ''; position: absolute; width: 14px; height: 14px; border-radius: 50%;
|
| 298 |
+
background: white; top: 2px; left: 2px;
|
| 299 |
+
transition: transform 0.3s cubic-bezier(0.4,0,0.2,1); box-shadow: 0 1px 3px rgba(0,0,0,0.35);
|
| 300 |
+
}
|
| 301 |
+
[data-theme="light"] .toggle-switch::after { background: #f8fafc; box-shadow: 0 1px 3px rgba(0,0,0,0.18); }
|
| 302 |
+
.toggle-switch.on::after { transform: translateX(16px); }
|
| 303 |
+
|
| 304 |
+
.mode-select {
|
| 305 |
+
width: 100%; background: var(--bg-input); border: 1px solid var(--border-glass);
|
| 306 |
+
border-radius: 6px; color: var(--text-primary); font-family: 'Inter', sans-serif;
|
| 307 |
+
font-size: 0.8rem; font-weight: 500; padding: 8px 12px; cursor: pointer; outline: none;
|
| 308 |
+
-webkit-appearance: none; appearance: none; transition: all 0.2s;
|
| 309 |
+
background-image: url("data:image/svg+xml,%3Csvg xmlns='http://www.w3.org/2000/svg' width='12' height='8' viewBox='0 0 12 8'%3E%3Cpath fill='%2394a3b8' d='M1 1l5 5 5-5'/%3E%3C/svg%3E");
|
| 310 |
+
background-repeat: no-repeat; background-position: right 10px center;
|
| 311 |
+
}
|
| 312 |
+
.mode-select:hover, .mode-select:focus { border-color: var(--primary-border); background-color: var(--bg-hover); }
|
| 313 |
+
.mode-select option { background: var(--bg-surface); color: var(--text-primary); }
|
| 314 |
+
|
| 315 |
+
.slider-group { display: flex; flex-direction: column; gap: 5px; padding: 3px 0; }
|
| 316 |
+
.slider-header { display: flex; justify-content: space-between; font-size: 0.78rem; font-weight: 500; color: var(--text-secondary); }
|
| 317 |
+
.slider-value { font-family: 'Orbitron', sans-serif; font-size: 0.68rem; font-weight: 600; color: var(--primary); }
|
| 318 |
+
|
| 319 |
+
input[type=range] {
|
| 320 |
+
-webkit-appearance: none; appearance: none; width: 100%; height: 4px;
|
| 321 |
+
border-radius: 2px; background: var(--bg-input); outline: none; cursor: pointer;
|
| 322 |
+
}
|
| 323 |
+
input[type=range]::-webkit-slider-thumb {
|
| 324 |
+
-webkit-appearance: none; width: 16px; height: 16px; border-radius: 50%;
|
| 325 |
+
background: var(--primary); cursor: pointer; box-shadow: 0 0 8px var(--primary-glow);
|
| 326 |
+
transition: transform 0.15s, box-shadow 0.15s;
|
| 327 |
+
}
|
| 328 |
+
input[type=range]::-webkit-slider-thumb:hover { transform: scale(1.25); box-shadow: 0 0 16px var(--primary-glow); }
|
| 329 |
+
input[type=range]::-webkit-slider-runnable-track { height: 4px; border-radius: 2px; }
|
| 330 |
+
|
| 331 |
+
.upload-zone {
|
| 332 |
+
border: 1.5px dashed var(--primary-border); border-radius: 12px;
|
| 333 |
+
padding: 24px 16px; text-align: center; cursor: pointer;
|
| 334 |
+
transition: all 0.3s ease; background: var(--upload-bg); position: relative; overflow: hidden;
|
| 335 |
+
}
|
| 336 |
+
.upload-zone:hover, .upload-zone.drag-active {
|
| 337 |
+
border-color: var(--primary); transform: scale(1.01); background: var(--primary-glow);
|
| 338 |
+
}
|
| 339 |
+
[data-theme="light"] .upload-zone { background: rgba(8,145,178,0.03); }
|
| 340 |
+
[data-theme="light"] .upload-zone:hover { background: rgba(8,145,178,0.07); }
|
| 341 |
+
|
| 342 |
+
.upload-icon { font-size: 2rem; display: block; margin-bottom: 8px; animation: float 3s ease-in-out infinite; }
|
| 343 |
+
.upload-title { font-weight: 600; color: var(--primary); font-size: 0.83rem; margin-bottom: 3px; }
|
| 344 |
+
.upload-sub { color: var(--text-muted); font-size: 0.7rem; }
|
| 345 |
+
|
| 346 |
+
.fencing-controls { display: flex; flex-direction: column; gap: 6px; margin-top: 4px; }
|
| 347 |
+
.fencing-info {
|
| 348 |
+
padding: 6px 10px; background: rgba(37,99,235,0.08); border: 1px solid rgba(37,99,235,0.2);
|
| 349 |
+
border-radius: 6px; font-size: 0.7rem; color: var(--blue); line-height: 1.4;
|
| 350 |
+
}
|
| 351 |
+
[data-theme="light"] .fencing-info { background: rgba(37,99,235,0.06); }
|
| 352 |
+
|
| 353 |
+
.btn-clear-fence {
|
| 354 |
+
background: rgba(220,38,38,0.08); border: 1px solid rgba(220,38,38,0.2);
|
| 355 |
+
border-radius: 6px; color: var(--danger); font-size: 0.72rem; font-weight: 600;
|
| 356 |
+
padding: 6px 10px; cursor: pointer; font-family: inherit; transition: all 0.2s;
|
| 357 |
+
}
|
| 358 |
+
.btn-clear-fence:hover { background: rgba(220,38,38,0.15); }
|
| 359 |
+
|
| 360 |
+
.btn-execute {
|
| 361 |
+
width: 100%; padding: 11px; border: none; border-radius: 10px;
|
| 362 |
+
background: linear-gradient(135deg, var(--primary), var(--blue));
|
| 363 |
+
color: #fff; font-family: 'Orbitron', sans-serif;
|
| 364 |
+
font-size: 0.68rem; font-weight: 700; letter-spacing: 1.2px; text-transform: uppercase;
|
| 365 |
+
cursor: pointer; transition: all 0.3s cubic-bezier(0.4,0,0.2,1);
|
| 366 |
+
box-shadow: 0 4px 16px var(--primary-glow); position: relative; overflow: hidden;
|
| 367 |
+
}
|
| 368 |
+
[data-theme="dark"] .btn-execute { color: #080c14; }
|
| 369 |
+
.btn-execute:hover:not(:disabled) { transform: translateY(-2px); box-shadow: 0 8px 28px var(--primary-glow); }
|
| 370 |
+
.btn-execute:disabled { opacity: 0.35; cursor: not-allowed; transform: none; box-shadow: none; }
|
| 371 |
+
.btn-execute.terminate {
|
| 372 |
+
background: linear-gradient(135deg, var(--danger), #f97316);
|
| 373 |
+
box-shadow: 0 4px 16px var(--danger-glow); color: #fff;
|
| 374 |
+
}
|
| 375 |
+
.btn-execute.terminate:hover:not(:disabled) { box-shadow: 0 8px 28px var(--danger-glow); }
|
| 376 |
+
|
| 377 |
+
.btn-secondary {
|
| 378 |
+
width: 100%; padding: 7px; border: 1px solid var(--border-glass);
|
| 379 |
+
border-radius: 6px; background: var(--bg-input); color: var(--text-secondary);
|
| 380 |
+
font-family: 'Inter', sans-serif; font-size: 0.75rem; font-weight: 500;
|
| 381 |
+
cursor: pointer; transition: all 0.2s; margin-top: 6px;
|
| 382 |
+
}
|
| 383 |
+
.btn-secondary:hover:not(:disabled) { background: var(--bg-hover); color: var(--text-primary); border-color: var(--primary-border); }
|
| 384 |
+
.btn-secondary:disabled { opacity: 0.3; cursor: not-allowed; }
|
| 385 |
+
|
| 386 |
+
/* ============================================
|
| 387 |
+
CENTER MAIN PANEL
|
| 388 |
+
============================================ */
|
| 389 |
+
.center-panel {
|
| 390 |
+
grid-area: center; position: relative;
|
| 391 |
+
background: var(--bg-surface); display: flex; align-items: center; justify-content: center; overflow: hidden;
|
| 392 |
+
}
|
| 393 |
+
[data-theme="dark"] .center-panel { background: #000; }
|
| 394 |
+
[data-theme="light"] .center-panel { background: #e2e8f0; }
|
| 395 |
+
|
| 396 |
+
.scan-lines {
|
| 397 |
+
position: absolute; inset: 0; pointer-events: none; z-index: 1;
|
| 398 |
+
background: repeating-linear-gradient(to bottom, transparent 0px, transparent 2px, var(--scan-line) 2px, var(--scan-line) 4px);
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
.panel-topbar {
|
| 402 |
+
position: absolute; top: 0; left: 0; right: 0; z-index: 20;
|
| 403 |
+
padding: 10px 16px; display: flex; align-items: center; justify-content: space-between;
|
| 404 |
+
backdrop-filter: blur(8px); -webkit-backdrop-filter: blur(8px);
|
| 405 |
+
}
|
| 406 |
+
[data-theme="dark"] .panel-topbar { background: linear-gradient(to bottom, rgba(8,12,20,0.9), transparent); }
|
| 407 |
+
[data-theme="light"] .panel-topbar { background: linear-gradient(to bottom, rgba(241,245,249,0.9), transparent); }
|
| 408 |
+
|
| 409 |
+
.panel-topbar-title {
|
| 410 |
+
font-family: 'Orbitron', sans-serif; font-size: 0.62rem; font-weight: 600;
|
| 411 |
+
letter-spacing: 2px; text-transform: uppercase; color: var(--primary);
|
| 412 |
+
display: flex; align-items: center; gap: 6px;
|
| 413 |
+
}
|
| 414 |
+
.live-badge {
|
| 415 |
+
background: rgba(220,38,38,0.15); border: 1px solid rgba(220,38,38,0.4); color: var(--danger);
|
| 416 |
+
font-size: 0.52rem; font-weight: 800; letter-spacing: 1.5px;
|
| 417 |
+
padding: 2px 6px; border-radius: 4px; animation: blink 1s infinite;
|
| 418 |
+
}
|
| 419 |
+
|
| 420 |
+
.video-overlay-controls { display: flex; align-items: center; gap: 6px; }
|
| 421 |
+
.overlay-btn {
|
| 422 |
+
width: 28px; height: 28px; border-radius: 6px; border: 1px solid var(--border-glass);
|
| 423 |
+
background: var(--bg-panel); color: var(--text-secondary); cursor: pointer;
|
| 424 |
+
display: flex; align-items: center; justify-content: center; transition: all 0.2s;
|
| 425 |
+
backdrop-filter: blur(8px);
|
| 426 |
+
}
|
| 427 |
+
.overlay-btn svg { width: 13px; height: 13px; }
|
| 428 |
+
.overlay-btn:hover { border-color: var(--primary-border); color: var(--primary); }
|
| 429 |
+
.overlay-btn.active { background: var(--primary-glow); border-color: var(--primary); color: var(--primary); }
|
| 430 |
+
|
| 431 |
+
.main-feed { width: 100%; height: 100%; object-fit: contain; display: block; }
|
| 432 |
+
|
| 433 |
+
.empty-feed { display: flex; flex-direction: column; align-items: center; justify-content: center; height: 100%; width: 100%; gap: 12px; pointer-events: none; }
|
| 434 |
+
.empty-feed-icon { width: 72px; height: 72px; border-radius: 50%; border: 2px dashed var(--primary-border); display: flex; align-items: center; justify-content: center; color: var(--primary); animation: spin-slow 8s linear infinite; }
|
| 435 |
+
.empty-feed-icon svg { opacity: 0.5; width: 32px; height: 32px; }
|
| 436 |
+
@keyframes spin-slow { to { transform: rotate(360deg); } }
|
| 437 |
+
.empty-feed-text { color: var(--text-secondary); font-size: 0.85rem; font-weight: 500; }
|
| 438 |
+
.empty-feed-sub { color: var(--text-muted); font-size: 0.72rem; }
|
| 439 |
+
|
| 440 |
+
.fencing-svg-overlay { position: absolute; inset: 0; z-index: 15; cursor: crosshair; }
|
| 441 |
+
|
| 442 |
+
.count-overlay {
|
| 443 |
+
position: absolute; bottom: 16px; left: 16px; z-index: 25;
|
| 444 |
+
background: var(--bg-panel); backdrop-filter: blur(12px); -webkit-backdrop-filter: blur(12px);
|
| 445 |
+
border: 1px solid var(--primary-border); border-radius: 12px; padding: 10px 16px;
|
| 446 |
+
}
|
| 447 |
+
.count-overlay-label { font-size: 0.58rem; text-transform: uppercase; letter-spacing: 1.5px; color: var(--text-muted); font-weight: 600; }
|
| 448 |
+
.count-overlay-value { font-family: 'Orbitron', sans-serif; font-size: 1.8rem; font-weight: 900; color: var(--primary); line-height: 1; text-shadow: 0 0 16px var(--primary-glow); transition: color 0.3s; }
|
| 449 |
+
.count-overlay-value.danger { color: var(--danger); text-shadow: 0 0 16px var(--danger-glow); }
|
| 450 |
+
.count-overlay-value.moderate { color: var(--moderate); text-shadow: 0 0 16px var(--moderate-glow); }
|
| 451 |
+
|
| 452 |
+
.anomaly-banner {
|
| 453 |
+
position: absolute; top: 52px; left: 50%; transform: translateX(-50%); z-index: 30;
|
| 454 |
+
background: rgba(220,38,38,0.1); border: 1.5px solid var(--danger); backdrop-filter: blur(12px);
|
| 455 |
+
border-radius: 10px; padding: 10px 22px; display: flex; align-items: center; gap: 10px;
|
| 456 |
+
white-space: nowrap; color: var(--danger); font-weight: 700; font-size: 0.82rem;
|
| 457 |
+
box-shadow: 0 0 30px var(--danger-glow); animation: anomaly-in 0.4s ease;
|
| 458 |
+
}
|
| 459 |
+
.anomaly-banner svg { width: 17px; height: 17px; flex-shrink: 0; animation: blink 0.6s infinite; }
|
| 460 |
+
@keyframes anomaly-in { from{opacity:0;transform:translateX(-50%) translateY(-20px)}to{opacity:1;transform:translateX(-50%) translateY(0)} }
|
| 461 |
+
|
| 462 |
+
.loader-overlay { position: absolute; inset: 0; z-index: 40; display: flex; flex-direction: column; align-items: center; justify-content: center; gap: 20px; background: rgba(8,12,20,0.55); backdrop-filter: blur(6px); }
|
| 463 |
+
[data-theme="light"] .loader-overlay { background: rgba(241,245,249,0.7); }
|
| 464 |
+
|
| 465 |
+
.spinner { width: 56px; height: 56px; position: relative; }
|
| 466 |
+
.spinner::before, .spinner::after { content: ''; position: absolute; border-radius: 50%; border: 3px solid transparent; }
|
| 467 |
+
.spinner::before { inset: 0; border-top-color: var(--primary); animation: rotation 1.2s linear infinite; }
|
| 468 |
+
.spinner::after { inset: 8px; border-top-color: var(--blue); animation: rotation 0.8s linear infinite reverse; }
|
| 469 |
+
@keyframes rotation { to { transform: rotate(360deg); } }
|
| 470 |
+
|
| 471 |
+
.loader-text { font-family: 'Orbitron', sans-serif; font-size: 0.68rem; font-weight: 600; color: var(--primary); letter-spacing: 2px; text-transform: uppercase; animation: blink 1.5s infinite; }
|
| 472 |
+
|
| 473 |
+
/* ============================================
|
| 474 |
+
RIGHT PANEL
|
| 475 |
+
============================================ */
|
| 476 |
+
.right-panel { grid-area: right; border-left: 1px solid var(--border-glass); display: flex; flex-direction: column; overflow: hidden; }
|
| 477 |
+
.right-panel-section { display: flex; flex-direction: column; overflow: hidden; }
|
| 478 |
+
.right-panel-section + .right-panel-section { border-top: 1px solid var(--border-glass); }
|
| 479 |
+
|
| 480 |
+
.panel-header { padding: 11px 16px; border-bottom: 1px solid var(--border-glass); display: flex; align-items: center; justify-content: space-between; flex-shrink: 0; }
|
| 481 |
+
.panel-header-title { font-size: 0.62rem; font-weight: 700; text-transform: uppercase; letter-spacing: 2px; color: var(--text-muted); display: flex; align-items: center; gap: 6px; }
|
| 482 |
+
.panel-header-title svg { width: 12px; height: 12px; color: var(--primary); }
|
| 483 |
+
.panel-header-count { font-family: 'Orbitron', sans-serif; font-size: 0.68rem; font-weight: 600; background: var(--primary-glow); border: 1px solid var(--primary-border); color: var(--primary); padding: 2px 8px; border-radius: 100px; }
|
| 484 |
+
|
| 485 |
+
.alert-feed { flex: 1; overflow-y: auto; padding: 8px; display: flex; flex-direction: column; gap: 5px; }
|
| 486 |
+
.alert-feed::-webkit-scrollbar { width: 3px; }
|
| 487 |
+
.alert-feed::-webkit-scrollbar-thumb { background: var(--border-glass); border-radius: 2px; }
|
| 488 |
+
|
| 489 |
+
.alert-item { display: flex; gap: 9px; padding: 9px 11px; border-radius: 8px; border: 1px solid; transition: all 0.2s; animation: slide-in 0.35s ease; }
|
| 490 |
+
@keyframes slide-in { from{opacity:0;transform:translateX(16px)}to{opacity:1;transform:translateX(0)} }
|
| 491 |
+
|
| 492 |
+
.alert-item.info { background: rgba(37,99,235,0.06); border-color: rgba(37,99,235,0.18); }
|
| 493 |
+
.alert-item.warning { background: rgba(217,119,6,0.06); border-color: rgba(217,119,6,0.2); }
|
| 494 |
+
.alert-item.danger { background: rgba(220,38,38,0.07); border-color: rgba(220,38,38,0.22); animation: slide-in 0.35s, alert-danger 2s 0.35s infinite; }
|
| 495 |
+
@keyframes alert-danger { 0%,100%{box-shadow:none}50%{box-shadow:0 0 10px var(--danger-glow)} }
|
| 496 |
+
|
| 497 |
+
.alert-icon { width: 28px; height: 28px; border-radius: 6px; display: flex; align-items: center; justify-content: center; flex-shrink: 0; }
|
| 498 |
+
.alert-icon svg { width: 13px; height: 13px; }
|
| 499 |
+
.alert-item.info .alert-icon { background: rgba(37,99,235,0.1); color: var(--blue); }
|
| 500 |
+
.alert-item.warning .alert-icon { background: rgba(217,119,6,0.1); color: var(--moderate); }
|
| 501 |
+
.alert-item.danger .alert-icon { background: rgba(220,38,38,0.12); color: var(--danger); }
|
| 502 |
+
|
| 503 |
+
.alert-body { flex: 1; min-width: 0; }
|
| 504 |
+
.alert-title { font-weight: 600; font-size: 0.76rem; margin-bottom: 2px; }
|
| 505 |
+
.alert-item.info .alert-title { color: var(--blue); }
|
| 506 |
+
.alert-item.warning .alert-title { color: var(--moderate); }
|
| 507 |
+
.alert-item.danger .alert-title { color: var(--danger); }
|
| 508 |
+
.alert-msg { font-size: 0.68rem; color: var(--text-muted); white-space: nowrap; overflow: hidden; text-overflow: ellipsis; }
|
| 509 |
+
.alert-time { font-size: 0.62rem; color: var(--text-muted); margin-top: 3px; font-family: 'Orbitron', sans-serif; }
|
| 510 |
+
|
| 511 |
+
.alert-empty { flex: 1; display: flex; flex-direction: column; align-items: center; justify-content: center; gap: 8px; color: var(--text-muted); }
|
| 512 |
+
.alert-empty svg { opacity: 0.25; width: 28px; height: 28px; }
|
| 513 |
+
.alert-empty p { font-size: 0.75rem; }
|
| 514 |
+
|
| 515 |
+
.session-list { flex: 1; overflow-y: auto; padding: 8px; display: flex; flex-direction: column; gap: 4px; }
|
| 516 |
+
.session-list::-webkit-scrollbar { width: 3px; }
|
| 517 |
+
|
| 518 |
+
.session-item { padding: 8px 10px; border-radius: 8px; border: 1px solid var(--border-glass); background: var(--bg-input); cursor: pointer; transition: all 0.2s; }
|
| 519 |
+
.session-item:hover { border-color: var(--primary-border); background: var(--bg-hover); }
|
| 520 |
+
.session-name { font-size: 0.74rem; font-weight: 600; color: var(--text-secondary); }
|
| 521 |
+
.session-meta { font-size: 0.63rem; color: var(--text-muted); margin-top: 3px; display: flex; gap: 8px; align-items: center; }
|
| 522 |
+
.session-meta svg { width: 9px; height: 9px; }
|
| 523 |
+
.session-empty { flex: 1; display: flex; align-items: center; justify-content: center; flex-direction: column; color: var(--text-muted); font-size: 0.74rem; gap: 8px; }
|
| 524 |
+
.session-empty svg { opacity: 0.2; width: 22px; height: 22px; }
|
| 525 |
+
|
| 526 |
+
/* ============================================
|
| 527 |
+
BOTTOM ANALYTICS PANEL
|
| 528 |
+
============================================ */
|
| 529 |
+
.bottom-panel {
|
| 530 |
+
grid-area: bottom; border-top: 1px solid var(--border-glass);
|
| 531 |
+
display: grid; grid-template-columns: 1fr 1fr 1fr 1fr 2fr; overflow: hidden;
|
| 532 |
+
}
|
| 533 |
+
|
| 534 |
+
.metric-card {
|
| 535 |
+
padding: 16px 18px; border-right: 1px solid var(--border-glass);
|
| 536 |
+
display: flex; flex-direction: column; justify-content: space-between;
|
| 537 |
+
background: var(--metric-bg); position: relative; overflow: hidden;
|
| 538 |
+
transition: background 0.35s;
|
| 539 |
+
}
|
| 540 |
+
.metric-card::after {
|
| 541 |
+
content: ''; position: absolute; bottom: 0; left: 0; right: 0; height: 2px;
|
| 542 |
+
background: linear-gradient(90deg, transparent, var(--primary), transparent);
|
| 543 |
+
opacity: 0; transition: opacity 0.3s;
|
| 544 |
+
}
|
| 545 |
+
.metric-card:hover::after { opacity: 1; }
|
| 546 |
+
|
| 547 |
+
.metric-label { font-size: 0.6rem; text-transform: uppercase; letter-spacing: 1.5px; font-weight: 700; color: var(--text-muted); display: flex; align-items: center; gap: 5px; }
|
| 548 |
+
.metric-label svg { width: 11px; height: 11px; color: var(--primary); }
|
| 549 |
+
.metric-value { font-family: 'Orbitron', sans-serif; font-size: 2.2rem; font-weight: 900; color: var(--primary); line-height: 1; text-shadow: 0 0 20px var(--primary-glow); transition: all 0.4s; }
|
| 550 |
+
.metric-value.danger { color: var(--danger); text-shadow: 0 0 20px var(--danger-glow); animation: heartbeat 0.8s infinite; }
|
| 551 |
+
.metric-value.moderate { color: var(--moderate); text-shadow: 0 0 20px var(--moderate-glow); }
|
| 552 |
+
.metric-value.blue { color: var(--blue); text-shadow: 0 0 20px var(--blue-glow); }
|
| 553 |
+
.metric-value.purple { color: var(--purple); text-shadow: 0 0 20px var(--purple-glow); }
|
| 554 |
+
@keyframes heartbeat { 0%,100%{transform:scale(1)}50%{transform:scale(1.06)} }
|
| 555 |
+
|
| 556 |
+
.metric-sub { font-size: 0.63rem; color: var(--text-muted); margin-top: 4px; }
|
| 557 |
+
|
| 558 |
+
.chart-area { padding: 12px 16px; display: flex; flex-direction: column; overflow: hidden; }
|
| 559 |
+
.chart-header { display: flex; align-items: center; justify-content: space-between; margin-bottom: 8px; flex-shrink: 0; }
|
| 560 |
+
.chart-title { font-size: 0.62rem; text-transform: uppercase; letter-spacing: 2px; font-weight: 700; color: var(--text-muted); }
|
| 561 |
+
.chart-wrapper { flex: 1; min-height: 0; height: 160px; }
|
| 562 |
+
.chart-empty-state {
|
| 563 |
+
height: 100%;
|
| 564 |
+
display: flex;
|
| 565 |
+
align-items: center;
|
| 566 |
+
justify-content: center;
|
| 567 |
+
text-align: center;
|
| 568 |
+
padding: 16px;
|
| 569 |
+
color: var(--text-muted);
|
| 570 |
+
font-size: 0.78rem;
|
| 571 |
+
border: 1px dashed var(--border-glass);
|
| 572 |
+
border-radius: 10px;
|
| 573 |
+
background: rgba(148, 163, 184, 0.04);
|
| 574 |
+
}
|
| 575 |
+
|
| 576 |
+
.export-actions { display: flex; gap: 5px; }
|
| 577 |
+
.btn-export {
|
| 578 |
+
padding: 4px 10px; border: 1px solid var(--primary-border); border-radius: 6px;
|
| 579 |
+
background: var(--primary-glow); color: var(--primary);
|
| 580 |
+
font-size: 0.63rem; font-weight: 600; cursor: pointer; font-family: inherit;
|
| 581 |
+
display: flex; align-items: center; gap: 4px; transition: all 0.2s;
|
| 582 |
+
}
|
| 583 |
+
.btn-export svg { width: 11px; height: 11px; }
|
| 584 |
+
.btn-export:hover { background: var(--bg-hover); }
|
| 585 |
+
.btn-export:disabled { opacity: 0.3; cursor: not-allowed; }
|
| 586 |
+
|
| 587 |
+
/* ============================================
|
| 588 |
+
ANIMATIONS
|
| 589 |
+
============================================ */
|
| 590 |
+
@keyframes float { 0%,100%{transform:translateY(0)}50%{transform:translateY(-8px)} }
|
| 591 |
+
@keyframes fade-up { from{opacity:0;transform:translateY(12px)}to{opacity:1;transform:translateY(0)} }
|
| 592 |
+
.fade-up { animation: fade-up 0.4s ease forwards; }
|
| 593 |
+
|
| 594 |
+
/* ============================================
|
| 595 |
+
RECHARTS CUSTOM TOOLTIP
|
| 596 |
+
============================================ */
|
| 597 |
+
.custom-tooltip {
|
| 598 |
+
background: var(--bg-panel); border: 1px solid var(--primary-border);
|
| 599 |
+
border-radius: 8px; padding: 8px 12px; backdrop-filter: blur(12px);
|
| 600 |
+
}
|
| 601 |
+
.custom-tooltip-label { font-size: 0.63rem; color: var(--text-muted); text-transform: uppercase; letter-spacing: 1px; margin-bottom: 4px; }
|
| 602 |
+
.custom-tooltip-value { font-family: 'Orbitron', sans-serif; font-size: 0.9rem; font-weight: 700; color: var(--primary); }
|
| 603 |
+
|
| 604 |
+
/* Scrollbar */
|
| 605 |
+
::-webkit-scrollbar { width: 4px; height: 4px; }
|
| 606 |
+
::-webkit-scrollbar-track { background: transparent; }
|
| 607 |
+
::-webkit-scrollbar-thumb { background: var(--border-glass); border-radius: 2px; }
|
frontend/src/main.jsx
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
// Application entry point: mounts <App/> into the #root element.
// StrictMode double-invokes render logic in development to surface side effects.
import { StrictMode } from 'react'
import { createRoot } from 'react-dom/client'
import './index.css'
import App from './App.jsx'

createRoot(document.getElementById('root')).render(
  <StrictMode>
    <App />
  </StrictMode>,
)
|
frontend/vercel.json
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"buildCommand": "npm run build",
|
| 3 |
+
"outputDirectory": "dist",
|
| 4 |
+
"framework": "vite",
|
| 5 |
+
"rewrites": [{ "source": "/(.*)", "destination": "/index.html" }]
|
| 6 |
+
}
|
frontend/vite.config.js
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import { defineConfig } from 'vite'
import react from '@vitejs/plugin-react'

// Vite configuration reference: https://vite.dev/config/
export default defineConfig({
  plugins: [react()],
  base: './', // relative paths → works on Vercel, Netlify, and sub-path servers
})
|
label_tool.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
import streamlit as st
|
| 5 |
+
from PIL import Image, ImageDraw
|
| 6 |
+
|
| 7 |
+
try:
|
| 8 |
+
from streamlit_image_coordinates import streamlit_image_coordinates
|
| 9 |
+
except Exception:
|
| 10 |
+
streamlit_image_coordinates = None
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# --- Page setup and persistent UI state -------------------------------------
st.set_page_config(page_title="Point Annotation Tool", layout="wide")
st.title("Point Annotation Tool")

# Source folder and export target are configured from the sidebar.
image_dir = st.sidebar.text_input("Image folder", value=".")
output_json = st.sidebar.text_input("Output JSON", value="annotations.json")

# Session state survives Streamlit reruns:
#   index       – position of the currently displayed image
#   annotations – mapping of image filename -> list of [x, y] points
if "index" not in st.session_state:
    st.session_state.index = 0
if "annotations" not in st.session_state:
    st.session_state.annotations = {}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def list_images(folder):
    """Return the image filenames in *folder*, sorted alphabetically.

    Matching is by file extension (case-insensitive) against common
    raster formats. A missing or non-directory path yields an empty list.
    """
    if not os.path.isdir(folder):
        return []
    allowed = {".jpg", ".jpeg", ".png", ".bmp", ".webp"}
    names = []
    for entry in os.listdir(folder):
        if os.path.splitext(entry.lower())[1] in allowed:
            names.append(entry)
    names.sort()
    return names
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def annotated_image(image, points):
    """Return a copy of *image* with a red dot drawn at every (x, y) point."""
    radius = 5
    overlay = image.copy()
    pen = ImageDraw.Draw(overlay)
    for px, py in points:
        pen.ellipse(
            (px - radius, py - radius, px + radius, py + radius),
            fill="red",
            outline="white",
        )
    return overlay
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
# --- Main annotation flow ----------------------------------------------------
images = list_images(image_dir)
if not images:
    st.warning("No images found.")
    st.stop()

# Clamp the index in case the folder contents changed between reruns.
st.session_state.index = max(0, min(st.session_state.index, len(images) - 1))
image_name = images[st.session_state.index]
image_path = os.path.join(image_dir, image_name)
image = Image.open(image_path).convert("RGB")
# `points` aliases the session-state list, so in-place edits persist.
points = st.session_state.annotations.setdefault(image_name, [])

# Navigation / editing toolbar; every action triggers an immediate rerun.
nav1, nav2, nav3, nav4 = st.columns(4)
if nav1.button("Prev"):
    st.session_state.index = max(0, st.session_state.index - 1)
    st.rerun()
if nav2.button("Next"):
    st.session_state.index = min(len(images) - 1, st.session_state.index + 1)
    st.rerun()
if nav3.button("Delete Last") and points:
    points.pop()
    st.rerun()
if nav4.button("Clear"):
    st.session_state.annotations[image_name] = []
    st.rerun()

st.write(f"{st.session_state.index + 1}/{len(images)} | {image_name} | Count: {len(points)}")
preview = annotated_image(image, points)

if streamlit_image_coordinates is not None:
    # Click-to-annotate path. The key includes len(points) so the widget is
    # recreated after each click; otherwise the same click would re-fire.
    clicked = streamlit_image_coordinates(preview, key=f"img_{image_name}_{len(points)}")
    if clicked is not None and "x" in clicked and "y" in clicked:
        points.append([int(clicked["x"]), int(clicked["y"])])
        st.rerun()
else:
    # Fallback: manual coordinate entry when the optional widget is absent.
    st.image(preview, caption="Install streamlit-image-coordinates for direct click annotation.", use_container_width=True)
    c1, c2, c3 = st.columns(3)
    x = c1.number_input("x", min_value=0, max_value=image.width, value=0, step=1)
    y = c2.number_input("y", min_value=0, max_value=image.height, value=0, step=1)
    if c3.button("Add Point"):
        points.append([int(x), int(y)])
        st.rerun()

# Serialize annotations for all images (sorted for stable diffs).
export_data = [
    {"image": name, "points": pts, "count": len(pts)}
    for name, pts in sorted(st.session_state.annotations.items())
]
json_text = json.dumps(export_data, indent=2)

if st.sidebar.button("Save JSON"):
    with open(output_json, "w", encoding="utf-8") as f:
        f.write(json_text)
    st.sidebar.success(output_json)

st.sidebar.download_button(
    "Download JSON",
    data=json_text.encode("utf-8"),
    file_name=os.path.basename(output_json),
    mime="application/json",
)
|
models/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .p2pnet import build
|
| 2 |
+
|
| 3 |
+
# build the P2PNet model
|
| 4 |
+
# set training to 'True' during training
|
| 5 |
+
def build_model(args, training=False):
    """Construct the P2PNet model via :func:`models.p2pnet.build`.

    Args:
        args: parsed configuration namespace forwarded to the builder.
        training: set to True during training (the builder's behavior
            differs between train and inference modes — see ``p2pnet.build``).
    """
    return build(args, training)
|
models/backbone.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Backbone modules.
|
| 4 |
+
"""
|
| 5 |
+
from collections import OrderedDict
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torch.nn.functional as F
|
| 9 |
+
import torchvision
|
| 10 |
+
from torch import nn
|
| 11 |
+
|
| 12 |
+
import models.vgg_ as models
|
| 13 |
+
|
| 14 |
+
class BackboneBase_VGG(nn.Module):
    """Expose the convolutional stages of a VGG backbone as feature maps.

    When ``return_interm_layers`` is True the VGG feature stack is split
    into four sequential stages (body1..body4) and forward() returns the
    output of each stage; otherwise a single truncated stack (``body``) is
    used and forward() returns one feature map.

    NOTE(review): the slice indices below assume the layer ordering of the
    bundled ``models.vgg_`` vgg16 / vgg16_bn definitions — confirm against
    that module before changing them.
    """
    def __init__(self, backbone: nn.Module, num_channels: int, name: str, return_interm_layers: bool):
        super().__init__()
        features = list(backbone.features.children())
        if return_interm_layers:
            # vgg16_bn has extra BatchNorm layers, so its stage boundaries differ.
            if name == 'vgg16_bn':
                self.body1 = nn.Sequential(*features[:13])
                self.body2 = nn.Sequential(*features[13:23])
                self.body3 = nn.Sequential(*features[23:33])
                self.body4 = nn.Sequential(*features[33:43])
            else:
                self.body1 = nn.Sequential(*features[:9])
                self.body2 = nn.Sequential(*features[9:16])
                self.body3 = nn.Sequential(*features[16:23])
                self.body4 = nn.Sequential(*features[23:30])
        else:
            if name == 'vgg16_bn':
                self.body = nn.Sequential(*features[:44])  # 16x down-sample
            elif name == 'vgg16':
                self.body = nn.Sequential(*features[:30])  # 16x down-sample
        self.num_channels = num_channels
        self.return_interm_layers = return_interm_layers

    def forward(self, tensor_list):
        """Run the backbone; returns a list of one or four feature maps."""
        out = []

        if self.return_interm_layers:
            # Stages are chained: each body consumes the previous stage's output.
            xs = tensor_list
            for _, layer in enumerate([self.body1, self.body2, self.body3, self.body4]):
                xs = layer(xs)
                out.append(xs)

        else:
            xs = self.body(tensor_list)
            out.append(xs)
        return out
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
class Backbone_VGG(BackboneBase_VGG):
    """VGG-16 backbone (plain or batch-norm variant) with ImageNet weights.

    The previous docstring incorrectly described this as a "ResNet backbone
    with frozen BatchNorm"; it builds a torchvision-style VGG from the
    bundled ``models.vgg_`` module.
    """
    def __init__(self, name: str, return_interm_layers: bool):
        if name == 'vgg16_bn':
            backbone = models.vgg16_bn(pretrained=True)
        elif name == 'vgg16':
            backbone = models.vgg16(pretrained=True)
        else:
            # Fail fast with a clear message instead of the UnboundLocalError
            # the original code raised for unsupported names.
            raise ValueError(f"Unsupported backbone name: {name!r} (expected 'vgg16' or 'vgg16_bn')")
        num_channels = 256
        super().__init__(backbone, num_channels, name, return_interm_layers)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def build_backbone(args):
    """Create the VGG feature extractor selected by ``args.backbone``.

    Intermediate feature maps are always requested (needed by the FPN).
    """
    return Backbone_VGG(args.backbone, True)


if __name__ == '__main__':
    # Smoke test: construct a plain VGG16 backbone.
    Backbone_VGG('vgg16', True)
|
models/matcher.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
|
| 2 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 3 |
+
"""
|
| 4 |
+
Mostly copy-paste from DETR (https://github.com/facebookresearch/detr).
|
| 5 |
+
"""
|
| 6 |
+
import torch
|
| 7 |
+
from scipy.optimize import linear_sum_assignment
|
| 8 |
+
from torch import nn
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class HungarianMatcher_Crowd(nn.Module):
    """Compute a 1-to-1 assignment between ground-truth points and predictions.

    For efficiency reasons the targets don't include the no-object class, so
    in general there are more predictions than targets. We match the best
    predictions one-to-one; the remaining predictions are left unmatched and
    are treated as non-objects by the criterion.
    """

    def __init__(self, cost_class: float = 1, cost_point: float = 1):
        """Create the matcher.

        Params:
            cost_class: relative weight of the classification term in the
                matching cost.
            cost_point: relative weight of the point-distance term in the
                matching cost.
        """
        super().__init__()
        self.cost_class = cost_class
        self.cost_point = cost_point
        assert cost_class != 0 or cost_point != 0, "all costs cant be 0"

    @torch.no_grad()
    def forward(self, outputs, targets):
        """Perform the matching.

        Params:
            outputs: dict with at least:
                "pred_logits": Tensor [batch_size, num_queries, num_classes]
                "pred_points": Tensor [batch_size, num_queries, 2]
            targets: list (len = batch_size) of dicts, each with:
                "labels": Tensor [num_target_points] of class labels
                "point":  Tensor [num_target_points, 2] of point coordinates
                (note the singular "point" key, unlike DETR's "boxes")

        Returns:
            A list of size batch_size of tuples (index_i, index_j) where
            index_i are selected prediction indices and index_j the matched
            target indices; len(index_i) == len(index_j)
            == min(num_queries, num_target_points) per image.
        """
        bs, num_queries = outputs["pred_logits"].shape[:2]

        # Flatten the batch so a single cost matrix covers all images at once.
        out_prob = outputs["pred_logits"].flatten(0, 1).softmax(-1)  # [batch_size * num_queries, num_classes]
        out_points = outputs["pred_points"].flatten(0, 1)  # [batch_size * num_queries, 2]

        # Concatenate target labels and points across the batch.
        tgt_ids = torch.cat([v["labels"] for v in targets])
        tgt_points = torch.cat([v["point"] for v in targets])

        # Classification cost: approximate NLL by 1 - proba[target class];
        # the constant 1 doesn't change the matching, so it is omitted.
        cost_class = -out_prob[:, tgt_ids]

        # Euclidean (L2) distance between every prediction and every target.
        cost_point = torch.cdist(out_points, tgt_points, p=2)

        # Weighted final cost matrix; Hungarian solving happens on CPU.
        C = self.cost_point * cost_point + self.cost_class * cost_class
        C = C.view(bs, num_queries, -1).cpu()

        # Split the concatenated target axis back per image, then solve one
        # linear-sum assignment per image on its own sub-matrix.
        sizes = [len(v["point"]) for v in targets]
        indices = [linear_sum_assignment(c[i]) for i, c in enumerate(C.split(sizes, -1))]
        return [(torch.as_tensor(i, dtype=torch.int64), torch.as_tensor(j, dtype=torch.int64)) for i, j in indices]
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def build_matcher_crowd(args):
    """Instantiate the crowd Hungarian matcher from the configured cost weights."""
    matcher = HungarianMatcher_Crowd(
        cost_class=args.set_cost_class,
        cost_point=args.set_cost_point,
    )
    return matcher
|
models/p2pnet.py
ADDED
|
@@ -0,0 +1,342 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn.functional as F
|
| 3 |
+
from torch import nn
|
| 4 |
+
|
| 5 |
+
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
|
| 6 |
+
accuracy, get_world_size, interpolate,
|
| 7 |
+
is_dist_avail_and_initialized)
|
| 8 |
+
|
| 9 |
+
from .backbone import build_backbone
|
| 10 |
+
from .matcher import build_matcher_crowd
|
| 11 |
+
|
| 12 |
+
import numpy as np
|
| 13 |
+
import time
|
| 14 |
+
|
| 15 |
+
# the network framework of the regression branch
|
| 16 |
+
class RegressionModel(nn.Module):
    """Point-offset regression head of P2PNet.

    Predicts an (dx, dy) offset for every anchor point at every spatial
    location of the input feature map.

    NOTE(review): conv3/conv4 (and their activations) are created but never
    used in forward(); they are kept so state_dicts from upstream P2PNet
    checkpoints still load — confirm before removing.
    """

    def __init__(self, num_features_in, num_anchor_points=4, feature_size=256):
        super().__init__()

        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()

        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()

        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()

        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()

        self.output = nn.Conv2d(feature_size, num_anchor_points * 2, kernel_size=3, padding=1)

    def forward(self, x):
        """Return offsets with shape (batch, H*W*num_anchor_points, 2)."""
        hidden = self.act1(self.conv1(x))
        hidden = self.act2(self.conv2(hidden))
        offsets = self.output(hidden)

        # (B, A*2, H, W) -> (B, H, W, A*2) -> (B, H*W*A, 2)
        offsets = offsets.permute(0, 2, 3, 1)
        return offsets.contiguous().view(offsets.shape[0], -1, 2)
|
| 46 |
+
|
| 47 |
+
# the network framework of the classification branch
|
| 48 |
+
class ClassificationModel(nn.Module):
    """Per-anchor classification head of P2PNet.

    Produces raw (unnormalised) class scores for every anchor point at
    every spatial location; callers apply softmax themselves.

    NOTE(review): conv3/conv4 and output_act are created but never used in
    forward(); kept for checkpoint compatibility with upstream P2PNet —
    confirm before removing. The `prior` parameter is currently unused.
    """

    def __init__(self, num_features_in, num_anchor_points=4, num_classes=80, prior=0.01, feature_size=256):
        super().__init__()

        self.num_classes = num_classes
        self.num_anchor_points = num_anchor_points

        self.conv1 = nn.Conv2d(num_features_in, feature_size, kernel_size=3, padding=1)
        self.act1 = nn.ReLU()

        self.conv2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act2 = nn.ReLU()

        self.conv3 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act3 = nn.ReLU()

        self.conv4 = nn.Conv2d(feature_size, feature_size, kernel_size=3, padding=1)
        self.act4 = nn.ReLU()

        self.output = nn.Conv2d(feature_size, num_anchor_points * num_classes, kernel_size=3, padding=1)
        self.output_act = nn.Sigmoid()

    def forward(self, x):
        """Return logits with shape (batch, H*W*num_anchor_points, num_classes)."""
        hidden = self.act1(self.conv1(x))
        hidden = self.act2(self.conv2(hidden))
        scores = self.output(hidden)

        # (B, A*C, H, W) -> (B, H, W, A*C)
        scores = scores.permute(0, 2, 3, 1)
        batch, height, width, _ = scores.shape
        scores = scores.view(batch, height, width, self.num_anchor_points, self.num_classes)
        return scores.contiguous().view(x.shape[0], -1, self.num_classes)
|
| 86 |
+
|
| 87 |
+
# generate the reference points in grid layout
|
| 88 |
+
def generate_anchor_points(stride=16, row=3, line=3):
    """Lay out a ``row`` x ``line`` grid of reference points in one stride cell.

    Points are centred in their sub-cells and expressed relative to the
    cell centre, so coordinates lie in (-stride/2, stride/2).

    Returns:
        ndarray of shape (row * line, 2) with (x, y) offsets.
    """
    x_step = stride / line
    y_step = stride / row

    xs = (np.arange(1, line + 1) - 0.5) * x_step - stride / 2
    ys = (np.arange(1, row + 1) - 0.5) * y_step - stride / 2

    grid_x, grid_y = np.meshgrid(xs, ys)

    return np.vstack((grid_x.ravel(), grid_y.ravel())).transpose()
|
| 102 |
+
# shift the meta-anchor to get the anchor points
|
| 103 |
+
def shift(shape, stride, anchor_points):
    """Replicate *anchor_points* over every cell of a feature map.

    Args:
        shape: (height, width) of the feature map.
        stride: input pixels per feature-map cell.
        anchor_points: ndarray (A, 2) of offsets relative to a cell centre.

    Returns:
        ndarray of shape (height * width * A, 2) in input-image coordinates.
    """
    centers_x = (np.arange(0, shape[1]) + 0.5) * stride
    centers_y = (np.arange(0, shape[0]) + 0.5) * stride

    grid_x, grid_y = np.meshgrid(centers_x, centers_y)
    cell_centers = np.vstack((grid_x.ravel(), grid_y.ravel())).transpose()

    num_anchors = anchor_points.shape[0]
    num_cells = cell_centers.shape[0]
    # Broadcast-add each cell centre to every anchor offset.
    combined = (anchor_points.reshape((1, num_anchors, 2)) +
                cell_centers.reshape((1, num_cells, 2)).transpose((1, 0, 2)))
    return combined.reshape((num_cells * num_anchors, 2))
|
| 119 |
+
|
| 120 |
+
# this class generate all reference points on all pyramid levels
|
| 121 |
+
class AnchorPoints(nn.Module):
    """Generate reference (anchor) points for every pyramid level.

    Args:
        pyramid_levels: FPN levels to cover (default [3, 4, 5, 6, 7]).
        strides: per-level stride in input pixels; defaults to 2**level.
        row, line: grid density of anchor points inside each cell.
    """
    def __init__(self, pyramid_levels=None, strides=None, row=3, line=3):
        super(AnchorPoints, self).__init__()

        if pyramid_levels is None:
            self.pyramid_levels = [3, 4, 5, 6, 7]
        else:
            self.pyramid_levels = pyramid_levels

        if strides is None:
            self.strides = [2 ** x for x in self.pyramid_levels]
        else:
            # BUG FIX: an explicit `strides` argument was previously dropped,
            # leaving self.strides unset and raising AttributeError in forward().
            self.strides = strides

        self.row = row
        self.line = line

    def forward(self, image):
        """Return all anchor points for *image* as a (1, N, 2) float32 tensor."""
        image_shape = image.shape[2:]
        image_shape = np.array(image_shape)
        # Ceil-divide the spatial dims by each level's 2**level factor.
        # NOTE(review): this uses 2**level rather than self.strides[idx];
        # they only agree for the default strides — confirm if custom
        # strides are ever passed.
        image_shapes = [(image_shape + 2 ** x - 1) // (2 ** x) for x in self.pyramid_levels]

        all_anchor_points = np.zeros((0, 2)).astype(np.float32)
        # Accumulate the reference points of every pyramid level.
        for idx, p in enumerate(self.pyramid_levels):
            anchor_points = generate_anchor_points(2**p, row=self.row, line=self.line)
            shifted_anchor_points = shift(image_shapes[idx], self.strides[idx], anchor_points)
            all_anchor_points = np.append(all_anchor_points, shifted_anchor_points, axis=0)

        all_anchor_points = np.expand_dims(all_anchor_points, axis=0)
        # Move the reference points to the GPU when one is available.
        if torch.cuda.is_available():
            return torch.from_numpy(all_anchor_points.astype(np.float32)).cuda()
        else:
            return torch.from_numpy(all_anchor_points.astype(np.float32))
|
| 154 |
+
|
| 155 |
+
class Decoder(nn.Module):
    """Lightweight FPN decoder: fuses C3/C4/C5 into P3/P4/P5 feature maps.

    Each level gets a 1x1 lateral projection to ``feature_size`` channels,
    a top-down nearest-neighbour upsample from the level above, and a 3x3
    smoothing convolution.
    """

    def __init__(self, C3_size, C4_size, C5_size, feature_size=256):
        super().__init__()

        self.P5_1 = nn.Conv2d(C5_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P5_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P5_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.P4_1 = nn.Conv2d(C4_size, feature_size, kernel_size=1, stride=1, padding=0)
        self.P4_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P4_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

        self.P3_1 = nn.Conv2d(C3_size, feature_size, kernel_size=1, stride=1, padding=0)
        # NOTE(review): P3_upsampled is never used in forward(); kept for
        # state_dict compatibility (it holds no parameters anyway).
        self.P3_upsampled = nn.Upsample(scale_factor=2, mode='nearest')
        self.P3_2 = nn.Conv2d(feature_size, feature_size, kernel_size=3, stride=1, padding=1)

    def forward(self, inputs):
        """Fuse (C3, C4, C5) top-down and return [P3, P4, P5]."""
        c3, c4, c5 = inputs

        lateral5 = self.P5_1(c5)
        up5 = self.P5_upsampled(lateral5)
        p5 = self.P5_2(lateral5)

        lateral4 = self.P4_1(c4) + up5
        up4 = self.P4_upsampled(lateral4)
        p4 = self.P4_2(lateral4)

        lateral3 = self.P3_1(c3) + up4
        p3 = self.P3_2(lateral3)

        return [p3, p4, p5]
|
| 192 |
+
|
| 193 |
+
# the definition of the P2PNet model
|
| 194 |
+
class P2PNet(nn.Module):
|
| 195 |
+
def __init__(self, backbone, row=2, line=2):
|
| 196 |
+
super().__init__()
|
| 197 |
+
self.backbone = backbone
|
| 198 |
+
self.num_classes = 2
|
| 199 |
+
# the number of all anchor points
|
| 200 |
+
num_anchor_points = row * line
|
| 201 |
+
|
| 202 |
+
self.regression = RegressionModel(num_features_in=256, num_anchor_points=num_anchor_points)
|
| 203 |
+
self.classification = ClassificationModel(num_features_in=256, \
|
| 204 |
+
num_classes=self.num_classes, \
|
| 205 |
+
num_anchor_points=num_anchor_points)
|
| 206 |
+
|
| 207 |
+
self.anchor_points = AnchorPoints(pyramid_levels=[3,], row=row, line=line)
|
| 208 |
+
|
| 209 |
+
self.fpn = Decoder(256, 512, 512)
|
| 210 |
+
|
| 211 |
+
def forward(self, samples: NestedTensor):
|
| 212 |
+
# get the backbone features
|
| 213 |
+
features = self.backbone(samples)
|
| 214 |
+
# forward the feature pyramid
|
| 215 |
+
features_fpn = self.fpn([features[1], features[2], features[3]])
|
| 216 |
+
|
| 217 |
+
batch_size = features[0].shape[0]
|
| 218 |
+
# run the regression and classification branch
|
| 219 |
+
regression = self.regression(features_fpn[1]) * 100 # 8x
|
| 220 |
+
classification = self.classification(features_fpn[1])
|
| 221 |
+
anchor_points = self.anchor_points(samples).repeat(batch_size, 1, 1)
|
| 222 |
+
# decode the points as prediction
|
| 223 |
+
output_coord = regression + anchor_points
|
| 224 |
+
output_class = classification
|
| 225 |
+
out = {'pred_logits': output_class, 'pred_points': output_coord}
|
| 226 |
+
|
| 227 |
+
return out
|
| 228 |
+
|
| 229 |
+
class SetCriterion_Crowd(nn.Module):
    """Loss computation for P2PNet crowd counting.

    Performs a matching between predicted points and ground-truth head
    annotations via the supplied matcher, then applies a classification
    (cross-entropy) loss on all proposals and a point-regression (MSE)
    loss on the matched pairs.
    """

    def __init__(self, num_classes, matcher, weight_dict, eos_coef, losses):
        """ Create the criterion.
        Parameters:
            num_classes: number of object categories, omitting the special no-object category
            matcher: module able to compute a matching between targets and proposals
            weight_dict: dict containing as key the names of the losses and as values their relative weight.
            eos_coef: relative classification weight applied to the no-object category
            losses: list of all the losses to be applied. See get_loss for list of available losses.
        """
        super().__init__()
        self.num_classes = num_classes
        self.matcher = matcher
        self.weight_dict = weight_dict
        self.eos_coef = eos_coef
        self.losses = losses
        # Per-class CE weights: index 0 (background / "no person") is
        # down-weighted by eos_coef; registered as a buffer so it moves
        # with the module across devices and is saved in the state dict.
        empty_weight = torch.ones(self.num_classes + 1)
        empty_weight[0] = self.eos_coef
        self.register_buffer('empty_weight', empty_weight)

    def loss_labels(self, outputs, targets, indices, num_points):
        """Classification loss (NLL)
        targets dicts must contain the key "labels" containing a tensor of dim [nb_target_boxes]
        """
        assert 'pred_logits' in outputs
        src_logits = outputs['pred_logits']

        idx = self._get_src_permutation_idx(indices)
        target_classes_o = torch.cat([t["labels"][J] for t, (_, J) in zip(targets, indices)])
        # Every unmatched proposal is labeled as background (class 0).
        target_classes = torch.full(src_logits.shape[:2], 0,
                                    dtype=torch.int64, device=src_logits.device)
        target_classes[idx] = target_classes_o

        loss_ce = F.cross_entropy(src_logits.transpose(1, 2), target_classes, self.empty_weight)
        losses = {'loss_ce': loss_ce}

        return losses

    def loss_points(self, outputs, targets, indices, num_points):
        """Point-regression loss: summed MSE over matched pairs, normalized
        by the (world-averaged) ground-truth point count.

        NOTE(review): the result is emitted under the key 'loss_point'
        (singular) — any weight dict keyed 'loss_points' will not match it.
        """
        assert 'pred_points' in outputs
        idx = self._get_src_permutation_idx(indices)
        src_points = outputs['pred_points'][idx]
        target_points = torch.cat([t['point'][i] for t, (_, i) in zip(targets, indices)], dim=0)

        # 'none' reduction: keep per-coordinate terms, then normalize below.
        loss_bbox = F.mse_loss(src_points, target_points, reduction='none')

        losses = {}
        losses['loss_point'] = loss_bbox.sum() / num_points

        return losses

    def _get_src_permutation_idx(self, indices):
        # permute predictions following indices:
        # builds (batch_idx, src_idx) pairs that index the flattened
        # predictions selected by the matcher.
        batch_idx = torch.cat([torch.full_like(src, i) for i, (src, _) in enumerate(indices)])
        src_idx = torch.cat([src for (src, _) in indices])
        return batch_idx, src_idx

    def _get_tgt_permutation_idx(self, indices):
        # permute targets following indices (mirror of the src version)
        batch_idx = torch.cat([torch.full_like(tgt, i) for i, (_, tgt) in enumerate(indices)])
        tgt_idx = torch.cat([tgt for (_, tgt) in indices])
        return batch_idx, tgt_idx

    def get_loss(self, loss, outputs, targets, indices, num_points, **kwargs):
        # Dispatch by loss name; valid names are 'labels' and 'points'.
        loss_map = {
            'labels': self.loss_labels,
            'points': self.loss_points,
        }
        assert loss in loss_map, f'do you really want to compute {loss} loss?'
        return loss_map[loss](outputs, targets, indices, num_points, **kwargs)

    def forward(self, outputs, targets):
        """ This performs the loss computation.
        Parameters:
             outputs: dict of tensors, see the output specification of the model for the format
             targets: list of dicts, such that len(targets) == batch_size.
                      The expected keys in each dict depends on the losses applied, see each loss' doc
        """
        output1 = {'pred_logits': outputs['pred_logits'], 'pred_points': outputs['pred_points']}

        indices1 = self.matcher(output1, targets)

        # Normalize by the average number of GT points per process so the
        # loss scale is consistent under distributed training.
        num_points = sum(len(t["labels"]) for t in targets)
        num_points = torch.as_tensor([num_points], dtype=torch.float, device=next(iter(output1.values())).device)
        if is_dist_avail_and_initialized():
            torch.distributed.all_reduce(num_points)
        # clamp to >= 1 so an all-empty batch cannot divide by zero
        num_boxes = torch.clamp(num_points / get_world_size(), min=1).item()

        losses = {}
        for loss in self.losses:
            losses.update(self.get_loss(loss, output1, targets, indices1, num_boxes))

        return losses
|
| 324 |
+
|
| 325 |
+
# create the P2PNet model
def build(args, training):
    """Build the P2PNet model and, when training, its loss criterion.

    Args:
        args: config namespace providing at least `row` and `line` (anchor
            grid), plus `point_loss_coef` and `eos_coef` when training.
        training: when False, only the model is returned (inference mode).

    Returns:
        The model alone (inference), or a (model, criterion) tuple (training).
    """
    # treats persons as a single class
    num_classes = 1

    backbone = build_backbone(args)
    model = P2PNet(backbone, args.row, args.line)
    if not training:
        return model

    # BUG FIX: the point-loss weight was registered under the key
    # 'loss_points', but SetCriterion_Crowd.loss_points emits its value
    # under the key 'loss_point' (singular). Any consumer that weights the
    # returned losses via `weight_dict[k]` would therefore silently drop
    # the point-regression term. Register it under the key the criterion
    # actually emits.
    weight_dict = {'loss_ce': 1, 'loss_point': args.point_loss_coef}
    losses = ['labels', 'points']
    matcher = build_matcher_crowd(args)
    criterion = SetCriterion_Crowd(num_classes,
                                   matcher=matcher, weight_dict=weight_dict,
                                   eos_coef=args.eos_coef, losses=losses)

    return model, criterion
|
models/vgg_.py
ADDED
|
@@ -0,0 +1,197 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Mostly copy-paste from torchvision references.
|
| 4 |
+
"""
|
| 5 |
+
import torch
|
| 6 |
+
import torch.nn as nn
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# Public API of this module.
__all__ = [
    'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn',
    'vgg19_bn', 'vgg19',
]


# Official torchvision pretrained-weight URLs, keyed by architecture name.
model_urls = {
    'vgg11': 'https://download.pytorch.org/models/vgg11-bbd30ac9.pth',
    'vgg13': 'https://download.pytorch.org/models/vgg13-c768596a.pth',
    'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth',
    'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth',
    'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth',
    'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth',
    'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth',
    'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth',
}


# NOTE(review): hard-coded checkpoint paths from the original authors'
# cluster; not referenced by any code visible in this module — confirm a
# caller uses these before relying on them.
model_paths = {
    'vgg16_bn': '/apdcephfs/private_changanwang/checkpoints/vgg16_bn-6c64b313.pth',
    'vgg16': '/apdcephfs/private_changanwang/checkpoints/vgg16-397923af.pth',

}
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class VGG(nn.Module):
    """Plain VGG classifier: a convolutional feature extractor followed by
    the standard 7x7 adaptive average pool and three-layer FC head.

    Attribute names (`features`, `avgpool`, `classifier`) match torchvision
    so pretrained state dicts load unchanged.
    """

    def __init__(self, features, num_classes=1000, init_weights=True):
        super(VGG, self).__init__()
        self.features = features
        self.avgpool = nn.AdaptiveAvgPool2d((7, 7))
        self.classifier = nn.Sequential(
            nn.Linear(512 * 7 * 7, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )
        if init_weights:
            self._initialize_weights()

    def forward(self, x):
        # features -> fixed 7x7 pool -> flatten -> FC head
        out = self.features(x)
        out = self.avgpool(out)
        out = torch.flatten(out, 1)
        return self.classifier(out)

    def _initialize_weights(self):
        # Kaiming for convs, unit-gamma/zero-beta for batch norm,
        # small-normal for linear layers — the torchvision defaults.
        for module in self.modules():
            if isinstance(module, nn.Conv2d):
                nn.init.kaiming_normal_(module.weight, mode='fan_out', nonlinearity='relu')
                if module.bias is not None:
                    nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.constant_(module.weight, 1)
                nn.init.constant_(module.bias, 0)
            elif isinstance(module, nn.Linear):
                nn.init.normal_(module.weight, 0, 0.01)
                nn.init.constant_(module.bias, 0)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
def make_layers(cfg, batch_norm=False, sync=False):
    """Translate a VGG config list into an nn.Sequential.

    Each integer entry appends a 3x3 conv (optionally followed by batch
    norm) plus ReLU; the sentinel 'M' appends a 2x2 max pool. When both
    `batch_norm` and `sync` are set, SyncBatchNorm is used instead of
    BatchNorm2d (for distributed training).
    """
    modules = []
    channels = 3
    for entry in cfg:
        if entry == 'M':
            modules.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        conv = nn.Conv2d(channels, entry, kernel_size=3, padding=1)
        if batch_norm:
            if sync:
                print('use sync backbone')
                norm = nn.SyncBatchNorm(entry)
            else:
                norm = nn.BatchNorm2d(entry)
            modules.extend([conv, norm, nn.ReLU(inplace=True)])
        else:
            modules.extend([conv, nn.ReLU(inplace=True)])
        channels = entry
    return nn.Sequential(*modules)
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
# Layer configurations from the VGG paper: integers are conv output
# channels, 'M' marks a 2x2 max-pool. A=VGG11, B=VGG13, D=VGG16, E=VGG19.
cfgs = {
    'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
    'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
    'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
}
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _vgg(arch, cfg, batch_norm, pretrained, progress, sync=False, **kwargs):
    """Internal factory shared by every vgg* constructor.

    Args:
        arch: key into `model_urls` selecting the pretrained checkpoint.
        cfg: key into `cfgs` selecting the layer layout.
        batch_norm: insert a batch-norm layer after each conv.
        pretrained: download and load ImageNet weights from `model_urls`.
        progress: show a download progress bar.
        sync: use SyncBatchNorm instead of BatchNorm2d.
    """
    if pretrained:
        # Pretrained checkpoints already carry trained parameters, so the
        # random initialization pass is skipped.
        kwargs['init_weights'] = False
    model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm, sync=sync), **kwargs)
    if pretrained:
        from torch.hub import load_state_dict_from_url
        state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)
        model.load_state_dict(state_dict)
    return model
|
| 110 |
+
|
| 111 |
+
|
| 112 |
+
def vgg11(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") from
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'A', no batch norm.
    return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs)
|
| 121 |
+
|
| 122 |
+
|
| 123 |
+
def vgg11_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 11-layer model (configuration "A") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'A' with batch norm.
    return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs)
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
def vgg13(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'B', no batch norm.
    return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs)
|
| 143 |
+
|
| 144 |
+
|
| 145 |
+
def vgg13_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 13-layer model (configuration "B") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'B' with batch norm.
    return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs)
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def vgg16(pretrained=False, progress=True, **kwargs):
    r"""VGG 16-layer model (configuration "D")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'D', no batch norm.
    return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs)
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
def vgg16_bn(pretrained=False, progress=True, sync=False, **kwargs):
    r"""VGG 16-layer model (configuration "D") with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
        sync (bool): If True, use SyncBatchNorm instead of BatchNorm2d
    """
    # Thin wrapper: config 'D' with batch norm; the only variant that
    # forwards the `sync` flag.
    return _vgg('vgg16_bn', 'D', True, pretrained, progress, sync=sync, **kwargs)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
def vgg19(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration "E")
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'E', no batch norm.
    return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs)
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
def vgg19_bn(pretrained=False, progress=True, **kwargs):
    r"""VGG 19-layer model (configuration 'E') with batch normalization
    `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
    # Thin wrapper: config 'E' with batch norm.
    return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs)
|
motion_estimator.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import cv2
|
| 2 |
+
import numpy as np
|
| 3 |
+
|
| 4 |
+
class GlobalMotionEstimator:
    """Estimates global (camera) motion between consecutive video frames.

    Tracks sparse corner features across frames with pyramidal Lucas-Kanade
    optical flow and fits a partial 2D affine transform (translation +
    rotation + uniform scale) mapping the previous frame onto the current
    one. `update` always returns a 2x3 float32 matrix; the identity is
    returned for the first frame or whenever estimation fails.
    """

    def __init__(self):
        self.prev_gray = None  # previous frame in grayscale (None until first update)
        self.prev_pts = None   # corner features detected in prev_gray

    def update(self, current_frame):
        """Return the 2x3 affine transform from the previous frame to this one.

        Expects a 3-channel frame (BGR or RGB; grayscale conversion works
        identically for motion-estimation purposes).
        """
        curr_gray = cv2.cvtColor(current_frame, cv2.COLOR_BGR2GRAY)

        # Default: identity (no motion detected).
        transform = np.eye(2, 3, dtype=np.float32)

        if self.prev_gray is None:
            # First frame: just seed the feature tracker.
            self.prev_gray = curr_gray
            self.prev_pts = cv2.goodFeaturesToTrack(curr_gray, maxCorners=200, qualityLevel=0.01, minDistance=30, blockSize=3)
            return transform

        if self.prev_pts is not None and len(self.prev_pts) > 0:
            curr_pts, status, err = cv2.calcOpticalFlowPyrLK(self.prev_gray, curr_gray, self.prev_pts, None, winSize=(21, 21), maxLevel=3)

            # BUG FIX: calcOpticalFlowPyrLK can return None for curr_pts when
            # tracking fails entirely; indexing None crashed the old code.
            if curr_pts is not None:
                good_prev = self.prev_pts[status == 1]
                good_curr = curr_pts[status == 1]

                if len(good_prev) >= 4 and len(good_curr) >= 4:
                    # Estimate affine transform (translation + rotation + scale)
                    estimated, _inliers = cv2.estimateAffinePartial2D(good_prev, good_curr)
                    if estimated is not None:  # safety fallback stays identity
                        # BUG FIX: estimateAffinePartial2D returns float64;
                        # cast so callers always receive the documented
                        # float32 dtype regardless of code path.
                        transform = estimated.astype(np.float32)

        # Re-detect features every frame so tracking quality does not decay.
        self.prev_gray = curr_gray
        self.prev_pts = cv2.goodFeaturesToTrack(curr_gray, maxCorners=200, qualityLevel=0.01, minDistance=30, blockSize=3)

        return transform
|
optimization_notes.md
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
```python
|
| 2 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 3 |
+
|
| 4 |
+
def extract_patch(job):
|
| 5 |
+
x, y, img_padded, patch_size, transform = job
|
| 6 |
+
patch = img_padded.crop((x, y, x + patch_size, y + patch_size))
|
| 7 |
+
return x, y, transform(patch)
|
| 8 |
+
|
| 9 |
+
jobs = [(x, y, img_padded, patch_size, transform) for x, y in coords]
|
| 10 |
+
with ThreadPoolExecutor(max_workers=8) as executor:
|
| 11 |
+
patch_tensors = list(executor.map(extract_patch, jobs))
|
| 12 |
+
```
|
| 13 |
+
|
| 14 |
+
```python
|
| 15 |
+
def grid_merge_points(points, radius=8.0):
|
| 16 |
+
if not points:
|
| 17 |
+
return []
|
| 18 |
+
cell_size = radius
|
| 19 |
+
cells = {}
|
| 20 |
+
merged = []
|
| 21 |
+
for point in points:
|
| 22 |
+
x, y = point
|
| 23 |
+
key = (int(x // cell_size), int(y // cell_size))
|
| 24 |
+
duplicate = False
|
| 25 |
+
for nx in range(key[0] - 1, key[0] + 2):
|
| 26 |
+
for ny in range(key[1] - 1, key[1] + 2):
|
| 27 |
+
for existing in cells.get((nx, ny), []):
|
| 28 |
+
if ((x - existing[0]) ** 2 + (y - existing[1]) ** 2) ** 0.5 <= radius:
|
| 29 |
+
duplicate = True
|
| 30 |
+
break
|
| 31 |
+
if duplicate:
|
| 32 |
+
break
|
| 33 |
+
if duplicate:
|
| 34 |
+
break
|
| 35 |
+
if not duplicate:
|
| 36 |
+
merged.append(point)
|
| 37 |
+
cells.setdefault(key, []).append(point)
|
| 38 |
+
return merged
|
| 39 |
+
```
|
| 40 |
+
|
| 41 |
+
```python
|
| 42 |
+
import cv2
|
| 43 |
+
import numpy as np
|
| 44 |
+
|
| 45 |
+
class ORBHomographyMotionEstimator:
|
| 46 |
+
def __init__(self, max_features=500):
|
| 47 |
+
self.prev_gray = None
|
| 48 |
+
self.orb = cv2.ORB_create(max_features)
|
| 49 |
+
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
|
| 50 |
+
|
| 51 |
+
def update(self, frame_bgr):
|
| 52 |
+
gray = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2GRAY)
|
| 53 |
+
if self.prev_gray is None:
|
| 54 |
+
self.prev_gray = gray
|
| 55 |
+
return np.eye(3, dtype=np.float32)
|
| 56 |
+
kp1, des1 = self.orb.detectAndCompute(self.prev_gray, None)
|
| 57 |
+
kp2, des2 = self.orb.detectAndCompute(gray, None)
|
| 58 |
+
H = np.eye(3, dtype=np.float32)
|
| 59 |
+
if des1 is not None and des2 is not None and len(kp1) >= 8 and len(kp2) >= 8:
|
| 60 |
+
matches = sorted(self.matcher.match(des1, des2), key=lambda m: m.distance)[:100]
|
| 61 |
+
if len(matches) >= 8:
|
| 62 |
+
src = np.float32([kp1[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
|
| 63 |
+
dst = np.float32([kp2[m.trainIdx].pt for m in matches]).reshape(-1, 1, 2)
|
| 64 |
+
H_found, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
|
| 65 |
+
if H_found is not None:
|
| 66 |
+
H = H_found.astype(np.float32)
|
| 67 |
+
self.prev_gray = gray
|
| 68 |
+
return H
|
| 69 |
+
```
|
| 70 |
+
|
| 71 |
+
```python
|
| 72 |
+
from PIL import Image
|
| 73 |
+
|
| 74 |
+
def batch_infer_frames(frames_rgb, process_frame_fn, model, device, transform, **kwargs):
|
| 75 |
+
results = []
|
| 76 |
+
for frame_rgb in frames_rgb:
|
| 77 |
+
image = Image.fromarray(frame_rgb)
|
| 78 |
+
_, count, points = process_frame_fn(image, model, device, transform, **kwargs)
|
| 79 |
+
results.append({"count": count, "points": points})
|
| 80 |
+
return results
|
| 81 |
+
```
|
report_generator.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import pandas as pd
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
class ReportGenerator:
    """Accumulates per-frame crowd counts and exports them as CSV/JSON bytes."""

    def __init__(self):
        self.data = []  # per-frame dict rows, in insertion order

    def add_frame_data(self, frame_num, timestamp_sec, frame_count, cumulative_count):
        """Record one analyzed frame.

        Args:
            frame_num: index of the frame in the video.
            timestamp_sec: frame timestamp in seconds (stored rounded to 2 dp).
            frame_count: people detected in this frame.
            cumulative_count: unique people seen so far across the video.
        """
        self.data.append({
            "frame_number": frame_num,
            "timestamp_sec": round(timestamp_sec, 2),
            "frame_count": frame_count,
            "total_unique_count": cumulative_count
        })

    def get_csv(self):
        """Return the timeline as UTF-8 encoded CSV bytes (b"" when empty)."""
        # BUG FIX: the empty case used to return the str "" while the
        # non-empty case returned bytes — an inconsistent payload type for
        # download/response handlers. Always return bytes.
        if not self.data:
            return b""
        df = pd.DataFrame(self.data)
        return df.to_csv(index=False).encode('utf-8')

    def get_json(self):
        """Return the timeline plus metadata as UTF-8 encoded JSON bytes."""
        return json.dumps({
            "metadata": {
                "generated_by": "Civic Pulse Engine",
                "total_frames_analyzed": len(self.data)
            },
            "timeline": self.data
        }, indent=2).encode('utf-8')
|
requirements.txt
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# ── Core API framework ──────────────────────────────────────────
|
| 2 |
+
fastapi
|
| 3 |
+
uvicorn[standard]
|
| 4 |
+
python-multipart
|
| 5 |
+
|
| 6 |
+
# ── PyTorch CPU-only build (~200 MB vs ~800 MB for CUDA) ────────
|
| 7 |
+
# This is essential for fitting in free-tier Docker containers
|
| 8 |
+
--extra-index-url https://download.pytorch.org/whl/cpu
|
| 9 |
+
torch
|
| 10 |
+
torchvision
|
| 11 |
+
|
| 12 |
+
# ── Image / Video processing ────────────────────────────────────
|
| 13 |
+
Pillow
|
| 14 |
+
opencv-python-headless
|
| 15 |
+
|
| 16 |
+
# ── Scientific computing ─────────────────────────────────────────
|
| 17 |
+
numpy
|
| 18 |
+
scipy
|
| 19 |
+
scikit-learn
|
| 20 |
+
|
| 21 |
+
# ── Database ORM ─────────────────────────────────────────────────
|
| 22 |
+
sqlmodel
|
| 23 |
+
|
| 24 |
+
# ── Misc utilities ───────────────────────────────────────────────
|
| 25 |
+
easydict
|
| 26 |
+
|
| 27 |
+
# ── HuggingFace Hub (for auto-downloading model weights) ─────────
|
| 28 |
+
huggingface_hub
|
run_test.py
ADDED
|
@@ -0,0 +1,103 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import datetime
|
| 3 |
+
import random
|
| 4 |
+
import time
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
import torchvision.transforms as standard_transforms
|
| 9 |
+
import numpy as np
|
| 10 |
+
|
| 11 |
+
from PIL import Image
|
| 12 |
+
import cv2
|
| 13 |
+
from crowd_datasets import build_dataset
|
| 14 |
+
from engine import *
|
| 15 |
+
from models import build_model
|
| 16 |
+
import os
|
| 17 |
+
import warnings
|
| 18 |
+
warnings.filterwarnings('ignore')
|
| 19 |
+
|
| 20 |
+
def get_args_parser():
    """Build the argparse parser holding all P2PNet evaluation options."""
    p = argparse.ArgumentParser('Set parameters for P2PNet evaluation', add_help=False)

    # Backbone selection.
    p.add_argument('--backbone', default='vgg16_bn', type=str,
                   help="name of the convolutional backbone to use")

    # Anchor-point grid density per feature location.
    p.add_argument('--row', default=2, type=int,
                   help="row number of anchor points")
    p.add_argument('--line', default=2, type=int,
                   help="line number of anchor points")

    # I/O paths.
    p.add_argument('--output_dir', default='',
                   help='path where to save')
    p.add_argument('--weight_path', default='',
                   help='path where the trained weights saved')

    # Device selection.
    p.add_argument('--gpu_id', default=0, type=int, help='the gpu used for evaluation')

    return p
|
| 40 |
+
|
| 41 |
+
def main(args, debug=False):
    """Run single-image P2PNet inference and save a visualization.

    Builds the model (optionally restoring weights from args.weight_path),
    runs it on a hard-coded demo image, keeps predictions whose person-class
    confidence exceeds 0.5, and writes `pred<count>.jpg` to args.output_dir.
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(args.gpu_id)

    print(args)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # get the P2PNet
    model = build_model(args)
    # move to the selected device
    model.to(device)
    # load trained model
    # BUG FIX: the parser's default weight_path is '' (not None), so the old
    # `if args.weight_path is not None:` check always passed and
    # torch.load('') raised on an unset path. Use truthiness instead.
    if args.weight_path:
        checkpoint = torch.load(args.weight_path, map_location='cpu')
        model.load_state_dict(checkpoint['model'])
    # convert to eval mode
    model.eval()
    # create the pre-processing transform (ImageNet normalization)
    transform = standard_transforms.Compose([
        standard_transforms.ToTensor(),
        standard_transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    # set your image path here
    img_path = "./vis/demo1.jpg"
    # load the image
    img_raw = Image.open(img_path).convert('RGB')
    # round the size down to a multiple of 128
    # (presumably the model's stride requirement — TODO confirm)
    width, height = img_raw.size
    new_width = width // 128 * 128
    new_height = height // 128 * 128
    # Pillow >= 9.1 moved resampling filters to Image.Resampling; older
    # versions expose ANTIALIAS (value 1) on Image directly.
    resample_filter = getattr(Image, "Resampling", Image).LANCZOS if hasattr(Image, "Resampling") else getattr(Image, "ANTIALIAS", 1)
    img_raw = img_raw.resize((new_width, new_height), resample_filter)
    # pre-processing
    img = transform(img_raw)

    samples = torch.Tensor(img).unsqueeze(0)
    samples = samples.to(device)
    # run inference
    outputs = model(samples)
    # person-class confidence per proposal (softmax over the two classes)
    outputs_scores = torch.nn.functional.softmax(outputs['pred_logits'], -1)[:, :, 1][0]
    outputs_points = outputs['pred_points'][0]

    threshold = 0.5
    # filter the predictions by confidence
    points = outputs_points[outputs_scores > threshold].detach().cpu().numpy().tolist()
    predict_cnt = int((outputs_scores > threshold).sum())

    # (removed: a redundant, identical recomputation of outputs_scores and
    # outputs_points that appeared here in the original)

    # draw the predictions as small red dots
    size = 2
    img_to_draw = cv2.cvtColor(np.array(img_raw), cv2.COLOR_RGB2BGR)
    for p in points:
        img_to_draw = cv2.circle(img_to_draw, (int(p[0]), int(p[1])), size, (0, 0, 255), -1)
    # save the visualized image, named after the predicted count
    cv2.imwrite(os.path.join(args.output_dir, 'pred{}.jpg'.format(predict_cnt)), img_to_draw)
|
| 99 |
+
|
| 100 |
+
if __name__ == '__main__':
    # CLI entry point: wrap the option parser (adding -h/--help here, since
    # the parent parser was built with add_help=False) and run inference.
    parser = argparse.ArgumentParser('P2PNet evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    main(args)
|
test_system.py
ADDED
|
@@ -0,0 +1,114 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
|
| 2 |
+
|
| 3 |
+
import cv2
|
| 4 |
+
import numpy as np
|
| 5 |
+
import torch
|
| 6 |
+
|
| 7 |
+
from motion_estimator import GlobalMotionEstimator
|
| 8 |
+
from report_generator import ReportGenerator
|
| 9 |
+
from tracker import Tracker
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
GREEN = "\033[92m"
|
| 13 |
+
RED = "\033[91m"
|
| 14 |
+
RESET = "\033[0m"
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def run_test(name, fn):
    """Run one test callable; print a colored PASS/FAIL line and return success.

    Args:
        name: label printed next to the result.
        fn: zero-argument callable; any exception it raises marks a failure.

    Returns:
        True when fn() completed without raising, False otherwise.
    """
    try:
        fn()
        print(f"{GREEN}PASS{RESET} {name}")
        return True
    except Exception as exc:
        # Broad catch is intentional: any failure marks this test FAILED
        # without aborting the remaining tests in the suite.
        print(f"{RED}FAIL{RESET} {name}: {exc}")
        return False
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def test_model_loading():
    """The P2PNet model can be constructed from a minimal args object."""
    # Local import so the suite can report a clean failure if the model
    # package itself is broken.
    from models import build_model

    # Minimal stand-in for the argparse namespace build_model expects.
    class Args:
        backbone = "vgg16_bn"
        row = 2
        line = 2

    model = build_model(Args())
    assert model is not None
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def test_dummy_inference():
    """Push an all-zero image through the model and sanity-check its output dict."""
    from models import build_model

    class _Cfg:
        # Minimal argument object mirroring the fields build_model reads.
        backbone = "vgg16_bn"
        row = 2
        line = 2

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = build_model(_Cfg()).to(device).eval()
    dummy = torch.zeros((1, 3, 512, 512), dtype=torch.float32, device=device)
    with torch.inference_mode():
        preds = net(dummy)
    # The model must emit both heads, batched to match the single input image.
    assert "pred_logits" in preds and "pred_points" in preds
    assert preds["pred_logits"].shape[0] == 1
|
| 54 |
+
|
| 55 |
+
|
| 56 |
+
def test_tracker_persistence():
    """A single slowly drifting point must keep one stable track ID for 30 frames."""
    tracker = Tracker(max_distance=20, max_age=3)
    blank = np.zeros((128, 128, 3), dtype=np.uint8)
    expected_id = None
    for step in range(30):
        # Drift well inside the 20px gating radius so the match never breaks.
        tracks, total, anomaly = tracker.update(blank, [[50 + step * 0.2, 50 + step * 0.1]])
        assert tracks
        if expected_id is None:
            expected_id = tracks[0].id
        assert tracks[0].id == expected_id
        assert anomaly is False
        assert total == 1
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def test_motion_compensation():
    """The motion estimator must yield a 2x3 affine transform on every frame."""
    estimator = GlobalMotionEstimator()
    frame_a = np.zeros((200, 200, 3), dtype=np.uint8)
    # Paint two distinct features so the estimator has something to match on.
    cv2.circle(frame_a, (80, 80), 10, (255, 255, 255), -1)
    cv2.rectangle(frame_a, (120, 120), (150, 150), (255, 255, 255), -1)
    # Simulate a 5px horizontal camera pan.
    frame_b = np.roll(frame_a, shift=5, axis=1)
    for frame in (frame_a, frame_b):
        transform = estimator.update(frame)
        assert transform.shape == (2, 3)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def test_alert_thresholds():
    """Check occupancy ratios at the 50/75/90/100% alert levels.

    The previous assertion (``ratio >= 0``) was vacuous — it holds for any
    non-negative count and could never fail. Pin the exact expected ratio for
    each level and require it to fall in the valid (0, 1] band instead.
    """
    cases = [(50, 100, 0.50), (75, 100, 0.75), (90, 100, 0.90), (100, 100, 1.00)]
    for current, threshold, expected in cases:
        ratio = current / threshold
        assert ratio == expected
        assert 0.0 < ratio <= 1.0
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def test_report_exports():
    """CSV and JSON exports must round-trip one recorded frame."""
    generator = ReportGenerator()
    generator.add_frame_data(1, 0.03, 10, 11)
    # CSV export is bytes and must carry the header column.
    assert b"frame_number" in generator.get_csv()
    payload = json.loads(generator.get_json().decode("utf-8"))
    assert payload["timeline"][0]["frame_count"] == 10
|
| 96 |
+
|
| 97 |
+
|
| 98 |
+
def main():
    """Run the whole smoke-test suite; exit 0 only if every test passes."""
    suite = [
        ("model loading", test_model_loading),
        ("dummy image inference", test_dummy_inference),
        ("tracker ID persistence across 30 frames", test_tracker_persistence),
        ("motion compensation transform", test_motion_compensation),
        ("alert thresholds at 50/75/90/100%", test_alert_thresholds),
        ("CSV/JSON export", test_report_exports),
    ]
    outcomes = [run_test(label, test_fn) for label, test_fn in suite]
    passed = sum(outcomes)
    print(f"{passed}/{len(outcomes)} tests passed")
    # Non-zero exit code lets CI treat any failure as a build failure.
    raise SystemExit(0 if passed == len(outcomes) else 1)
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
if __name__ == "__main__":
|
| 114 |
+
main()
|
tracker.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import numpy as np
|
| 2 |
+
import cv2
|
| 3 |
+
from scipy.optimize import linear_sum_assignment
|
| 4 |
+
from motion_estimator import GlobalMotionEstimator
|
| 5 |
+
|
| 6 |
+
class PointTrack:
    """Mutable state of a single tracked head point."""

    def __init__(self, pt, track_id):
        self.id = track_id  # persistent identifier assigned by the Tracker
        self.pt = np.array(pt, dtype=np.float32)  # current (x, y) position
        self.velocity = 0.0  # displacement since the last matched frame
        self.time_since_update = 0  # frames elapsed since the last match
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Tracker:
    """Point tracker combining Hungarian assignment with global motion compensation.

    Each detection matched across frames keeps a persistent integer ID starting
    at 1, so ``next_id - 1`` doubles as the cumulative count of unique points
    seen so far (this is what ``update`` returns as its second value).
    """

    def __init__(self, max_distance=50.0, max_age=5):
        # max_distance: gating radius in pixels for track/detection association.
        # max_age: frames a track may remain unmatched before being dropped.
        self.max_distance = max_distance
        self.max_age = max_age
        self.tracks = []
        self.next_id = 1
        self.motion_estimator = GlobalMotionEstimator()

    def apply_motion_compensation(self, transform):
        """Warp every track position by the 2x3 affine `transform` (camera ego-motion)."""
        if transform is None or len(self.tracks) == 0:
            return
        # transform: 2x3 matrix
        # cv2.transform expects points shaped (N, 1, 2).
        pts = np.array([t.pt for t in self.tracks], dtype=np.float32).reshape(-1, 1, 2)
        pts_transformed = cv2.transform(pts, transform).reshape(-1, 2)
        for i, t in enumerate(self.tracks):
            t.pt = pts_transformed[i]

    def update(self, frame_bgr, detected_points):
        """
        Updates standard tracking variables and detects chaos.

        Returns:
            tuple[list[PointTrack], int, bool]: active tracks, cumulative unique count, and anomaly flag.
        """
        # 1. Global motion compensation via Drone drift
        # NOTE(review): assumes GlobalMotionEstimator.update returns a 2x3
        # affine matrix or None — confirm against motion_estimator.py.
        transform = self.motion_estimator.update(frame_bgr)
        self.apply_motion_compensation(transform)

        # 2. Increment age for all
        # Matched tracks get their age reset to 0 further below.
        for t in self.tracks:
            t.time_since_update += 1

        detected_points = np.array(detected_points, dtype=np.float32)

        if len(self.tracks) == 0:
            # First initialization
            for pt in detected_points:
                self.tracks.append(PointTrack(pt, self.next_id))
                self.next_id += 1
            return self.tracks.copy(), self.next_id - 1, False

        if len(detected_points) == 0:
            # No points detected, clear out old ones based on constraint
            self.tracks = [t for t in self.tracks if t.time_since_update <= self.max_age]
            return self.tracks.copy(), self.next_id - 1, False

        # 3. Hungarian matching assignment optimally pairing tracked to current
        track_pts = np.array([t.pt for t in self.tracks], dtype=np.float32)

        # Cost matrix: NxM distance mapping
        # Broadcasting produces pairwise Euclidean distances (tracks x detections).
        diff = track_pts[:, np.newaxis, :] - detected_points[np.newaxis, :, :]
        dist_matrix = np.sqrt(np.sum(diff**2, axis=2))

        # Optimization resolution
        row_ind, col_ind = linear_sum_assignment(dist_matrix)

        assigned_tracks = set()
        assigned_detections = set()

        for r, c in zip(row_ind, col_ind):
            # Reject optimal pairs that are still farther apart than the gate.
            if dist_matrix[r, c] <= self.max_distance:
                # Update velocity (distance from last position)
                self.tracks[r].velocity = dist_matrix[r, c]
                self.tracks[r].pt = detected_points[c]
                self.tracks[r].time_since_update = 0
                assigned_tracks.add(r)
                assigned_detections.add(c)


        # 4. Handle unassigned fresh detections
        for i, pt in enumerate(detected_points):
            if i not in assigned_detections:
                self.tracks.append(PointTrack(pt, self.next_id))
                self.next_id += 1

        # 5. Remove permanently lost/dead tracks
        self.tracks = [t for t in self.tracks if t.time_since_update <= self.max_age]

        # 6. Chaos detection criteria: if > 5 tracks are moving anomalously rapidly
        # "Rapid" = displacement above 70% of the gating radius this frame.
        chaotic_count = sum(1 for t in self.tracks if t.velocity > self.max_distance * 0.7 and t.time_since_update == 0)
        anomaly = chaotic_count >= 5

        return self.tracks.copy(), self.next_id - 1, anomaly
|
train.py
ADDED
|
@@ -0,0 +1,229 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import datetime
|
| 3 |
+
import random
|
| 4 |
+
import time
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
import torch
|
| 8 |
+
from torch.utils.data import DataLoader, DistributedSampler
|
| 9 |
+
|
| 10 |
+
from crowd_datasets import build_dataset
|
| 11 |
+
from engine import *
|
| 12 |
+
from models import build_model
|
| 13 |
+
import os
|
| 14 |
+
from tensorboardX import SummaryWriter
|
| 15 |
+
import warnings
|
| 16 |
+
warnings.filterwarnings('ignore')
|
| 17 |
+
|
| 18 |
+
def get_args_parser():
    """Build the argparse parser holding all P2PNet training hyper-parameters.

    Returned with ``add_help=False`` so it can be composed as a parent parser
    by the ``__main__`` entry point.
    """
    parser = argparse.ArgumentParser('Set parameters for training P2PNet', add_help=False)
    # Optimization schedule.
    parser.add_argument('--lr', default=1e-4, type=float)
    parser.add_argument('--lr_backbone', default=1e-5, type=float)
    parser.add_argument('--batch_size', default=8, type=int)
    parser.add_argument('--weight_decay', default=1e-4, type=float)
    parser.add_argument('--epochs', default=3500, type=int)
    # NOTE: lr_drop == epochs means the StepLR decay never fires by default.
    parser.add_argument('--lr_drop', default=3500, type=int)
    parser.add_argument('--clip_max_norm', default=0.1, type=float,
                        help='gradient clipping max norm')

    # Model parameters
    parser.add_argument('--frozen_weights', type=str, default=None,
                        help="Path to the pretrained model. If set, only the mask head will be trained")

    # * Backbone
    parser.add_argument('--backbone', default='vgg16_bn', type=str,
                        help="Name of the convolutional backbone to use")

    # * Matcher
    parser.add_argument('--set_cost_class', default=1, type=float,
                        help="Class coefficient in the matching cost")

    parser.add_argument('--set_cost_point', default=0.05, type=float,
                        help="L1 point coefficient in the matching cost")

    # * Loss coefficients
    parser.add_argument('--point_loss_coef', default=0.0002, type=float)

    parser.add_argument('--eos_coef', default=0.5, type=float,
                        help="Relative classification weight of the no-object class")
    parser.add_argument('--row', default=2, type=int,
                        help="row number of anchor points")
    parser.add_argument('--line', default=2, type=int,
                        help="line number of anchor points")

    # dataset parameters
    parser.add_argument('--dataset_file', default='SHHA',
                        help='Dataset loader to use: SHHA')
    parser.add_argument('--data_root', default='./new_public_density_data',
                        help='path where the dataset is')

    # Output locations for logs, checkpoints and tensorboard events.
    parser.add_argument('--output_dir', default='./log',
                        help='path where to save, empty for no saving')
    parser.add_argument('--checkpoints_dir', default='./ckpt',
                        help='path where to save checkpoints, empty for no saving')
    parser.add_argument('--tensorboard_dir', default='./runs',
                        help='path where to save, empty for no saving')

    parser.add_argument('--seed', default=42, type=int)
    parser.add_argument('--resume', default='', help='resume from checkpoint')
    parser.add_argument('--start_epoch', default=0, type=int, metavar='N',
                        help='start epoch')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--num_workers', default=8, type=int)
    parser.add_argument('--eval_freq', default=5, type=int,
                        help='frequency of evaluation, default setting is evaluating in every 5 epoch')
    parser.add_argument('--gpu_id', default=0, type=int, help='the gpu used for training')

    return parser
|
| 78 |
+
|
| 79 |
+
def main(args):
    """Train P2PNet end-to-end: build model, data, optimizer, then run the epoch loop.

    NOTE(review): this function references ``utils`` and ``np`` which are not
    imported by name in this file — presumably they arrive via
    ``from engine import *``; verify against engine.py.
    """
    # Restrict visible devices before any CUDA context is created.
    os.environ["CUDA_VISIBLE_DEVICES"] = '{}'.format(args.gpu_id)
    if args.output_dir:
        os.makedirs(args.output_dir, exist_ok=True)
    if args.checkpoints_dir:
        os.makedirs(args.checkpoints_dir, exist_ok=True)
    if args.tensorboard_dir:
        os.makedirs(args.tensorboard_dir, exist_ok=True)
    # create the logging file
    run_log_name = os.path.join(args.output_dir, 'run_log.txt')
    with open(run_log_name, "w") as log_file:
        log_file.write('Eval Log %s\n' % time.strftime("%c"))

    if args.frozen_weights is not None:
        # NOTE(review): ``args.masks`` is not defined by get_args_parser in
        # this file (leftover from DETR); this branch would raise
        # AttributeError if --frozen_weights is ever set — confirm.
        assert args.masks, "Frozen training is meant for segmentation only"
    # backup the arguments
    print(args)
    with open(run_log_name, "a") as log_file:
        log_file.write("{}".format(args))
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)
    # get the P2PNet model
    model, criterion = build_model(args, training=True)
    # move to the selected device
    model.to(device)
    criterion.to(device)

    model_without_ddp = model

    n_parameters = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print('number of params:', n_parameters)
    # use different optimation params for different parts of the model
    # The backbone gets its own (smaller) learning rate.
    param_dicts = [
        {"params": [p for n, p in model_without_ddp.named_parameters() if "backbone" not in n and p.requires_grad]},
        {
            "params": [p for n, p in model_without_ddp.named_parameters() if "backbone" in n and p.requires_grad],
            "lr": args.lr_backbone,
        },
    ]
    # Adam is used by default
    optimizer = torch.optim.Adam(param_dicts, lr=args.lr)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)
    # create the dataset
    loading_data = build_dataset(args=args)
    # create the training and valiation set
    train_set, val_set = loading_data(args.data_root)
    # create the sampler used during training
    sampler_train = torch.utils.data.RandomSampler(train_set)
    sampler_val = torch.utils.data.SequentialSampler(val_set)

    batch_sampler_train = torch.utils.data.BatchSampler(
        sampler_train, args.batch_size, drop_last=True)
    # the dataloader for training
    data_loader_train = DataLoader(train_set, batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn_crowd, num_workers=args.num_workers)

    # Validation always runs with batch size 1 and no shuffling.
    data_loader_val = DataLoader(val_set, 1, sampler=sampler_val,
                                 drop_last=False, collate_fn=utils.collate_fn_crowd, num_workers=args.num_workers)

    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])
    # resume the weights and training state if exists
    if args.resume:
        checkpoint = torch.load(args.resume, map_location='cpu')
        model_without_ddp.load_state_dict(checkpoint['model'])
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            args.start_epoch = checkpoint['epoch'] + 1

    print("Start training")
    start_time = time.time()
    # save the performance during the training
    mae = []
    mse = []
    # the logger writer
    writer = SummaryWriter(args.tensorboard_dir)

    step = 0
    # training starts here
    for epoch in range(args.start_epoch, args.epochs):
        t1 = time.time()
        stat = train_one_epoch(
            model, criterion, data_loader_train, optimizer, device, epoch,
            args.clip_max_norm)

        # record the training states after every epoch
        if writer is not None:
            with open(run_log_name, "a") as log_file:
                log_file.write("loss/loss@{}: {}".format(epoch, stat['loss']))
                log_file.write("loss/loss_ce@{}: {}".format(epoch, stat['loss_ce']))

            writer.add_scalar('loss/loss', stat['loss'], epoch)
            writer.add_scalar('loss/loss_ce', stat['loss_ce'], epoch)

        t2 = time.time()
        print('[ep %d][lr %.7f][%.2fs]' % \
              (epoch, optimizer.param_groups[0]['lr'], t2 - t1))
        with open(run_log_name, "a") as log_file:
            log_file.write('[ep %d][lr %.7f][%.2fs]' % (epoch, optimizer.param_groups[0]['lr'], t2 - t1))
        # change lr according to the scheduler
        lr_scheduler.step()
        # save latest weights every epoch
        checkpoint_latest_path = os.path.join(args.checkpoints_dir, 'latest.pth')
        torch.save({
            'model': model_without_ddp.state_dict(),
        }, checkpoint_latest_path)
        # run evaluation
        if epoch % args.eval_freq == 0 and epoch != 0:
            t1 = time.time()
            result = evaluate_crowd_no_overlap(model, data_loader_val, device)
            t2 = time.time()

            mae.append(result[0])
            mse.append(result[1])
            # print the evaluation results
            print('=======================================test=======================================')
            print("mae:", result[0], "mse:", result[1], "time:", t2 - t1, "best mae:", np.min(mae), )
            with open(run_log_name, "a") as log_file:
                log_file.write("mae:{}, mse:{}, time:{}, best mae:{}".format(result[0],
                               result[1], t2 - t1, np.min(mae)))
            print('=======================================test=======================================')
            # recored the evaluation results
            if writer is not None:
                with open(run_log_name, "a") as log_file:
                    log_file.write("metric/mae@{}: {}".format(step, result[0]))
                    log_file.write("metric/mse@{}: {}".format(step, result[1]))
                writer.add_scalar('metric/mae', result[0], step)
                writer.add_scalar('metric/mse', result[1], step)
                step += 1

                # save the best model since begining
                # Current epoch is "best" when its MAE is (near-)equal to the
                # running minimum; tolerance absorbs float noise.
                if abs(np.min(mae) - result[0]) < 0.01:
                    checkpoint_best_path = os.path.join(args.checkpoints_dir, 'best_mae.pth')
                    torch.save({
                        'model': model_without_ddp.state_dict(),
                    }, checkpoint_best_path)
    # total time for training
    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
|
| 225 |
+
|
| 226 |
+
if __name__ == '__main__':
|
| 227 |
+
parser = argparse.ArgumentParser('P2PNet training and evaluation script', parents=[get_args_parser()])
|
| 228 |
+
args = parser.parse_args()
|
| 229 |
+
main(args)
|
util/__init__.py
ADDED
|
File without changes
|
util/misc.py
ADDED
|
@@ -0,0 +1,506 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
|
| 2 |
+
"""
|
| 3 |
+
Misc functions, including distributed helpers.
|
| 4 |
+
|
| 5 |
+
Mostly copy-paste from torchvision references.
|
| 6 |
+
"""
|
| 7 |
+
import os
|
| 8 |
+
import subprocess
|
| 9 |
+
import time
|
| 10 |
+
from collections import defaultdict, deque
|
| 11 |
+
import datetime
|
| 12 |
+
import pickle
|
| 13 |
+
from typing import Optional, List
|
| 14 |
+
|
| 15 |
+
import torch
|
| 16 |
+
import torch.distributed as dist
|
| 17 |
+
from torch import Tensor
|
| 18 |
+
|
| 19 |
+
import torch.nn as nn
|
| 20 |
+
import torch.nn.functional as F
|
| 21 |
+
from torch.autograd import Variable
|
| 22 |
+
|
| 23 |
+
import torchvision
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class SmoothedValue(object):
    """Track a series of values and provide access to smoothed statistics.

    Keeps the last ``window_size`` raw samples for windowed stats (median, avg,
    max, value) alongside running totals for the global average.
    """

    def __init__(self, window_size=20, fmt=None):
        self.deque = deque(maxlen=window_size)
        self.total = 0.0
        self.count = 0
        self.fmt = "{median:.4f} ({global_avg:.4f})" if fmt is None else fmt

    def update(self, value, n=1):
        # NOTE: the window stores `value` once even when n > 1; only the
        # global totals are weighted (matches the torchvision reference).
        self.deque.append(value)
        self.count += n
        self.total += value * n

    def synchronize_between_processes(self):
        """Sum count/total across ranks.

        Warning: does not synchronize the deque!
        """
        if not is_dist_avail_and_initialized():
            return
        stats = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda')
        dist.barrier()
        dist.all_reduce(stats)
        stats = stats.tolist()
        self.count = int(stats[0])
        self.total = stats[1]

    @property
    def median(self):
        # Median over the retained window only.
        return torch.tensor(list(self.deque)).median().item()

    @property
    def avg(self):
        # Mean over the retained window only.
        return torch.tensor(list(self.deque), dtype=torch.float32).mean().item()

    @property
    def global_avg(self):
        # Average over every value ever recorded (not just the window).
        return self.total / self.count

    @property
    def max(self):
        return max(self.deque)

    @property
    def value(self):
        # Most recently recorded sample.
        return self.deque[-1]

    def __str__(self):
        return self.fmt.format(
            median=self.median,
            avg=self.avg,
            global_avg=self.global_avg,
            max=self.max,
            value=self.value)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def all_gather(data):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        # Single-process run: nothing to gather.
        return [data]

    # serialized to a Tensor
    # NOTE(review): torch.ByteStorage.from_buffer is a legacy API; newer torch
    # releases deprecate it in favor of torch.frombuffer — verify against the
    # pinned torch version before upgrading.
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to("cuda")

    # obtain Tensor size of each rank
    local_size = torch.tensor([tensor.numel()], device="cuda")
    size_list = [torch.tensor([0], device="cuda") for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.empty((max_size,), dtype=torch.uint8, device="cuda"))
    if local_size != max_size:
        # Pad our payload up to the common max length before gathering.
        padding = torch.empty(size=(max_size - local_size,), dtype=torch.uint8, device="cuda")
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    # Truncate each gathered buffer to its true length before unpickling.
    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list
|
| 129 |
+
|
| 130 |
+
|
| 131 |
+
def reduce_dict(input_dict, average=True):
    """
    Args:
        input_dict (dict): all the values will be reduced
        average (bool): whether to do average or sum
    Reduce the values in the dictionary from all processes so that all processes
    have the averaged results. Returns a dict with the same fields as
    input_dict, after reduction.
    """
    world_size = get_world_size()
    if world_size < 2:
        # Nothing to reduce in single-process mode.
        return input_dict
    with torch.no_grad():
        # Sort the keys so that they are consistent across processes.
        keys = sorted(input_dict.keys())
        stacked = torch.stack([input_dict[k] for k in keys], dim=0)
        dist.all_reduce(stacked)
        if average:
            stacked /= world_size
        return dict(zip(keys, stacked))
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
class MetricLogger(object):
    """Collect named SmoothedValue meters and print progress while iterating."""

    def __init__(self, delimiter="\t"):
        # Unknown metric names lazily create a default SmoothedValue.
        self.meters = defaultdict(SmoothedValue)
        self.delimiter = delimiter

    def update(self, **kwargs):
        """Record one scalar per keyword argument; tensors are unwrapped via .item()."""
        for k, v in kwargs.items():
            if isinstance(v, torch.Tensor):
                v = v.item()
            assert isinstance(v, (float, int))
            self.meters[k].update(v)

    def __getattr__(self, attr):
        # Lets meters be read as attributes (e.g. logger.loss). Only invoked
        # when normal attribute lookup fails, so real attributes win.
        if attr in self.meters:
            return self.meters[attr]
        if attr in self.__dict__:
            return self.__dict__[attr]
        raise AttributeError("'{}' object has no attribute '{}'".format(
            type(self).__name__, attr))

    def __str__(self):
        loss_str = []
        for name, meter in self.meters.items():
            loss_str.append(
                "{}: {}".format(name, str(meter))
            )
        return self.delimiter.join(loss_str)

    def synchronize_between_processes(self):
        """Synchronize every meter's totals across distributed ranks."""
        for meter in self.meters.values():
            meter.synchronize_between_processes()

    def add_meter(self, name, meter):
        """Register a pre-configured meter (e.g. with a custom format string)."""
        self.meters[name] = meter

    def log_every(self, iterable, print_freq, header=None):
        """Yield items from `iterable`, printing ETA/timing stats every `print_freq` steps.

        Generator: timing is measured around the consumer's work between yields.
        """
        i = 0
        if not header:
            header = ''
        start_time = time.time()
        end = time.time()
        iter_time = SmoothedValue(fmt='{avg:.4f}')
        data_time = SmoothedValue(fmt='{avg:.4f}')
        # Width of the index field matches the total count's digit width.
        space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
        if torch.cuda.is_available():
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}',
                'max mem: {memory:.0f}'
            ])
        else:
            log_msg = self.delimiter.join([
                header,
                '[{0' + space_fmt + '}/{1}]',
                'eta: {eta}',
                '{meters}',
                'time: {time}',
                'data: {data}'
            ])
        MB = 1024.0 * 1024.0
        for obj in iterable:
            # Time spent fetching this item (data loading).
            data_time.update(time.time() - end)
            yield obj
            # Full iteration time including the consumer's work.
            iter_time.update(time.time() - end)
            if i % print_freq == 0 or i == len(iterable) - 1:
                eta_seconds = iter_time.global_avg * (len(iterable) - i)
                eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
                if torch.cuda.is_available():
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time),
                        memory=torch.cuda.max_memory_allocated() / MB))
                else:
                    print(log_msg.format(
                        i, len(iterable), eta=eta_string,
                        meters=str(self),
                        time=str(iter_time), data=str(data_time)))
            i += 1
            end = time.time()
        total_time = time.time() - start_time
        total_time_str = str(datetime.timedelta(seconds=int(total_time)))
        print('{} Total time: {} ({:.4f} s / it)'.format(
            header, total_time_str, total_time / len(iterable)))
|
| 246 |
+
|
| 247 |
+
|
| 248 |
+
def get_sha():
    """Return a one-line summary of the current git state of this source tree.

    Best-effort: if git is unavailable or this is not a repository, the
    corresponding fields fall back to placeholder values.
    """
    cwd = os.path.dirname(os.path.abspath(__file__))

    def _run(command):
        return subprocess.check_output(command, cwd=cwd).decode('ascii').strip()

    sha, diff, branch = 'N/A', "clean", 'N/A'
    try:
        sha = _run(['git', 'rev-parse', 'HEAD'])
        # Refresh the index so diff-index reflects the working tree.
        subprocess.check_output(['git', 'diff'], cwd=cwd)
        diff = "has uncommited changes" if _run(['git', 'diff-index', 'HEAD']) else "clean"
        branch = _run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
    except Exception:
        # Any git failure leaves the placeholders in place.
        pass
    return f"sha: {sha}, status: {diff}, branch: {branch}"
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def collate_fn(batch):
    """Collate (image, target) pairs: pad images into one batched tensor."""
    columns = list(zip(*batch))
    columns[0] = nested_tensor_from_tensor_list(columns[0])
    return tuple(columns)
|
| 272 |
+
|
| 273 |
+
def collate_fn_crowd(batch):
    """Collate for crowd data: flatten multi-crop samples, then pad images."""
    # Re-organize the batch: a sample may carry several crops; emit one
    # (image, points) entry per crop.
    flat = []
    for imgs, points in batch:
        if imgs.ndim == 3:
            imgs = imgs.unsqueeze(0)
        for idx in range(len(imgs)):
            flat.append((imgs[idx, :, :, :], points[idx]))
    columns = list(zip(*flat))
    columns[0] = nested_tensor_from_tensor_list(columns[0])
    return tuple(columns)
|
| 286 |
+
|
| 287 |
+
|
| 288 |
+
def _max_by_axis(the_list):
|
| 289 |
+
# type: (List[List[int]]) -> List[int]
|
| 290 |
+
maxes = the_list[0]
|
| 291 |
+
for sublist in the_list[1:]:
|
| 292 |
+
for index, item in enumerate(sublist):
|
| 293 |
+
maxes[index] = max(maxes[index], item)
|
| 294 |
+
return maxes
|
| 295 |
+
|
| 296 |
+
def _max_by_axis_pad(the_list):
|
| 297 |
+
# type: (List[List[int]]) -> List[int]
|
| 298 |
+
maxes = the_list[0]
|
| 299 |
+
for sublist in the_list[1:]:
|
| 300 |
+
for index, item in enumerate(sublist):
|
| 301 |
+
maxes[index] = max(maxes[index], item)
|
| 302 |
+
|
| 303 |
+
block = 128
|
| 304 |
+
|
| 305 |
+
for i in range(2):
|
| 306 |
+
maxes[i+1] = ((maxes[i+1] - 1) // block + 1) * block
|
| 307 |
+
return maxes
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    """Pad a list of CHW image tensors into one zero-padded BCHW tensor.

    The padded spatial size is determined by ``_max_by_axis_pad``.
    Raises ValueError for inputs that are not 3-D (CHW).
    """
    # TODO make this more general
    if tensor_list[0].ndim != 3:
        raise ValueError('not supported')
    # TODO make it support different-sized images
    max_size = _max_by_axis_pad([list(img.shape) for img in tensor_list])
    batch_shape = [len(tensor_list)] + max_size
    first = tensor_list[0]
    padded = torch.zeros(batch_shape, dtype=first.dtype, device=first.device)
    for src, dst in zip(tensor_list, padded):
        # Copy each image into the top-left corner; the rest stays zero.
        dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
    return padded
|
| 327 |
+
|
| 328 |
+
class NestedTensor(object):
    """A batched tensor bundled with an optional padding mask."""

    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        """Return a new NestedTensor with tensors (and mask, if any) on ``device``."""
        moved = self.tensors.to(device)
        moved_mask = None if self.mask is None else self.mask.to(device)
        return NestedTensor(moved, moved_mask)

    def decompose(self):
        """Return the underlying ``(tensors, mask)`` pair."""
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
def setup_for_distributed(is_master):
    """
    This function disables printing when not in master process
    """
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        # Pass force=True to print from a non-master process anyway.
        # 'force' is always popped so it never leaks into builtin print.
        force = kwargs.pop('force', False)
        if force or is_master:
            builtin_print(*args, **kwargs)

    __builtin__.print = print
|
| 364 |
+
|
| 365 |
+
|
| 366 |
+
def is_dist_avail_and_initialized():
    """True iff torch.distributed is both available and initialized."""
    return dist.is_available() and dist.is_initialized()
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def get_world_size():
    """World size of the process group, or 1 outside distributed mode."""
    if is_dist_avail_and_initialized():
        return dist.get_world_size()
    return 1
|
| 378 |
+
|
| 379 |
+
|
| 380 |
+
def get_rank():
    """Rank of this process in the group, or 0 outside distributed mode."""
    if is_dist_avail_and_initialized():
        return dist.get_rank()
    return 0
|
| 384 |
+
|
| 385 |
+
|
| 386 |
+
def is_main_process():
    """True for rank 0 — the process allowed to log and save checkpoints."""
    rank = get_rank()
    return rank == 0
|
| 388 |
+
|
| 389 |
+
|
| 390 |
+
def save_on_master(*args, **kwargs):
    """torch.save, executed only on the main process (no-op elsewhere)."""
    if not is_main_process():
        return
    torch.save(*args, **kwargs)
|
| 393 |
+
|
| 394 |
+
|
| 395 |
+
def init_distributed_mode(args):
    """Initialize torch.distributed from launcher environment variables.

    Supports torchrun-style launchers (RANK / WORLD_SIZE / LOCAL_RANK) and
    SLURM (SLURM_PROCID); with neither present, distributed mode is disabled.
    Mutates ``args`` in place (rank, world_size, gpu, distributed,
    dist_backend) and expects ``args.dist_url`` to be set by the caller.
    """
    env = os.environ
    if 'RANK' in env and 'WORLD_SIZE' in env:
        args.rank = int(env["RANK"])
        args.world_size = int(env['WORLD_SIZE'])
        args.gpu = int(env['LOCAL_RANK'])
    elif 'SLURM_PROCID' in env:
        args.rank = int(env['SLURM_PROCID'])
        # Map the SLURM process id onto a local GPU.
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True

    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    # Silence print() on non-master ranks.
    setup_for_distributed(args.rank == 0)
|
| 418 |
+
|
| 419 |
+
|
| 420 |
+
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (N, C) class scores.
        target: (N,) ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        List of scalar tensors, one per k, each the top-k accuracy in percent.
    """
    if target.numel() == 0:
        # No targets: report a single zero accuracy.
        return [torch.zeros([], device=output.device)]
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): correct[:k] can be non-contiguous for k > 1,
        # which makes .view(-1) raise on recent PyTorch versions.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
|
| 437 |
+
|
| 438 |
+
|
| 439 |
+
def interpolate(input, size=None, scale_factor=None, mode="nearest", align_corners=None):
    # type: (Tensor, Optional[List[int]], Optional[float], str, Optional[bool]) -> Tensor
    """
    Equivalent to nn.functional.interpolate, but with support for empty batch sizes.
    This will eventually be supported natively by PyTorch, and this
    class can go away.
    """
    return torch.nn.functional.interpolate(input, size, scale_factor, mode, align_corners)
|
| 449 |
+
|
| 450 |
+
|
| 451 |
+
class FocalLoss(nn.Module):
    r"""
    This criterion is a implemenation of Focal Loss, which is proposed in
    Focal Loss for Dense Object Detection.

        Loss(x, class) = - \alpha (1-softmax(x)[class])^gamma \log(softmax(x)[class])

    The losses are averaged across observations for each minibatch.

    Args:
        alpha(1D Tensor, Variable) : the scalar factor for this criterion
        gamma(float, double) : gamma > 0; reduces the relative loss for well-classified examples (p > .5),
                               putting more focus on hard, misclassified examples
        size_average(bool): By default, the losses are averaged over observations for each minibatch.
                            However, if the field size_average is set to False, the losses are
                            instead summed for each minibatch.
    """

    def __init__(self, class_num, alpha=None, gamma=2, size_average=True):
        super(FocalLoss, self).__init__()
        # torch.autograd.Variable is a deprecated no-op wrapper in modern
        # PyTorch; plain tensors behave identically here.
        if alpha is None:
            self.alpha = torch.ones(class_num, 1)
        elif torch.is_tensor(alpha):
            self.alpha = alpha
        else:
            # Accept list/array-like alpha as well (backward-compatible
            # generalization of the old Variable(alpha) wrapping).
            self.alpha = torch.as_tensor(alpha)
        self.gamma = gamma
        self.class_num = class_num
        self.size_average = size_average

    def forward(self, inputs, targets):
        """Compute focal loss for (N, C) logits against (N,) class indices."""
        N = inputs.size(0)
        C = inputs.size(1)
        # Explicit dim: implicit-dim softmax is deprecated and ambiguous.
        P = F.softmax(inputs, dim=1)

        # One-hot mask selecting the target-class probability per row.
        class_mask = inputs.new_zeros(N, C)
        ids = targets.view(-1, 1)
        class_mask.scatter_(1, ids, 1.)

        if inputs.is_cuda and not self.alpha.is_cuda:
            self.alpha = self.alpha.cuda()
        alpha = self.alpha[ids.view(-1)]

        probs = (P * class_mask).sum(1).view(-1, 1)

        log_p = probs.log()
        # Focal modulation: well-classified examples (high p) are down-weighted.
        batch_loss = -alpha * (torch.pow((1 - probs), self.gamma)) * log_p

        if self.size_average:
            loss = batch_loss.mean()
        else:
            loss = batch_loss.sum()
        return loss
|