Upload app.py with huggingface_hub
app.py
CHANGED
@@ -1,3 +1,1625 @@
"""
Insurance App User Behavior Analytics - Gradio Space (Ultimate Edition v3.0)

Supports: demo mode | CSV upload | product recommendation (DIN) | anomaly detection (TabBERT) | model management | survival analysis

References:
- DIN: Deep Interest Network (KDD 2018, arxiv:1706.06978)
- TabBERT: Tabular Transformers (arxiv:2011.01843)
- Focal Loss: RetinaNet (ICCV 2017, arxiv:1708.02002)
- DeepSurv: Cox-PH Neural Network (JAMIA 2018, arxiv:1606.00931)
- RNN Survival: arxiv:2304.00575
"""
import os, io, math, warnings, datetime, random, json, tempfile, pickle
from collections import Counter, defaultdict
from dataclasses import dataclass, field
from typing import List, Dict, Optional, Tuple
from pathlib import Path

warnings.filterwarnings('ignore')
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import (
    roc_auc_score, f1_score, confusion_matrix,
    average_precision_score, precision_recall_curve, classification_report,
    roc_curve, accuracy_score, precision_score, recall_score,  # precision_score/recall_score are needed by the threshold analysis below
)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import seaborn as sns

import gradio as gr

# PyTorch
try:
    import torch
    import torch.nn as nn
    import torch.nn.functional as F
    TORCH_AVAILABLE = True
except ImportError:
    TORCH_AVAILABLE = False
    print("⚠️ PyTorch not available. Deep learning models disabled.")

# Hugging Face Hub (model save/load)
try:
    from huggingface_hub import HfApi, create_repo, hf_hub_download, login
    HFHUB_AVAILABLE = True
except ImportError:
    HFHUB_AVAILABLE = False
    print("⚠️ huggingface_hub not available. Model save/load disabled.")

# lifelines (survival analysis)
try:
    from lifelines import CoxPHFitter, KaplanMeierFitter, NelsonAalenFitter
    from lifelines.statistics import logrank_test
    LIFELINES_AVAILABLE = True
except ImportError:
    LIFELINES_AVAILABLE = False
    print("⚠️ lifelines not available. Statistical survival analysis disabled.")

# joblib
import joblib

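
# Dependency note: each try/except import above is optional. A requirements.txt
# for this Space would plausibly list gradio, numpy, pandas, scikit-learn,
# matplotlib, seaborn, and joblib, plus the optional torch, huggingface_hub,
# and lifelines needed to enable the gated features.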

# =============================================================================
# Global configuration & data model
# =============================================================================

INSURANCE_EVENT_TYPES = {
    "page_view", "product_view", "product_compare", "premium_calculator",
    "faq_view", "article_read", "quote_request", "quote_result_view",
    "document_upload", "form_submit", "chat_init", "call_init", "video_consult",
    "policy_select", "payment_init", "payment_success", "policy_issued",
    "claim_init", "claim_doc_upload", "claim_review", "claim_approved",
    "claim_rejected", "renewal_reminder", "renewal_click", "renewal_complete",
    "policy_cancel", "app_uninstall", "login", "logout",
}

@dataclass
class InsuranceAppEvent:
    event_id: str
    user_id: str
    session_id: str
    timestamp: int
    event_type: str
    page_id: str
    product_id: Optional[str] = None
    amount: Optional[float] = None
    channel: str = "app"
    device_type: str = "mobile"

@dataclass
class UserSession:
    session_id: str
    user_id: str
    events: List[InsuranceAppEvent] = field(default_factory=list)

@dataclass
class UserBehaviorProfile:
    user_id: str
    sessions: List[UserSession] = field(default_factory=list)

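
# Illustrative sketch (not called by the app): how the three dataclasses nest.
# The IDs and timestamp below are made-up demo values, not data from the app.
def _example_data_model():
    evt = InsuranceAppEvent(
        event_id="evt_demo_0", user_id="user_demo", session_id="sess_demo_0",
        timestamp=1704067200000,  # epoch milliseconds, as used throughout this file
        event_type="product_view", page_id="page_product_view",
        product_id="health_basic", amount=None,
    )
    sess = UserSession(session_id="sess_demo_0", user_id="user_demo", events=[evt])
    return UserBehaviorProfile(user_id="user_demo", sessions=[sess])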

# =============================================================================
# Feature engineering
# =============================================================================

class InsuranceFeatureEngineer:
    def extract_user_features(self, profile):
        sessions = profile.sessions
        if not sessions:
            return None
        all_events = []
        for s in sessions:
            all_events.extend(s.events)
        all_events.sort(key=lambda e: e.timestamp)
        all_type_counts = Counter(e.event_type for e in all_events)
        total = len(all_events)
        if total == 0:
            return None
        product_counter = Counter(e.product_id for e in all_events if e.product_id)
        top_product = product_counter.most_common(1)[0][0] if product_counter else None
        first_ts = all_events[0].timestamp
        last_ts = all_events[-1].timestamp
        days_active = (last_ts - first_ts) / (24 * 3600 * 1000)
        has_purchased = any(e.event_type == "policy_issued" for e in all_events)
        has_renewed = any(e.event_type == "renewal_complete" for e in all_events)
        has_claimed = any(e.event_type in ("claim_init", "claim_approved") for e in all_events)
        support = all_type_counts.get("chat_init", 0) + all_type_counts.get("call_init", 0)
        event_seq = [e.event_type for e in all_events]
        product_seq = [e.product_id or "none" for e in all_events]
        return {
            "total_sessions": len(sessions), "total_events": total,
            "days_active": days_active, "avg_events_per_session": total / len(sessions),
            "product_view_ratio": all_type_counts.get("product_view", 0) / total,
            "quote_request_ratio": all_type_counts.get("quote_request", 0) / total,
            "article_read_ratio": all_type_counts.get("article_read", 0) / total,
            "payment_success_ratio": all_type_counts.get("payment_success", 0) / total,
            "policy_issued_ratio": all_type_counts.get("policy_issued", 0) / total,
            "unique_products_viewed": len(product_counter),
            "top_product_id": top_product or "none",
            "has_purchased": int(has_purchased), "has_renewed": int(has_renewed),
            "has_claimed": int(has_claimed), "support_dependency": support / total,
            "renewal_click_count": all_type_counts.get("renewal_click", 0),
            "policy_cancel_count": all_type_counts.get("policy_cancel", 0),
            "claim_init_count": all_type_counts.get("claim_init", 0),
            "days_since_last_event": (datetime.datetime.now().timestamp() * 1000 - last_ts) / (24 * 3600 * 1000),
            "weekend_activity_ratio": sum(1 for e in all_events if datetime.datetime.fromtimestamp(e.timestamp / 1000).weekday() >= 5) / total,
            "peak_active_hour": Counter(datetime.datetime.fromtimestamp(e.timestamp / 1000).hour for e in all_events).most_common(1)[0][0],
            "recent_7day_events": sum(1 for e in all_events if (last_ts - e.timestamp) < 7 * 24 * 3600 * 1000),
            "recent_30day_events": sum(1 for e in all_events if (last_ts - e.timestamp) < 30 * 24 * 3600 * 1000),
            "_event_sequence": event_seq, "_product_sequence": product_seq,
            "_user_id": profile.user_id,
        }

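
# Illustrative sketch (not called by the app): feeding the demo profile from
# _example_data_model through the feature engineer. Keys prefixed with "_"
# carry raw sequences and are dropped before sklearn training (see train_sklearn).
def _example_extract_features():
    fe = InsuranceFeatureEngineer()
    feats = fe.extract_user_features(_example_data_model())
    return {k: v for k, v in feats.items() if not k.startswith("_")}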

# =============================================================================
# Data parsing & generation
# =============================================================================

def parse_csv_to_profiles(df):
    required_cols = {"user_id", "session_id", "timestamp", "event_type", "page_id"}
    missing = required_cols - set(c.lower().strip() for c in df.columns)
    if missing:
        raise ValueError(f"CSV is missing required columns: {missing}")
    df.columns = [c.lower().strip() for c in df.columns]
    df["timestamp"] = pd.to_numeric(df["timestamp"], errors="coerce")
    df = df.dropna(subset=["timestamp", "event_type"])
    df["timestamp"] = df["timestamp"].astype(int)
    profiles = {}
    for (uid, sid), group in df.groupby(["user_id", "session_id"]):
        if uid not in profiles:
            profiles[uid] = UserBehaviorProfile(user_id=str(uid), sessions=[])
        events = []
        for _, row in group.sort_values("timestamp").iterrows():
            events.append(InsuranceAppEvent(
                event_id=f"evt_{row.name}", user_id=str(row["user_id"]),
                session_id=str(row["session_id"]), timestamp=int(row["timestamp"]),
                event_type=str(row["event_type"]).strip(),
                page_id=str(row.get("page_id", "unknown")),
                product_id=str(row.get("product_id")) if pd.notna(row.get("product_id")) else None,
                amount=float(row["amount"]) if pd.notna(row.get("amount")) else None,
            ))
        profiles[uid].sessions.append(UserSession(session_id=str(sid), user_id=str(uid), events=events))
    return list(profiles.values())

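
# Illustrative sketch (not called by the app): the minimal CSV shape that
# parse_csv_to_profiles accepts. Column names are case-insensitive; product_id
# and amount are optional and may be left empty.
def _example_parse_csv():
    csv_text = (
        "user_id,session_id,timestamp,event_type,page_id,product_id,amount\n"
        "u1,s1,1704067200000,product_view,page_product_view,health_basic,\n"
        "u1,s1,1704067260000,quote_request,page_quote,health_basic,1200\n"
    )
    return parse_csv_to_profiles(pd.read_csv(io.StringIO(csv_text)))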

def generate_synthetic_data(n_users=2000, n_events_per_user=50, seed=42):
    random.seed(seed); np.random.seed(seed)
    event_types = list(INSURANCE_EVENT_TYPES)
    products = ["health_basic", "health_premium", "critical_illness", "term_life",
                "auto_compulsory", "auto_commercial", "home", "travel_domestic"]
    data = []
    for u in range(n_users):
        user_id = f"user_{u:04d}"; churn_risk = random.random()
        sessions = []; base_ts = int(datetime.datetime(2024, 1, 1).timestamp() * 1000)
        for s in range(random.randint(1, 5)):
            session_id = f"sess_{u}_{s}"
            n_events = random.randint(5, n_events_per_user // max(1, random.randint(1, 5)))
            events = []
            for e in range(n_events):
                if churn_risk > 0.7:
                    # High-churn users mostly browse and eventually uninstall.
                    event_type = random.choices(["page_view", "product_view", "article_read", "app_uninstall"], weights=[0.4, 0.3, 0.2, 0.1])[0]
                else:
                    # Low-churn users walk a funnel: browse -> quote -> submit -> purchase/renew.
                    stages = n_events
                    if e < stages * 0.3: event_type = random.choice(["page_view", "product_view", "article_read"])
                    elif e < stages * 0.6: event_type = random.choice(["product_view", "quote_request", "premium_calculator", "faq_view"])
                    elif e < stages * 0.8: event_type = random.choice(["quote_result_view", "form_submit", "document_upload", "payment_init"])
                    else: event_type = random.choice(["payment_success", "policy_issued", "renewal_click", "renewal_complete"])
                timestamp = base_ts + e * random.randint(5000, 30000)
                events.append(InsuranceAppEvent(f"evt_{u}_{s}_{e}", user_id, session_id, timestamp, event_type, f"page_{event_type}",
                                                random.choice(products) if event_type in ["product_view", "quote_request"] else None,
                                                random.uniform(1000, 100000) if event_type in ["quote_request", "payment_success"] else None))
            sessions.append(UserSession(session_id, user_id, events))
            base_ts += 24 * 3600 * 1000
        data.append((UserBehaviorProfile(user_id, sessions), int(churn_risk > 0.7)))
    return data


# =============================================================================
# Generic sklearn training
# =============================================================================

def train_sklearn(features_list, labels, test_size=0.2, random_state=42, use_cv=False):
    df = pd.DataFrame(features_list)
    df_full = df.copy()
    # Drop helper columns (raw sequences, user id) before numeric modeling.
    drop_cols = [c for c in df.columns if c.startswith('_')]
    for c in drop_cols: df.pop(c)
    for c in df.columns:
        if df[c].dtype == 'object':
            df[c] = pd.to_numeric(df[c], errors='coerce').fillna(0)
    df = df.fillna(0).replace([np.inf, -np.inf], 0)
    X = df.values; y = np.array(labels)
    feature_names = list(df.columns)
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, stratify=y)
    scaler = StandardScaler()
    X_train_s = scaler.fit_transform(X_train); X_test_s = scaler.transform(X_test)

    gbdt = GradientBoostingClassifier(n_estimators=200, max_depth=5, learning_rate=0.1, subsample=0.8, random_state=random_state)
    gbdt.fit(X_train_s, y_train)
    y_pred_gbdt = gbdt.predict(X_test_s); y_prob_gbdt = gbdt.predict_proba(X_test_s)[:, 1]

    rf = RandomForestClassifier(n_estimators=100, max_depth=10, class_weight='balanced', random_state=random_state, n_jobs=-1)
    rf.fit(X_train_s, y_train)
    y_prob_rf = rf.predict_proba(X_test_s)[:, 1]; y_pred_rf = rf.predict(X_test_s)

    auc_gbdt = float(roc_auc_score(y_test, y_prob_gbdt))
    f1_gbdt = float(f1_score(y_test, y_pred_gbdt))
    ap_gbdt = float(average_precision_score(y_test, y_prob_gbdt))
    auc_rf = float(roc_auc_score(y_test, y_prob_rf))
    ap_rf = float(average_precision_score(y_test, y_prob_rf))

    fi = pd.DataFrame({'feature': feature_names, 'importance': rf.feature_importances_}).sort_values('importance', ascending=False)

    cv_scores = None
    if use_cv and len(y) >= 100:
        skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
        cv_scores = cross_val_score(rf, X, y, cv=skf, scoring='roc_auc')

    os.makedirs("outputs", exist_ok=True)

    fig, ax = plt.subplots(figsize=(12, 8))
    top = fi.head(15)
    colors = plt.cm.RdYlGn(np.linspace(0.2, 0.8, len(top)))[::-1]
    ax.barh(top['feature'][::-1], top['importance'][::-1], color=colors)
    ax.set_title('Insurance APP - Top 15 Feature Importance', fontsize=14, fontweight='bold')
    ax.set_xlabel('Importance Score')
    plt.tight_layout()
    fig_path1 = "outputs/feature_importance.png"
    plt.savefig(fig_path1, dpi=150, bbox_inches='tight'); plt.close()

    fig, ax = plt.subplots(figsize=(8, 6))
    pg, rg, _ = precision_recall_curve(y_test, y_prob_gbdt)
    pr, rr, _ = precision_recall_curve(y_test, y_prob_rf)
    ax.plot(rg, pg, label=f'GBDT AP={ap_gbdt:.3f}', linewidth=2, color='#2E86AB')
    ax.plot(rr, pr, label=f'RF AP={ap_rf:.3f}', linewidth=2, color='#A23B72')
    ax.set_xlabel('Recall', fontsize=12); ax.set_ylabel('Precision', fontsize=12)
    ax.set_title('Precision-Recall Curve', fontsize=14, fontweight='bold')
    ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path2 = "outputs/pr_curve.png"
    plt.savefig(fig_path2, dpi=150, bbox_inches='tight'); plt.close()

    fig, axs = plt.subplots(1, 2, figsize=(12, 5))
    sns.heatmap(confusion_matrix(y_test, y_pred_gbdt), annot=True, fmt='d', cmap='Blues', ax=axs[0], cbar=False)
    axs[0].set_title(f'GBDT (AUC={auc_gbdt:.3f})', fontsize=12, fontweight='bold')
    axs[0].set_xlabel('Predicted'); axs[0].set_ylabel('Actual')
    sns.heatmap(confusion_matrix(y_test, y_pred_rf), annot=True, fmt='d', cmap='Greens', ax=axs[1], cbar=False)
    axs[1].set_title(f'RF (AUC={auc_rf:.3f})', fontsize=12, fontweight='bold')
    axs[1].set_xlabel('Predicted'); axs[1].set_ylabel('Actual')
    plt.tight_layout()
    fig_path3 = "outputs/confusion_matrix.png"
    plt.savefig(fig_path3, dpi=150, bbox_inches='tight'); plt.close()

    fig, ax = plt.subplots(figsize=(8, 6))
    fpr_g, tpr_g, _ = roc_curve(y_test, y_prob_gbdt)
    fpr_r, tpr_r, _ = roc_curve(y_test, y_prob_rf)
    ax.plot(fpr_g, tpr_g, label=f'GBDT AUC={auc_gbdt:.3f}', linewidth=2, color='#2E86AB')
    ax.plot(fpr_r, tpr_r, label=f'RF AUC={auc_rf:.3f}', linewidth=2, color='#A23B72')
    ax.plot([0, 1], [0, 1], 'k--', alpha=0.5)
    ax.set_xlabel('False Positive Rate', fontsize=12)
    ax.set_ylabel('True Positive Rate', fontsize=12)
    ax.set_title('ROC Curve', fontsize=14, fontweight='bold')
    ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path4 = "outputs/roc_curve.png"
    plt.savefig(fig_path4, dpi=150, bbox_inches='tight'); plt.close()

    fi_str = fi.head(15).to_string(index=False)
    report = classification_report(y_test, y_pred_gbdt, digits=4)

    cv_str = ""
    if cv_scores is not None:
        cv_str = f"\n--- 5-fold cross-validation (RF AUC) ---\nMean: {cv_scores.mean():.4f} (+/- {cv_scores.std()*2:.4f})\nScores: {cv_scores.round(4).tolist()}"

    result_text = f"""=== Model Training Results ===
Samples: {len(y)} | Features: {len(feature_names)}
Train: {len(y_train)} | Test: {len(y_test)}

--- GBDT ---
AUC: {auc_gbdt:.4f}
F1: {f1_gbdt:.4f}
AP: {ap_gbdt:.4f}

--- Random Forest ---
AUC: {auc_rf:.4f}
AP: {ap_rf:.4f}
{cv_str}

--- Top 15 Feature Importance ---
{fi_str}

--- Classification Report (GBDT) ---
{report}"""

    # Keep the fitted models in memory so they can later be pushed to the Hub.
    model_artifacts = {
        'gbdt': gbdt,
        'rf': rf,
        'scaler': scaler,
        'feature_names': feature_names,
        'metrics': {'auc_gbdt': auc_gbdt, 'f1_gbdt': f1_gbdt, 'auc_rf': auc_rf, 'ap_gbdt': ap_gbdt, 'ap_rf': ap_rf}
    }
    # Persist to a local file for the model-management tab.
    joblib.dump(model_artifacts, 'outputs/sklearn_model_artifacts.joblib')

    return result_text, fig_path1, fig_path2, fig_path3, fig_path4, df_full

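
# Illustrative sketch (not called by the app): the end-to-end churn pipeline,
# wiring generate_synthetic_data -> InsuranceFeatureEngineer -> train_sklearn.
def _example_churn_pipeline():
    data = generate_synthetic_data(n_users=200, seed=42)
    fe = InsuranceFeatureEngineer()
    feats, labels = [], []
    for profile, label in data:
        f = fe.extract_user_features(profile)
        if f is not None:
            feats.append(f); labels.append(label)
    return train_sklearn(feats, labels, use_cv=False)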

# =============================================================================
# DIN product recommendation
# =============================================================================

def generate_product_recommendation_data(n_users=1000, seed=42):
    random.seed(seed); np.random.seed(seed)
    products = ["health_basic", "health_premium", "critical_illness", "term_life",
                "auto_compulsory", "auto_commercial", "home", "travel_domestic"]
    records = []
    for u in range(n_users):
        n_behaviors = random.randint(5, 30)
        behavior_events = []
        behavior_products = []
        for i in range(n_behaviors):
            et = random.choice(["page_view", "product_view", "quote_request", "article_read"])
            behavior_events.append(et)
            behavior_products.append(random.choice(products))
        candidate = random.choice(products)
        # A candidate already present in the history converts; otherwise it converts ~30% of the time.
        label = 1 if candidate in behavior_products else random.choices([0, 1], weights=[0.7, 0.3])[0]
        records.append({
            'user_id': u, 'behavior_events': behavior_events,
            'behavior_products': behavior_products,
            'candidate_product': candidate, 'label': label,
            'user_features': np.random.randn(20).astype(np.float32),
        })
    return pd.DataFrame(records)

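
# Illustrative sketch (not called by the app): the left-padding scheme used
# inside train_din_recommendation. Sequences shorter than max_seq_len are
# left-padded with the reserved index 0 (the padding_idx of the embeddings),
# and a parallel 0/1 mask marks which positions are real behaviors.
def _example_left_pad(seq, max_seq_len=20):
    seq = seq[-max_seq_len:]
    mask = [1] * len(seq)
    pad = max_seq_len - len(seq)
    return [0] * pad + seq, [0] * pad + mask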

def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, seed):
    if not TORCH_AVAILABLE:
        return "❌ PyTorch is not installed. Add torch to requirements.txt and restart the Space.", None, None, None, None

    torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
    df = generate_product_recommendation_data(n_users=n_users, seed=seed)

    # Build vocabularies; index 0 is reserved for padding.
    all_events = sorted(set(e for seq in df['behavior_events'] for e in seq))
    event_vocab = {e: i + 1 for i, e in enumerate(all_events)}
    all_products = sorted(set(p for seq in df['behavior_products'] for p in seq) | set(df['candidate_product']))
    product_vocab = {p: i + 1 for i, p in enumerate(all_products)}

    max_seq_len = 20
    behavior_events_padded = []; behavior_products_padded = []; behavior_masks = []
    for _, row in df.iterrows():
        e_seq = [event_vocab[e] for e in row['behavior_events'][-max_seq_len:]]
        p_seq = [product_vocab[p] for p in row['behavior_products'][-max_seq_len:]]
        mask = [1] * len(e_seq)
        if len(e_seq) < max_seq_len:
            pad = max_seq_len - len(e_seq)
            e_seq = [0] * pad + e_seq; p_seq = [0] * pad + p_seq; mask = [0] * pad + mask
        behavior_events_padded.append(e_seq); behavior_products_padded.append(p_seq); behavior_masks.append(mask)

    df['be'] = behavior_events_padded; df['bp'] = behavior_products_padded; df['bm'] = behavior_masks
    df['cp'] = df['candidate_product'].map(product_vocab)

    train_df = df.sample(frac=0.8, random_state=seed)
    test_df = df.drop(train_df.index)

    device = torch.device('cpu')

    class SimpleDIN(nn.Module):
        def __init__(self, num_events, num_products, d_model=64, max_len=20):
            super().__init__()
            self.event_emb = nn.Embedding(num_events + 1, d_model // 2, padding_idx=0)
            self.prod_emb = nn.Embedding(num_products + 1, d_model // 2, padding_idx=0)
            self.cand_emb = nn.Embedding(num_products + 1, d_model)
            self.attn = nn.Sequential(nn.Linear(d_model * 4, 128), nn.ReLU(), nn.Linear(128, 1))
            self.mlp = nn.Sequential(nn.Linear(d_model * 3, 256), nn.ReLU(), nn.Dropout(0.3),
                                     nn.Linear(256, 128), nn.ReLU(), nn.Dropout(0.3), nn.Linear(128, 1))

        def forward(self, be, bp, bm, cp):
            B = be.size(0); L = be.size(1)
            e_emb = self.event_emb(be)
            p_emb = self.prod_emb(bp)
            beh_emb = torch.cat([e_emb, p_emb], dim=-1)
            cand_emb = self.cand_emb(cp)
            cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
            diff = cand_exp - beh_emb; prod = cand_exp * beh_emb
            attn_in = torch.cat([cand_exp, beh_emb, diff, prod], dim=-1)
            attn_w = self.attn(attn_in).squeeze(-1)
            attn_w = attn_w.masked_fill(~bm.bool(), -1e9)  # padded positions get ~zero attention
            attn_w = torch.softmax(attn_w, dim=1)
            interest = (beh_emb * attn_w.unsqueeze(-1)).sum(dim=1)
            x = torch.cat([interest, cand_emb, interest * cand_emb], dim=-1)
            return self.mlp(x).squeeze(-1)

    model = SimpleDIN(len(all_events), len(all_products), d_model=embedding_dim).to(device)
    criterion = nn.BCEWithLogitsLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    for epoch in range(epochs):
        model.train(); epoch_loss = 0
        for i in range(0, len(train_df), batch_size):
            batch = train_df.iloc[i:i + batch_size]
            be = torch.tensor(np.stack(batch['be'].values), dtype=torch.long).to(device)
            bp = torch.tensor(np.stack(batch['bp'].values), dtype=torch.long).to(device)
            bm = torch.tensor(np.stack(batch['bm'].values), dtype=torch.bool).to(device)
            cp = torch.tensor(batch['cp'].values, dtype=torch.long).to(device)
            labels = torch.tensor(batch['label'].values, dtype=torch.float32).to(device)
            optimizer.zero_grad()
            outputs = model(be, bp, bm, cp)
            loss = criterion(outputs, labels)
            loss.backward(); optimizer.step()
            epoch_loss += loss.item()
        if (epoch + 1) % max(1, epochs // 5) == 0 or epoch == 0:
            print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss*batch_size/len(train_df):.4f}")

    model.eval()
    with torch.no_grad():
        be = torch.tensor(np.stack(test_df['be'].values), dtype=torch.long).to(device)
        bp = torch.tensor(np.stack(test_df['bp'].values), dtype=torch.long).to(device)
        bm = torch.tensor(np.stack(test_df['bm'].values), dtype=torch.bool).to(device)
        cp = torch.tensor(test_df['cp'].values, dtype=torch.long).to(device)
        labels = test_df['label'].values
        preds = torch.sigmoid(model(be, bp, bm, cp)).cpu().numpy()

    auc = float(roc_auc_score(labels, preds))
    ap = float(average_precision_score(labels, preds))
    f1 = float(f1_score(labels, preds > 0.5))
    acc = float(accuracy_score(labels, preds > 0.5))

    os.makedirs("outputs", exist_ok=True)

    # Save the PyTorch checkpoint together with the vocabularies needed at inference time.
    torch.save({
        'model_state_dict': model.state_dict(),
        'event_vocab': event_vocab,
        'product_vocab': product_vocab,
        'embedding_dim': embedding_dim,
        'max_seq_len': max_seq_len,
        'num_events': len(all_events),
        'num_products': len(all_products),
        'metrics': {'auc': auc, 'ap': ap, 'f1': f1, 'acc': acc}
    }, 'outputs/din_model.pt')

    fig, ax = plt.subplots(figsize=(10, 6))
    product_perf = {}
    for pos, (_, row) in enumerate(test_df.iterrows()):
        prod = row['candidate_product']
        if prod not in product_perf: product_perf[prod] = {'preds': [], 'labels': []}
        product_perf[prod]['preds'].append(preds[pos])
        product_perf[prod]['labels'].append(row['label'])
    prod_aucs = []
    for prod, data in product_perf.items():
        if len(set(data['labels'])) > 1 and len(data['labels']) >= 5:
            prod_auc = roc_auc_score(data['labels'], data['preds'])
            prod_aucs.append((prod, prod_auc, np.mean(data['labels'])))
    if prod_aucs:
        prod_aucs.sort(key=lambda x: x[1], reverse=True)
        prods, aucs, rates = zip(*prod_aucs)
        x = np.arange(len(prods))
        ax.bar(x, aucs, color='steelblue', alpha=0.7, label='AUC')
        ax2 = ax.twinx()
        ax2.plot(x, rates, 'ro-', label='Conversion Rate')
        ax.set_xticks(x); ax.set_xticklabels(prods, rotation=45, ha='right')
        ax.set_ylabel('AUC', color='steelblue'); ax2.set_ylabel('Conversion Rate', color='red')
        ax.set_title('Product Recommendation Performance', fontweight='bold')
        ax.legend(loc='upper left'); ax2.legend(loc='upper right')
    plt.tight_layout()
    fig_path1 = "outputs/din_product_performance.png"
    plt.savefig(fig_path1, dpi=150); plt.close()

    fig, ax = plt.subplots(figsize=(10, 6))
    sample_idx = 0
    with torch.no_grad():
        be_s = be[sample_idx:sample_idx+1]; bp_s = bp[sample_idx:sample_idx+1]
        bm_s = bm[sample_idx:sample_idx+1]; cp_s = cp[sample_idx:sample_idx+1]
        B, L = be_s.size()
        e_emb = model.event_emb(be_s); p_emb = model.prod_emb(bp_s)
        beh_emb = torch.cat([e_emb, p_emb], dim=-1)
        cand_emb = model.cand_emb(cp_s)
        cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
        diff = cand_exp - beh_emb; prod_feat = cand_exp * beh_emb
        attn_in = torch.cat([cand_exp, beh_emb, diff, prod_feat], dim=-1)
        attn_w = torch.softmax(model.attn(attn_in).squeeze(-1).masked_fill(~bm_s, -1e9), dim=1)
    weights = attn_w[0].cpu().numpy()
    valid_len = bm_s[0].sum().item()
    valid_weights = weights[-valid_len:] if valid_len > 0 else weights
    ax.bar(range(len(valid_weights)), valid_weights, color='coral')
    ax.set_title('Attention Weights (Sample User)', fontweight='bold')
    ax.set_xlabel('Behavior Position'); ax.set_ylabel('Attention Weight')
    plt.tight_layout()
    fig_path2 = "outputs/din_attention.png"
    plt.savefig(fig_path2, dpi=150); plt.close()

    fig, ax = plt.subplots(figsize=(8, 6))
    fpr, tpr, _ = roc_curve(labels, preds)
    ax.plot(fpr, tpr, label=f'DIN AUC={auc:.3f}', linewidth=2, color='#2E86AB')
    ax.plot([0, 1], [0, 1], 'k--', alpha=0.5)
    ax.set_xlabel('False Positive Rate'); ax.set_ylabel('True Positive Rate')
    ax.set_title('ROC Curve - Product Recommendation', fontweight='bold')
    ax.legend(); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path3 = "outputs/din_roc.png"
    plt.savefig(fig_path3, dpi=150); plt.close()

    fig, ax = plt.subplots(figsize=(8, 6))
    prec, rec, _ = precision_recall_curve(labels, preds)
    ax.plot(rec, prec, label=f'DIN AP={ap:.3f}', linewidth=2, color='#A23B72')
    ax.set_xlabel('Recall'); ax.set_ylabel('Precision')
    ax.set_title('Precision-Recall Curve - Product Recommendation', fontweight='bold')
    ax.legend(); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path4 = "outputs/din_pr.png"
    plt.savefig(fig_path4, dpi=150); plt.close()

    result_text = f"""=== DIN Insurance Product Recommendation Model ===
Samples: {n_users} | Products: {len(all_products)}
Event vocab: {len(all_events)} | Product vocab: {len(all_products)}
Train: {len(train_df)} | Test: {len(test_df)}

--- Architecture ---
Embedding dim: {embedding_dim}
Attention: LocalActivationUnit (4-way interaction: [c, b, c-b, c*b])
MLP: [emb*3] → 256 → 128 → 1

--- Training Configuration ---
Epochs: {epochs} | Batch size: {batch_size} | LR: {lr}
Optimizer: Adam | Loss: BCEWithLogitsLoss

--- Test Set Performance ---
AUC: {auc:.4f}
AP: {ap:.4f}
F1: {f1:.4f}
Accuracy: {acc:.4f}

--- Model Insights ---
1. The attention mechanism learns how relevant each historical behavior is to the candidate product
2. High weights usually land on past views/purchases of products in the same category
3. New users (short histories) lean on statistical features; mature users lean on behavior sequences

--- Model Files ---
Model saved to: outputs/din_model.pt
Use the "Model Management" tab to upload it to the Hugging Face Hub"""

    return result_text, fig_path1, fig_path2, fig_path3, fig_path4

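
# Note: the local activation unit above scores each behavior b against the
# candidate c as a(c, b) = MLP([c; b; c - b; c * b]), following the DIN paper
# (arxiv:1706.06978); the masked softmax keeps padding out of the interest
# vector. Reloading outputs/din_model.pt requires re-declaring SimpleDIN (it is
# local to train_din_recommendation), which is why the checkpoint also stores
# the vocabularies and hyperparameters needed to rebuild an identical instance.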

# =============================================================================
# TabBERT anomaly detection
# =============================================================================

def generate_anomaly_data(n_normal=800, n_anomaly=200, seed=42):
    random.seed(seed); np.random.seed(seed)
    normal_records = []
    for i in range(n_normal):
        normal_records.append({
            'user_id': i, 'claim_amount': random.uniform(1000, 50000),
            'claim_type': random.choice(["health", "auto", "property"]),
            'days_since_policy': random.randint(30, 365),
            'num_previous_claims': random.randint(0, 3),
            'document_count': random.randint(3, 10),
            'processing_time_days': random.uniform(1, 15),
            'label': 0,
        })
    anomaly_records = []
    for i in range(n_anomaly):
        # Anomalies: large claims filed soon after policy start, with many prior
        # claims, few documents, and suspiciously fast processing.
        anomaly_records.append({
            'user_id': n_normal + i, 'claim_amount': random.uniform(50000, 200000),
            'claim_type': random.choice(["health", "auto", "property"]),
            'days_since_policy': random.randint(1, 15),
            'num_previous_claims': random.randint(5, 20),
            'document_count': random.randint(0, 2),
            'processing_time_days': random.uniform(0.1, 2),
            'label': 1,
        })
    df = pd.DataFrame(normal_records + anomaly_records)
    df = df.sample(frac=1, random_state=seed).reset_index(drop=True)
    return df


def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr, seed):
    if not TORCH_AVAILABLE:
        return "❌ PyTorch is not installed. Add torch to requirements.txt and restart the Space.", None, None, None, None

    torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
    df = generate_anomaly_data(n_normal=n_normal, n_anomaly=n_anomaly, seed=seed)

    claim_type_map = {"health": 0, "auto": 1, "property": 2}
    df['claim_type_enc'] = df['claim_type'].map(claim_type_map)

    feature_cols = ['claim_amount', 'claim_type_enc', 'days_since_policy',
                    'num_previous_claims', 'document_count', 'processing_time_days']

    X = df[feature_cols].values.astype(np.float32)
    y = df['label'].values.astype(np.float32)

    scaler = StandardScaler()
    X_s = scaler.fit_transform(X)

    X_train, X_test, y_train, y_test = train_test_split(
        X_s, y, test_size=0.2, random_state=seed, stratify=y
    )

    device = torch.device('cpu')

    class SimpleTabBERT(nn.Module):
        def __init__(self, input_dim=6, d_model=128, n_layers=4):
            super().__init__()
            self.input_proj = nn.Linear(input_dim, d_model)
            layers = []
            for _ in range(n_layers):
                layers.extend([
                    nn.Linear(d_model, d_model * 4), nn.ReLU(), nn.Dropout(0.2),
                    nn.Linear(d_model * 4, d_model), nn.LayerNorm(d_model), nn.ReLU(), nn.Dropout(0.2),
                ])
            self.transformer = nn.Sequential(*layers)
            self.head = nn.Sequential(nn.Linear(d_model, 256), nn.ReLU(), nn.Dropout(0.3),
                                      nn.Linear(256, 64), nn.ReLU(), nn.Linear(64, 1))

        def forward(self, x):
            x = self.input_proj(x)
            x = self.transformer(x)
            return self.head(x).squeeze(-1)

    model = SimpleTabBERT(input_dim=len(feature_cols), d_model=d_model).to(device)

    class FocalLoss(nn.Module):
        def __init__(self, alpha=0.25, gamma=2.0):
            super().__init__(); self.alpha = alpha; self.gamma = gamma

        def forward(self, inputs, targets):
            bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
            pt = torch.exp(-bce)
            return (self.alpha * (1 - pt) ** self.gamma * bce).mean()

    criterion = FocalLoss(alpha=0.25, gamma=2.0)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)

    X_train_t = torch.tensor(X_train, dtype=torch.float32).to(device)
    y_train_t = torch.tensor(y_train, dtype=torch.float32).to(device)
    X_test_t = torch.tensor(X_test, dtype=torch.float32).to(device)
    y_test_t = torch.tensor(y_test, dtype=torch.float32).to(device)

    for epoch in range(epochs):
        model.train(); epoch_loss = 0
        n_batches = math.ceil(len(X_train_t) / batch_size)
        for i in range(n_batches):
            start = i * batch_size; end = min(start + batch_size, len(X_train_t))
            xb = X_train_t[start:end]; yb = y_train_t[start:end]
            optimizer.zero_grad()
            outputs = model(xb); loss = criterion(outputs, yb)
            loss.backward(); optimizer.step()
            epoch_loss += loss.item()
        if (epoch + 1) % max(1, epochs // 5) == 0 or epoch == 0:
            print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/n_batches:.4f}")

    model.eval()
    with torch.no_grad():
        preds = torch.sigmoid(model(X_test_t)).cpu().numpy()

    auc = float(roc_auc_score(y_test, preds))
    ap = float(average_precision_score(y_test, preds))
    f1 = float(f1_score(y_test, preds > 0.5))

    # Create the output directory before any file is written.
    os.makedirs("outputs", exist_ok=True)

    # Save the checkpoint (scaler statistics included so inference can reproduce preprocessing).
    torch.save({
        'model_state_dict': model.state_dict(),
        'feature_cols': feature_cols,
        'd_model': d_model,
        'scaler_mean': scaler.mean_,
        'scaler_scale': scaler.scale_,
        'metrics': {'auc': auc, 'ap': ap, 'f1': f1}
    }, 'outputs/tabbert_model.pt')

    # Permutation importance: shuffle one feature at a time and measure the AUC drop.
    baseline_auc = auc
    importances = []
    for i in range(len(feature_cols)):
        X_perm = X_test.copy()
        np.random.shuffle(X_perm[:, i])
        X_perm_t = torch.tensor(X_perm, dtype=torch.float32).to(device)
        with torch.no_grad():
            perm_preds = torch.sigmoid(model(X_perm_t)).cpu().numpy()
        perm_auc = roc_auc_score(y_test, perm_preds)
        importances.append(baseline_auc - perm_auc)

    fig, ax = plt.subplots(figsize=(10, 6))
    colors = ['red' if imp > 0 else 'gray' for imp in importances]
    ax.barh(feature_cols, importances, color=colors)
    ax.set_title('TabularBERT - Feature Importance (Permutation)', fontweight='bold')
    ax.set_xlabel('AUC Drop (Importance)')
    plt.tight_layout()
    fig_path1 = "outputs/tabbert_feature_importance.png"
    plt.savefig(fig_path1, dpi=150); plt.close()

    fig, ax = plt.subplots(figsize=(10, 6))
    normal_scores = preds[y_test == 0]; anomaly_scores = preds[y_test == 1]
    ax.hist(normal_scores, bins=30, alpha=0.6, label=f'Normal (n={len(normal_scores)})', color='steelblue', edgecolor='white')
    ax.hist(anomaly_scores, bins=30, alpha=0.6, label=f'Anomaly (n={len(anomaly_scores)})', color='red', edgecolor='white')
    ax.axvline(x=0.5, color='black', linestyle='--', label='Threshold=0.5')
    ax.set_xlabel('Anomaly Score'); ax.set_ylabel('Count')
    ax.set_title('Anomaly Score Distribution', fontweight='bold')
    ax.legend(); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path2 = "outputs/tabbert_distribution.png"
    plt.savefig(fig_path2, dpi=150); plt.close()

    fig, ax = plt.subplots(figsize=(8, 6))
    fpr, tpr, _ = roc_curve(y_test, preds)
    ax.plot(fpr, tpr, label=f'TabBERT AUC={auc:.3f}', linewidth=2, color='#2E86AB')
    ax.plot([0, 1], [0, 1], 'k--', alpha=0.5)
    ax.set_xlabel('False Positive Rate'); ax.set_ylabel('True Positive Rate')
    ax.set_title('ROC Curve - Anomaly Detection', fontweight='bold')
    ax.legend(); ax.grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path3 = "outputs/tabbert_roc.png"
    plt.savefig(fig_path3, dpi=150); plt.close()

    fig, axs = plt.subplots(1, 2, figsize=(14, 6))
    cm = confusion_matrix(y_test, preds > 0.5)
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=axs[0], cbar=False)
    axs[0].set_title(f'Confusion Matrix @ threshold=0.5\n(F1={f1:.3f})', fontweight='bold')
    axs[0].set_xlabel('Predicted'); axs[0].set_ylabel('Actual')

    thresholds = np.linspace(0.1, 0.9, 50)
    f1s = [f1_score(y_test, preds > t) for t in thresholds]
    precs = [precision_score(y_test, preds > t, zero_division=0) for t in thresholds]
    recs = [recall_score(y_test, preds > t, zero_division=0) for t in thresholds]
    axs[1].plot(thresholds, f1s, label='F1', linewidth=2)
    axs[1].plot(thresholds, precs, label='Precision', linewidth=2)
    axs[1].plot(thresholds, recs, label='Recall', linewidth=2)
    best_t = thresholds[np.argmax(f1s)]
    axs[1].axvline(x=best_t, color='red', linestyle='--', label=f'Best F1 @ {best_t:.2f}')
    axs[1].set_xlabel('Threshold'); axs[1].set_ylabel('Score')
    axs[1].set_title('Threshold Analysis', fontweight='bold')
    axs[1].legend(); axs[1].grid(True, alpha=0.3)
    plt.tight_layout()
    fig_path4 = "outputs/tabbert_threshold.png"
    plt.savefig(fig_path4, dpi=150); plt.close()

    result_text = f"""=== TabularBERT Anomalous Behavior Detection Model ===
Samples: {len(df)} (normal: {n_normal}, anomalous: {n_anomaly})
Features: {len(feature_cols)}
Train: {len(y_train)} | Test: {len(y_test)}

--- Architecture ---
Input dim: {len(feature_cols)} → d_model: {d_model}
Transformer-style blocks: 4 (an MLP stack approximating hierarchical BERT)
Head: {d_model} → 256 → 64 → 1
Loss: Focal Loss (α=0.25, γ=2.0)

--- Training Configuration ---
Epochs: {epochs} | Batch size: {batch_size} | LR: {lr}
Optimizer: Adam

--- Test Set Performance ---
AUC: {auc:.4f}
AP: {ap:.4f}
F1: {f1:.4f} @ threshold=0.5
Best F1: {max(f1s):.4f} @ threshold={best_t:.2f}

--- Model Insights ---
1. Focal Loss focuses training on hard anomalous samples, countering class imbalance
2. Key anomaly signals: claim_amount (high), days_since_policy (short), document_count (low)
3. Suggested threshold: {best_t:.2f} (balances precision and recall)
4. A high AUC indicates the model separates normal from anomalous claims well

--- Model Files ---
Model saved to: outputs/tabbert_model.pt
Use the "Model Management" tab to upload it to the Hugging Face Hub"""

    return result_text, fig_path1, fig_path2, fig_path3, fig_path4

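
# Note: FocalLoss above implements FL(p_t) = -α · (1 - p_t)^γ · log(p_t) from
# arxiv:1708.02002, using the identity p_t = exp(-BCE): a well-classified
# sample has p_t ≈ 1, so (1 - p_t)^γ ≈ 0 and it is down-weighted, while hard
# samples keep close-to-full gradient. With γ = 2, a sample at p_t = 0.9
# contributes 100x less loss than one at p_t = 0.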

# =============================================================================
# Model management — save/load via the Hugging Face Hub
# =============================================================================

def save_model_to_hub(repo_id, token, model_type, notes):
    """Push the locally trained models to the Hugging Face Hub."""
    if not HFHUB_AVAILABLE:
        return "❌ huggingface_hub is not installed. Cannot save to the Hub.", None

    if not token or not token.strip():
        return "❌ A Hugging Face token is required. Get one at https://huggingface.co/settings/tokens.", None

    try:
        api = HfApi(token=token.strip())
        create_repo(repo_id, repo_type="model", exist_ok=True, token=token.strip())

        with tempfile.TemporaryDirectory() as tmpdir:
            tmpdir = Path(tmpdir)

            # Collect whichever model files the other tabs have produced.
            model_files = []
            artifacts = {}

            # sklearn models
            sklearn_path = Path("outputs/sklearn_model_artifacts.joblib")
            if sklearn_path.exists():
                artifacts['sklearn'] = joblib.load(sklearn_path)
                joblib.dump(artifacts['sklearn'], tmpdir / "sklearn_model.joblib")
                model_files.append("sklearn_model.joblib")

            # DIN model
            din_path = Path("outputs/din_model.pt")
            if din_path.exists():
                artifacts['din'] = torch.load(din_path, map_location='cpu')
                torch.save(artifacts['din'], tmpdir / "din_model.pt")
                model_files.append("din_model.pt")

            # TabBERT model
            tab_path = Path("outputs/tabbert_model.pt")
            if tab_path.exists():
                artifacts['tabbert'] = torch.load(tab_path, map_location='cpu')
                torch.save(artifacts['tabbert'], tmpdir / "tabbert_model.pt")
                model_files.append("tabbert_model.pt")

            if not model_files:
                return "❌ No trained models found. Train a model in another tab first.", None

            # Metadata
            metadata = {
                "model_type": model_type,
                "notes": notes,
                "files": model_files,
                "timestamp": datetime.datetime.now().isoformat(),
                "insurance_app_behavior": True,
                "version": "3.0"
            }
            with open(tmpdir / "model_metadata.json", "w") as f:
                json.dump(metadata, f, indent=2, ensure_ascii=False)

            # README (model card)
            readme = f"""# Insurance App Behavior Model

**Model Type:** {model_type}
**Notes:** {notes}
**Date:** {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}

## Files

| File | Description |
|------|-------------|
| `sklearn_model.joblib` | GBDT + Random Forest + Scaler (sklearn) |
| `din_model.pt` | Deep Interest Network (PyTorch) |
| `tabbert_model.pt` | TabularBERT Anomaly Detection (PyTorch) |
| `model_metadata.json` | Model metadata |

## Usage

```python
from huggingface_hub import hf_hub_download
import joblib
import torch

# Load sklearn models
model_path = hf_hub_download(repo_id="{repo_id}", filename="sklearn_model.joblib")
artifacts = joblib.load(model_path)
# artifacts['gbdt'], artifacts['rf'], artifacts['scaler']

# Load DIN
din_path = hf_hub_download(repo_id="{repo_id}", filename="din_model.pt")
din_ckpt = torch.load(din_path)
# din_ckpt['model_state_dict'], din_ckpt['event_vocab'], din_ckpt['product_vocab']
```

## Reference

- Deep Interest Network (KDD 2018): https://arxiv.org/abs/1706.06978
- TabBERT (arXiv 2011.01843): https://arxiv.org/abs/2011.01843
"""
            with open(tmpdir / "README.md", "w") as f:
                f.write(readme)

            api.upload_folder(
                folder_path=str(tmpdir),
                repo_id=repo_id,
                repo_type="model",
                token=token.strip()
            )

        return f"✅ Model saved successfully to https://huggingface.co/{repo_id}", None

    except Exception as e:
        import traceback
        return f"❌ Save failed: {str(e)}\n\n{traceback.format_exc()}", None

| 918 |
+
def load_model_from_hub(repo_id, token, model_type):
|
| 919 |
+
"""从 Hugging Face Hub 加载模型"""
|
| 920 |
+
if not HFHUB_AVAILABLE:
|
| 921 |
+
return "❌ huggingface_hub 未安装。无法从 Hub 加载。", None, None, None
|
| 922 |
+
|
| 923 |
+
if not token or not token.strip():
|
| 924 |
+
return "❌ 需要提供 Hugging Face Token。", None, None, None
|
| 925 |
+
|
| 926 |
+
try:
|
| 927 |
+
token = token.strip()
|
| 928 |
+
|
| 929 |
+
# 尝试下载元数据
|
| 930 |
+
metadata_path = hf_hub_download(repo_id=repo_id, filename="model_metadata.json", token=token, repo_type="model")
|
| 931 |
+
with open(metadata_path) as f:
|
| 932 |
+
metadata = json.load(f)
|
| 933 |
+
|
| 934 |
+
results = [f"✅ 成功加载模型: {repo_id}", f"模型类型: {metadata.get('model_type', 'Unknown')}",
|
| 935 |
+
f"备注: {metadata.get('notes', 'N/A')}", f"时间: {metadata.get('timestamp', 'N/A')}",
|
| 936 |
+
f"文件列表: {', '.join(metadata.get('files', []))}", "---"]
|
| 937 |
+
|
| 938 |
+
images = []
|
| 939 |
+
|
| 940 |
+
# 加载 sklearn 模型
|
| 941 |
+
if "sklearn_model.joblib" in metadata.get('files', []):
|
| 942 |
+
sklearn_path = hf_hub_download(repo_id=repo_id, filename="sklearn_model.joblib", token=token, repo_type="model")
|
| 943 |
+
artifacts = joblib.load(sklearn_path)
|
| 944 |
+
metrics = artifacts.get('metrics', {})
|
| 945 |
+
results.append(f"📦 sklearn 模型已加载")
|
| 946 |
+
results.append(f" GBDT AUC: {metrics.get('auc_gbdt', 'N/A')}")
|
| 947 |
+
results.append(f" RF AUC: {metrics.get('auc_rf', 'N/A')}")
|
| 948 |
+
results.append(f" 特征数: {len(artifacts.get('feature_names', []))}")
|
| 949 |
+
|
| 950 |
+
# 特征重要性图
|
| 951 |
+
if 'rf' in artifacts:
|
| 952 |
+
fig, ax = plt.subplots(figsize=(10,6))
|
| 953 |
+
fi = pd.DataFrame({'feature': artifacts['feature_names'], 'importance': artifacts['rf'].feature_importances_})
|
| 954 |
+
fi = fi.sort_values('importance', ascending=False).head(10)
|
| 955 |
+
ax.barh(fi['feature'][::-1], fi['importance'][::-1], color='steelblue')
|
| 956 |
+
ax.set_title('Loaded Model - Feature Importance', fontweight='bold')
|
| 957 |
+
plt.tight_layout()
|
| 958 |
+
img_path = "outputs/loaded_feature_importance.png"
|
| 959 |
+
plt.savefig(img_path, dpi=150); plt.close()
|
| 960 |
+
images.append(img_path)
|
| 961 |
+
|
| 962 |
+
# 加载 DIN
|
| 963 |
+
if "din_model.pt" in metadata.get('files', []):
|
| 964 |
+
din_path = hf_hub_download(repo_id=repo_id, filename="din_model.pt", token=token, repo_type="model")
|
| 965 |
+
din_ckpt = torch.load(din_path, map_location='cpu')
|
| 966 |
+
metrics = din_ckpt.get('metrics', {})
|
| 967 |
+
results.append(f"📦 DIN 模型已加载")
|
| 968 |
+
results.append(f" AUC: {metrics.get('auc', 'N/A')}")
|
| 969 |
+
results.append(f" Embedding dim: {din_ckpt.get('embedding_dim', 'N/A')}")
|
| 970 |
+
results.append(f" Event vocab: {len(din_ckpt.get('event_vocab', {}))}")
|
| 971 |
+
results.append(f" Product vocab: {len(din_ckpt.get('product_vocab', {}))}")
|
| 972 |
+
|
| 973 |
+
# 加载 TabBERT
|
| 974 |
+
if "tabbert_model.pt" in metadata.get('files', []):
|
| 975 |
+
tab_path = hf_hub_download(repo_id=repo_id, filename="tabbert_model.pt", token=token, repo_type="model")
|
| 976 |
+
tab_ckpt = torch.load(tab_path, map_location='cpu')
|
| 977 |
+
metrics = tab_ckpt.get('metrics', {})
|
| 978 |
+
results.append(f"📦 TabBERT 模型已加载")
|
| 979 |
+
results.append(f" AUC: {metrics.get('auc', 'N/A')}")
|
| 980 |
+
results.append(f" d_model: {tab_ckpt.get('d_model', 'N/A')}")
|
| 981 |
+
results.append(f" 特征: {', '.join(tab_ckpt.get('feature_cols', []))}")
|
| 982 |
+
|
| 983 |
+
return "\n".join(results), images[0] if images else None, images[1] if len(images) > 1 else None, images[2] if len(images) > 2 else None
|
| 984 |
+
|
| 985 |
+
except Exception as e:
|
| 986 |
+
import traceback
|
| 987 |
+
return f"❌ 加载失败: {str(e)}\n\n{traceback.format_exc()}", None, None, None


# =============================================================================
# Survival Analysis - lifelines + DeepSurv
# =============================================================================

def generate_survival_data(n_samples=2000, seed=42):
    """Generate synthetic insurance survival-analysis data."""
    random.seed(seed); np.random.seed(seed)

    records = []
    for i in range(n_samples):
        age = random.randint(18, 75)
        gender = random.choice([0, 1])  # 0 = female, 1 = male
        income = random.uniform(30000, 200000)
        policy_type = random.choice(["term_life", "whole_life", "health", "auto", "property"])
        premium_amount = random.uniform(1000, 50000)
        coverage_amount = premium_amount * random.uniform(10, 100)
        risk_score = random.uniform(0, 1)

        # Base hazard derived from the features
        base_hazard = (
            0.001 * (age - 18) +            # risk increases with age
            0.05 * gender +                 # gender effect
            0.00001 * (200000 - income) +   # lower income, higher risk
            0.1 * risk_score +              # underwriting risk score
            random.gauss(0, 0.05)           # noise
        )

        # Adjustment by policy type
        policy_hazard = {"term_life": 0.02, "whole_life": 0.01, "health": 0.05,
                         "auto": 0.03, "property": 0.01}[policy_type]

        total_hazard = base_hazard + policy_hazard
        total_hazard = max(total_hazard, 0.001)  # floor on the hazard

        # Exponential distribution: time ~ Exp(lambda)
        time_to_event = random.expovariate(total_hazard)

        # Right censoring: maximum observation window of 3650 days (10 years)
        max_observation = 3650
        event_observed = 1 if time_to_event < max_observation else 0
        duration = min(time_to_event, max_observation)

        records.append({
            'user_id': f"user_{i:04d}",
            'age': age,
            'gender': gender,
            'income': income,
            'policy_type': policy_type,
            'premium_amount': premium_amount,
            'coverage_amount': coverage_amount,
            'risk_score': risk_score,
            'duration': duration,
            'event_observed': event_observed,
        })

    return pd.DataFrame(records)
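
# Sampling note: random.expovariate(total_hazard) draws an exponential event time
# T with survival function S(t) = exp(-lambda * t) and mean 1/lambda days; any
# draw beyond the 3650-day window becomes a right-censored record
# (event_observed = 0, duration = 3650), which is exactly the data shape the
# Kaplan-Meier and Cox-PH estimators below expect.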


def train_survival_analysis(n_samples, test_size, seed, use_deep_surv, epochs, lr):
    """Train the survival-analysis models."""
    df = generate_survival_data(n_samples=n_samples, seed=seed)

    # Encode the categorical variable
    df['policy_type_enc'] = pd.Categorical(df['policy_type']).codes

    # Feature columns
    feature_cols = ['age', 'gender', 'income', 'policy_type_enc',
                    'premium_amount', 'coverage_amount', 'risk_score']

    # Train/test split
    train_df = df.sample(frac=1 - test_size, random_state=seed)
    test_df = df.drop(train_df.index)

    os.makedirs("outputs", exist_ok=True)

    # ===== 1. lifelines Cox-PH =====
    results = ["=== Insurance Claim/Purchase Time-to-Event Survival Analysis ===",
               f"Total samples: {len(df)} | Train: {len(train_df)} | Test: {len(test_df)}",
               f"Event rate: {df['event_observed'].mean():.1%} ({df['event_observed'].sum()}/{len(df)})",
               f"Mean observed duration: {df['duration'].mean():.0f} days", "---"]

    cph_figures = []

    if LIFELINES_AVAILABLE:
        # Kaplan-Meier curves
        fig, ax = plt.subplots(figsize=(10, 6))
        kmf = KaplanMeierFitter()

        # Overall
        kmf.fit(df['duration'], df['event_observed'], label='Overall')
        kmf.plot_survival_function(ax=ax, ci_show=True, color='steelblue', linewidth=2)

        # Grouped by gender
        for gender, color in [(0, '#E74C3C'), (1, '#2ECC71')]:
            sub = df[df['gender'] == gender]
            kmf.fit(sub['duration'], sub['event_observed'], label='Female' if gender == 0 else 'Male')
            kmf.plot_survival_function(ax=ax, ci_show=False, color=color, linestyle='--', linewidth=2)

        ax.set_title('Kaplan-Meier Survival Curve', fontsize=14, fontweight='bold')
        ax.set_xlabel('Duration (days)', fontsize=12)
        ax.set_ylabel('Survival Probability S(t)', fontsize=12)
        ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
        plt.tight_layout()
        km_path = "outputs/survival_kaplan_meier.png"
        plt.savefig(km_path, dpi=150); plt.close()
        cph_figures.append(km_path)
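
        # Kaplan-Meier estimates S(t) as the product over event times t_i <= t of
        # (1 - d_i / n_i), where d_i is the number of events at t_i and n_i the
        # number of subjects still at risk; censored subjects leave the risk set
        # without contributing an event.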

        # Cox-PH model
        cph = CoxPHFitter(penalizer=0.1)
        cph_train = train_df[feature_cols + ['duration', 'event_observed']].copy()

        try:
            cph.fit(cph_train, duration_col='duration', event_col='event_observed')

            # Coefficient visualization
            fig, ax = plt.subplots(figsize=(10, 6))
            summary = cph.summary.copy()
            summary['coef'] = summary['coef'].astype(float)
            summary['exp(coef)'] = summary['exp(coef)'].astype(float)
            summary = summary.sort_values('coef')

            colors = ['green' if c < 0 else 'red' for c in summary['coef']]
            ax.barh(summary.index, summary['coef'], color=colors, alpha=0.7, edgecolor='white')
            ax.axvline(x=0, color='black', linestyle='-', linewidth=0.5)
            ax.set_title('Cox-PH Coefficients (log Hazard Ratio)', fontsize=14, fontweight='bold')
            ax.set_xlabel('Coefficient')
            plt.tight_layout()
            coef_path = "outputs/survival_cox_coefficients.png"
            plt.savefig(coef_path, dpi=150); plt.close()
            cph_figures.append(coef_path)

            # Predicted survival functions (first 5 test samples)
            fig, ax = plt.subplots(figsize=(10, 6))
            test_subset = test_df.head(5)
            predictions = cph.predict_survival_function(test_subset[feature_cols])
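            # predict_survival_function returns a DataFrame indexed by time with
            # one column per input row, so each column plotted below is one test
            # subject's estimated S(t).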
            for i, col in enumerate(predictions.columns):
                ax.plot(predictions.index, predictions[col], label=f'Sample {i+1}', linewidth=2, alpha=0.8)
            ax.set_title('Predicted Survival Functions (Test Samples)', fontsize=14, fontweight='bold')
            ax.set_xlabel('Duration (days)', fontsize=12)
            ax.set_ylabel('Survival Probability', fontsize=12)
            ax.legend(fontsize=10); ax.grid(True, alpha=0.3)
            plt.tight_layout()
            pred_path = "outputs/survival_predictions.png"
            plt.savefig(pred_path, dpi=150); plt.close()
            cph_figures.append(pred_path)

            # Concordance Index
            from lifelines.utils import concordance_index
            pred_risk = cph.predict_partial_hazard(test_df[feature_cols])
            c_index = concordance_index(test_df['duration'], -pred_risk, test_df['event_observed'])
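            # C-index: the fraction of comparable (i, j) pairs whose predicted
            # ranking agrees with the observed event-time ordering (0.5 = random,
            # 1.0 = perfect). lifelines' concordance_index expects higher scores
            # to mean longer survival, hence the minus sign on the partial hazard
            # (higher hazard = earlier event).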

            results.append("--- lifelines Cox-PH ---")
            results.append(f"Concordance Index: {c_index:.4f}")
            results.append(f"Log-likelihood: {cph.log_likelihood_:.2f}")
            results.append(f"AIC: {cph.AIC_partial_:.2f}")
            results.append("")
            results.append("--- Cox-PH coefficients (top factors) ---")
            for idx, row in cph.summary.head(7).iterrows():
                hr = float(row['exp(coef)'])
                results.append(f"  {idx}: HR={hr:.3f} (p={row['p']:.4f})")

            results.append("")
            results.append("HR > 1: higher risk | HR < 1: lower risk")

        except Exception as e:
            results.append(f"⚠️ Cox-PH fit failed: {str(e)}")
    else:
        results.append("⚠️ lifelines is not installed. Statistical survival analysis is disabled.")

    # ===== 2. DeepSurv (PyTorch) =====
    deep_surv_result = ""
    if use_deep_surv and TORCH_AVAILABLE:
        results.append("--- DeepSurv (Neural Cox-PH) ---")

        X_train = train_df[feature_cols].values.astype(np.float32)
        X_test = test_df[feature_cols].values.astype(np.float32)

        scaler = StandardScaler()
        X_train_s = scaler.fit_transform(X_train)
        X_test_s = scaler.transform(X_test)

        T_train = train_df['duration'].values.astype(np.float32)
        E_train = train_df['event_observed'].values.astype(np.float32)
        T_test = test_df['duration'].values.astype(np.float32)
        E_test = test_df['event_observed'].values.astype(np.float32)

        device = torch.device('cpu')

        class DeepSurv(nn.Module):
            def __init__(self, input_dim, hidden_dims=(128, 64, 32), dropout=0.3):
                super().__init__()
                layers = []
                prev = input_dim
                for h in hidden_dims:
                    layers.extend([nn.Linear(prev, h), nn.ReLU(), nn.Dropout(dropout)])
                    prev = h
                layers.append(nn.Linear(prev, 1))
                self.net = nn.Sequential(*layers)

            def forward(self, x):
                return self.net(x).squeeze(-1)
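
        # DeepSurv keeps the Cox proportional-hazards form h(t|x) = h0(t) * exp(g(x))
        # but replaces the linear risk score beta^T x with the MLP g(x) above; the
        # network output is therefore a per-subject log hazard ratio.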

        model = DeepSurv(input_dim=len(feature_cols), hidden_dims=(128, 64, 32)).to(device)
        optimizer = torch.optim.Adam(model.parameters(), lr=lr)

        # Cox partial likelihood loss
        def cox_ph_loss(pred, time, event):
            """Negative Cox partial likelihood."""
            # Sort by time, descending
            idx = torch.argsort(time, descending=True)
            pred_sorted = pred[idx]
            event_sorted = event[idx]

            # logcumsumexp for numerical stability
            log_cumsum_h = torch.logcumsumexp(pred_sorted, dim=0)

            # Only event samples contribute
            loss = -torch.sum(event_sorted * (pred_sorted - log_cumsum_h)) / event_sorted.sum().clamp(min=1)
            return loss
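
        # The value above is the averaged negative log partial likelihood
        #   - sum over events i of [ g(x_i) - log( sum_{j: T_j >= T_i} exp(g(x_j)) ) ],
        # where the inner sum runs over the risk set still event-free at T_i.
        # Sorting by time in descending order turns each risk set into a prefix of
        # the array, so a single logcumsumexp pass yields every log-denominator.
        # Tied event times are effectively broken by the sort order, which is fine
        # here because the simulated times are continuous.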

        X_train_t = torch.tensor(X_train_s, dtype=torch.float32).to(device)
        T_train_t = torch.tensor(T_train, dtype=torch.float32).to(device)
        E_train_t = torch.tensor(E_train, dtype=torch.float32).to(device)

        # Training
        model.train()
        for epoch in range(epochs):
            optimizer.zero_grad()
            pred = model(X_train_t)
            loss = cox_ph_loss(pred, T_train_t, E_train_t)
            loss.backward()
            optimizer.step()

            if (epoch + 1) % max(1, epochs // 5) == 0 or epoch == 0:
                print(f"DeepSurv Epoch {epoch+1}/{epochs}, Loss: {loss.item():.4f}")

        # Evaluation
        model.eval()
        with torch.no_grad():
            X_test_t = torch.tensor(X_test_s, dtype=torch.float32).to(device)
            pred_test = model(X_test_t).cpu().numpy()

        # Concordance Index (note: this import and the stratified KM plot below
        # also require lifelines)
        from lifelines.utils import concordance_index
        deep_c_index = concordance_index(T_test, -pred_test, E_test)

        results.append(f"Concordance Index: {deep_c_index:.4f}")
        results.append(f"Training epochs: {epochs} | LR: {lr}")
        results.append("")
        results.append("--- DeepSurv insights ---")
        results.append("1. The network learns nonlinear feature interactions and captures complex risk patterns")
        results.append("2. Unlike linear Cox-PH, it can model joint effects such as age x income x risk score")
        results.append("3. It outputs a log hazard ratio: positive = higher risk, negative = lower risk")

        # Save the model
        torch.save({
            'model_state_dict': model.state_dict(),
            'feature_cols': feature_cols,
            'hidden_dims': [128, 64, 32],
            'scaler_mean': scaler.mean_,
            'scaler_scale': scaler.scale_,
            'metrics': {'concordance_index': deep_c_index}
        }, 'outputs/deepsurv_model.pt')
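
        # A minimal reload sketch for the checkpoint above (assuming the same
        # DeepSurv class definition is in scope):
        #
        #     ckpt = torch.load('outputs/deepsurv_model.pt', map_location='cpu')
        #     m = DeepSurv(input_dim=len(ckpt['feature_cols']),
        #                  hidden_dims=tuple(ckpt['hidden_dims']))
        #     m.load_state_dict(ckpt['model_state_dict']); m.eval()
        #     x_scaled = (x_raw - ckpt['scaler_mean']) / ckpt['scaler_scale']
        #
        # x_raw here is a hypothetical feature array ordered as ckpt['feature_cols'].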

        # Risk stratification visualization
        fig, ax = plt.subplots(figsize=(10, 6))
        risk_scores = pred_test
        risk_percentiles = np.percentile(risk_scores, [33, 66])

        low_risk = test_df[risk_scores < risk_percentiles[0]]
        mid_risk = test_df[(risk_scores >= risk_percentiles[0]) & (risk_scores < risk_percentiles[1])]
        high_risk = test_df[risk_scores >= risk_percentiles[1]]

        colors = ['#2ECC71', '#F39C12', '#E74C3C']
        labels = ['Low Risk (bottom 33%)', 'Medium Risk (33-66%)', 'High Risk (top 33%)']

        for subset, color, label in [(low_risk, colors[0], labels[0]),
                                     (mid_risk, colors[1], labels[1]),
                                     (high_risk, colors[2], labels[2])]:
            if len(subset) > 0:
                kmf = KaplanMeierFitter()
                kmf.fit(subset['duration'], subset['event_observed'], label=label)
                kmf.plot_survival_function(ax=ax, ci_show=False, color=color, linewidth=2.5)

        ax.set_title('Survival by DeepSurv Risk Strata', fontsize=14, fontweight='bold')
        ax.set_xlabel('Duration (days)', fontsize=12)
        ax.set_ylabel('Survival Probability', fontsize=12)
        ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
        plt.tight_layout()
        risk_path = "outputs/survival_risk_strata.png"
        plt.savefig(risk_path, dpi=150); plt.close()
        cph_figures.append(risk_path)

        deep_surv_result = f"DeepSurv C-index: {deep_c_index:.4f}"

    elif use_deep_surv and not TORCH_AVAILABLE:
        results.append("⚠️ PyTorch is not installed. DeepSurv is disabled.")

    # Summary footer
    results.append("---")
    results.append("All figures have been saved to the outputs/ directory")
    results.append("Model saved to: outputs/deepsurv_model.pt (when DeepSurv is enabled)")

    result_text = "\n".join(results)

    return result_text, cph_figures[0] if len(cph_figures) > 0 else None, \
           cph_figures[1] if len(cph_figures) > 1 else None, \
           cph_figures[2] if len(cph_figures) > 2 else None, \
           cph_figures[3] if len(cph_figures) > 3 else None, \
           df.head(20)
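
    # The 6-tuple above (report text, up to four figures, and a preview dataframe)
    # matches the outputs list wired to surv_btn.click() at the bottom of the file.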


# =============================================================================
# Gradio callbacks
# =============================================================================

def demo_train(n_users, n_events, test_size, random_state, use_cv):
    data = generate_synthetic_data(n_users=n_users, n_events_per_user=n_events, seed=random_state)
    engineer = InsuranceFeatureEngineer()
    features_list, labels = [], []
    for profile, label in data:
        f = engineer.extract_user_features(profile)
        if f: features_list.append(f); labels.append(label)
    return train_sklearn(features_list, labels, test_size, random_state, use_cv)


def csv_train(csv_file, label_col, test_size, random_state, use_cv):
    if csv_file is None:
        return "Please upload a CSV file first", None, None, None, None, None
    try:
        if isinstance(csv_file, str):
            df = pd.read_csv(csv_file)
        else:
            df = pd.read_csv(csv_file.name if hasattr(csv_file, 'name') else io.BytesIO(csv_file))
        label_col = label_col.strip() if label_col else None
        if label_col and label_col not in df.columns:
            return f"Label column '{label_col}' does not exist. Available columns: {list(df.columns)}", None, None, None, None, None
        profiles = parse_csv_to_profiles(df)
        engineer = InsuranceFeatureEngineer()
        features_list, labels = [], []
        for profile in profiles:
            f = engineer.extract_user_features(profile)
            if f:
                features_list.append(f)
                if label_col and label_col in df.columns:
                    user_df = df[df["user_id"] == profile.user_id]
                    labels.append(int(user_df[label_col].iloc[0]))
                else:
                    # Heuristic label when none is provided: no purchase, no renewal, low activity
                    is_high_risk = (f["has_purchased"] == 0 and f["has_renewed"] == 0 and f["total_events"] < 20)
                    labels.append(int(is_high_risk))
        if len(features_list) < 50:
            return f"Only {len(features_list)} valid samples; at least 50 are required", None, None, None, None, None
        return train_sklearn(features_list, labels, test_size, random_state, use_cv)
    except Exception as e:
        import traceback
        return f"Error: {str(e)}\n\n{traceback.format_exc()}", None, None, None, None, None


def show_csv_info(csv_file):
    if csv_file is None:
        return "Please upload a CSV file first", None
    try:
        if isinstance(csv_file, str):
            df = pd.read_csv(csv_file)
        else:
            df = pd.read_csv(csv_file.name if hasattr(csv_file, 'name') else io.BytesIO(csv_file))
        info = f"""=== CSV File Info ===
Rows: {len(df)} | Columns: {len(df.columns)}
Column names: {list(df.columns)}

=== First 5 rows ===
{df.head().to_string()}

=== Event type distribution (top 10) ===
{df['event_type'].value_counts().head(10).to_string() if 'event_type' in df.columns else 'no event_type column'}

=== Users: {df['user_id'].nunique() if 'user_id' in df.columns else 'N/A'} ===
=== Sessions: {df['session_id'].nunique() if 'session_id' in df.columns else 'N/A'} ==="""
        return info, df.head(20)
    except Exception as e:
        return f"Parse error: {str(e)}", None


# =============================================================================
# Gradio UI (7 tabs)
# =============================================================================

with gr.Blocks(title="🏥 Insurance App User Behavior Analysis & Model Training Platform v3.0", theme=gr.themes.Soft()) as demo:
    gr.Markdown("""# 🏥 Insurance App User Behavior Analysis & Model Training Platform v3.0

An **industrial-grade insurance user behavior analysis platform** built on recent research papers.

**Seven modules:** 🎲 Demo | 📁 CSV Upload | 🎯 Product Recommendation (DIN) | 🔍 Anomaly Detection (TabBERT) | 💾 Model Management | ⏱️ Survival Analysis | ❓ Help

**Reference papers:** [DIN](https://arxiv.org/abs/1706.06978) | [Churn Transformer](https://arxiv.org/abs/2309.14390) | [TabBERT](https://arxiv.org/abs/2011.01843) | [DeepSurv](https://arxiv.org/abs/1606.00931) | [RNN Survival](https://arxiv.org/abs/2304.00575)""")

    with gr.Tabs():
        # ===== Tab 1: Demo mode =====
        with gr.Tab("🎲 Demo"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("### Parameters")
                    n_users_slider = gr.Slider(500, 5000, value=2000, step=100, label="Number of users")
                    n_events_slider = gr.Slider(10, 100, value=50, step=5, label="Max events per user")
                    test_size_slider = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="Test set fraction")
                    random_seed = gr.Number(value=42, label="Random seed", precision=0)
                    use_cv_check = gr.Checkbox(value=False, label="Enable 5-fold cross-validation")
                    train_btn = gr.Button("🚀 Start training", variant="primary", size="lg")
                with gr.Column(scale=2):
                    demo_result = gr.Textbox(label="Training results", lines=25)
                    with gr.Row():
                        demo_img1 = gr.Image(label="Feature importance")
                        demo_img2 = gr.Image(label="PR curve")
                    with gr.Row():
                        demo_img3 = gr.Image(label="Confusion matrix")
                        demo_img4 = gr.Image(label="ROC curve")
                    with gr.Row():
                        demo_table = gr.Dataframe(label="Feature data sample")

        # ===== Tab 2: CSV upload =====
        with gr.Tab("📁 CSV Upload"):
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("""### 📤 Upload data
**Required columns:** `user_id`, `session_id`, `timestamp`, `event_type`, `page_id`
**Optional:** `product_id`, `amount`, `label`""")
                    csv_file = gr.File(label="Upload CSV file", file_types=[".csv"])
                    label_col_input = gr.Textbox(label="Label column (optional)", placeholder="e.g.: churn")
                    with gr.Row():
                        csv_test_size = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="Test set fraction")
                        csv_random_seed = gr.Number(value=42, label="Random seed", precision=0)
                    csv_use_cv = gr.Checkbox(value=False, label="Enable 5-fold cross-validation")
                    with gr.Row():
                        info_btn = gr.Button("📊 Inspect data", variant="secondary")
                        csv_train_btn = gr.Button("🚀 Train model", variant="primary", size="lg")
                with gr.Column(scale=2):
                    csv_info = gr.Textbox(label="CSV info", lines=15)
                    csv_preview = gr.Dataframe(label="Data preview")
            with gr.Row():
                csv_result = gr.Textbox(label="Training results", lines=25)
            with gr.Row():
                csv_img1 = gr.Image(label="Feature importance")
                csv_img2 = gr.Image(label="PR curve")
            with gr.Row():
                csv_img3 = gr.Image(label="Confusion matrix")
                csv_img4 = gr.Image(label="ROC curve")
            with gr.Row():
                csv_table = gr.Dataframe(label="Feature data sample")

        # ===== Tab 3: DIN product recommendation =====
        with gr.Tab("🎯 Product Recommendation (DIN)"):
            gr.Markdown("""### Deep Interest Network - Insurance Product Recommendation
Given a user's historical behavior sequence, an attention mechanism dynamically scores interest in each candidate insurance product.""")
            with gr.Row():
                with gr.Column(scale=1):
                    din_users = gr.Slider(500, 5000, value=2000, step=100, label="Number of users")
                    din_emb = gr.Slider(32, 256, value=64, step=32, label="Embedding dimension")
                    din_epochs = gr.Slider(5, 50, value=20, step=5, label="Training epochs")
                    din_batch = gr.Slider(32, 512, value=128, step=32, label="Batch size")
                    din_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="Learning rate")
                    din_seed = gr.Number(value=42, label="Random seed", precision=0)
                    din_btn = gr.Button("🚀 Train DIN model", variant="primary", size="lg")
                    if not TORCH_AVAILABLE:
                        gr.Markdown("⚠️ **PyTorch is not installed.** Add `torch>=2.0.0` to requirements.txt and restart.")
                with gr.Column(scale=2):
                    din_result = gr.Textbox(label="Training results", lines=25)
                    with gr.Row():
                        din_img1 = gr.Image(label="Recommendation performance")
                        din_img2 = gr.Image(label="Attention weight example")
                    with gr.Row():
                        din_img3 = gr.Image(label="ROC curve")
                        din_img4 = gr.Image(label="PR curve")

        # ===== Tab 4: TabBERT anomaly detection =====
        with gr.Tab("🔍 Anomaly Detection (TabBERT)"):
            gr.Markdown("""### TabularBERT - Claim Fraud / Anomaly Detection
A hierarchical Transformer architecture that learns cross-field dependencies and temporal patterns in claim records.""")
            with gr.Row():
                with gr.Column(scale=1):
                    tab_normal = gr.Slider(500, 2000, value=800, step=100, label="Normal samples")
                    tab_anomaly = gr.Slider(100, 1000, value=200, step=50, label="Anomalous samples")
                    tab_dmodel = gr.Slider(64, 256, value=128, step=64, label="Model dimension d_model")
                    tab_epochs = gr.Slider(10, 100, value=30, step=10, label="Training epochs")
                    tab_batch = gr.Slider(16, 256, value=64, step=16, label="Batch size")
                    tab_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="Learning rate")
                    tab_seed = gr.Number(value=42, label="Random seed", precision=0)
                    tab_btn = gr.Button("🚀 Train TabBERT model", variant="primary", size="lg")
                    if not TORCH_AVAILABLE:
                        gr.Markdown("⚠️ **PyTorch is not installed.** Add `torch>=2.0.0` to requirements.txt and restart.")
                with gr.Column(scale=2):
                    tab_result = gr.Textbox(label="Training results", lines=25)
                    with gr.Row():
                        tab_img1 = gr.Image(label="Feature importance")
                        tab_img2 = gr.Image(label="Anomaly score distribution")
                    with gr.Row():
                        tab_img3 = gr.Image(label="ROC curve")
                        tab_img4 = gr.Image(label="Confusion matrix & threshold analysis")

        # ===== Tab 5: Model management =====
        with gr.Tab("💾 Model Management"):
            gr.Markdown("""### Hugging Face Hub Model Management
Save trained models to the Hub, or load existing models from it.

**Get a token:** https://huggingface.co/settings/tokens""")
            with gr.Row():
                with gr.Column(scale=1):
                    gr.Markdown("#### Save a model to the Hub")
                    save_repo_id = gr.Textbox(label="Hub Repo ID", placeholder="e.g.: yourname/insurance-model-v1")
                    save_token = gr.Textbox(label="HF Token", placeholder="hf_xxxxx", type="password")
                    save_type = gr.Dropdown(["churn_prediction", "product_recommendation", "anomaly_detection", "all"],
                                            value="all", label="Model type")
                    save_notes = gr.Textbox(label="Notes", placeholder="Model description...")
                    save_btn = gr.Button("📤 Save to Hub", variant="primary")
                    save_result = gr.Textbox(label="Save result", lines=10)

                with gr.Column(scale=1):
                    gr.Markdown("#### Load a model from the Hub")
                    load_repo_id = gr.Textbox(label="Hub Repo ID", placeholder="e.g.: yourname/insurance-model-v1")
                    load_token = gr.Textbox(label="HF Token", placeholder="hf_xxxxx", type="password")
                    load_type = gr.Dropdown(["churn_prediction", "product_recommendation", "anomaly_detection", "all"],
                                            value="all", label="Model type")
                    load_btn = gr.Button("📥 Load from Hub", variant="primary")
                    load_result = gr.Textbox(label="Load result", lines=15)
            with gr.Row():
                load_img1 = gr.Image(label="Loaded model visualization 1")
                load_img2 = gr.Image(label="Loaded model visualization 2")
                load_img3 = gr.Image(label="Loaded model visualization 3")

        # ===== Tab 6: Survival analysis =====
        with gr.Tab("⏱️ Survival Analysis"):
            gr.Markdown("""### Insurance Claim/Purchase Time-to-Event Survival Analysis
Predicts the time from policy purchase to claim/purchase/churn, handling right-censored data (users whose event has not happened yet).

**Statistical:** lifelines Cox-PH + Kaplan-Meier | **Deep:** DeepSurv (Neural Cox-PH)""")
            with gr.Row():
                with gr.Column(scale=1):
                    surv_samples = gr.Slider(500, 5000, value=2000, step=100, label="Number of samples")
                    surv_test_size = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="Test set fraction")
                    surv_seed = gr.Number(value=42, label="Random seed", precision=0)
                    use_deep_surv = gr.Checkbox(value=True, label="Enable DeepSurv (PyTorch)")
                    deep_epochs = gr.Slider(10, 200, value=50, step=10, label="DeepSurv Epochs")
                    deep_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="DeepSurv LR")
                    surv_btn = gr.Button("🚀 Train survival models", variant="primary", size="lg")

                    if not LIFELINES_AVAILABLE:
                        gr.Markdown("⚠️ **lifelines is not installed.** Statistical survival analysis is disabled.")
                    if not TORCH_AVAILABLE:
                        gr.Markdown("⚠️ **PyTorch is not installed.** DeepSurv is disabled.")

                with gr.Column(scale=2):
                    surv_result = gr.Textbox(label="Training results", lines=30)
                    with gr.Row():
                        surv_img1 = gr.Image(label="Kaplan-Meier survival curves")
                        surv_img2 = gr.Image(label="Cox-PH coefficients")
                    with gr.Row():
                        surv_img3 = gr.Image(label="Predicted survival functions")
                        surv_img4 = gr.Image(label="DeepSurv risk strata")
                    with gr.Row():
                        surv_table = gr.Dataframe(label="Data sample")

        # ===== Tab 7: Help =====
        with gr.Tab("❓ Help"):
            gr.Markdown("""## 📚 Complete Usage Guide

### 1. Demo mode
Synthesizes insurance-app behavior data, auto-labels churn/retention, and trains GBDT + RF.

### 2. CSV upload
**Required columns:** `user_id`, `session_id`, `timestamp`, `event_type`, `page_id`
**Optional:** `product_id`, `amount`, `label`

### 3. DIN product recommendation
- Input: user behavior sequence + candidate insurance product
- Core: LocalActivationUnit attention mechanism
- Output: purchase probability + attention weight visualization

### 4. TabBERT anomaly detection
- Input: multi-field claim record features
- Loss: Focal Loss (handles the 1:4 class imbalance)
- Output: anomaly score + threshold analysis
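
Focal Loss, from the paper cited below: FL(p_t) = -α_t (1 - p_t)^γ log(p_t). The (1 - p_t)^γ factor down-weights well-classified examples so training concentrates on the rare anomalous claims.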

### 5. Model management
- Save: after training, models are written to `outputs/` and can be uploaded to the Hugging Face Hub in one click
- Load: download an existing model from the Hub and inspect its metrics and feature importance

### 6. Survival analysis
- **lifelines Cox-PH**: statistical baseline, interpretable coefficients, Kaplan-Meier curves
- **DeepSurv**: neural Cox-PH that learns nonlinear interactions and supports risk stratification
- **Right censoring**: users whose event has not yet occurred are handled automatically

### Event types (30)
Browse | Interact | Convert | Claim | Renewal | Other
---|---|---|---|---|---
page_view | quote_request | payment_success | claim_init | renewal_click | login
product_view | form_submit | policy_issued | claim_doc_upload | renewal_complete | logout
premium_calculator | document_upload | policy_select | claim_review | policy_cancel | app_uninstall
article_read | chat_init | payment_init | claim_approved | renewal_reminder |
faq_view | call_init |  | claim_rejected |  |
product_compare | video_consult |  |  |  |

### References
| Paper | Use here | arXiv |
|------|------|-------|
| Deep Interest Network | Product recommendation | [1706.06978](https://arxiv.org/abs/1706.06978) |
| SDIM | Long-horizon behavior modeling | [2205.10249](https://arxiv.org/abs/2205.10249) |
| TabBERT/TabFormer | Tabular time-series anomaly detection | [2011.01843](https://arxiv.org/abs/2011.01843) |
| Transformer Churn | Non-contractual churn prediction | [2309.14390](https://arxiv.org/abs/2309.14390) |
| DeepSurv | Survival analysis | [1606.00931](https://arxiv.org/abs/1606.00931) |
| RNN Survival | Purchase-timing prediction | [2304.00575](https://arxiv.org/abs/2304.00575) |
| Focal Loss | Imbalanced classification | [1708.02002](https://arxiv.org/abs/1708.02002) |
""")

    gr.Markdown("""---
<div align="center">
<b>Insurance App User Behavior Analysis & Model Training Platform v3.0</b> |
Author: <a href="https://huggingface.co/Stephanwu">Stephanwu</a>
</div>""")

    # ===== Event bindings =====
    train_btn.click(fn=demo_train, inputs=[n_users_slider, n_events_slider, test_size_slider, random_seed, use_cv_check],
                    outputs=[demo_result, demo_img1, demo_img2, demo_img3, demo_img4, demo_table])
    info_btn.click(fn=show_csv_info, inputs=[csv_file], outputs=[csv_info, csv_preview])
    csv_train_btn.click(fn=csv_train, inputs=[csv_file, label_col_input, csv_test_size, csv_random_seed, csv_use_cv],
                        outputs=[csv_result, csv_img1, csv_img2, csv_img3, csv_img4, csv_table])
    din_btn.click(fn=train_din_recommendation, inputs=[din_users, din_emb, din_epochs, din_batch, din_lr, din_seed],
                  outputs=[din_result, din_img1, din_img2, din_img3, din_img4])
    tab_btn.click(fn=train_tabbert_anomaly, inputs=[tab_normal, tab_anomaly, tab_dmodel, tab_epochs, tab_batch, tab_lr, tab_seed],
                  outputs=[tab_result, tab_img1, tab_img2, tab_img3, tab_img4])
    save_btn.click(fn=save_model_to_hub, inputs=[save_repo_id, save_token, save_type, save_notes],
                   outputs=[save_result])
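    # Note: save_model_to_hub returns a (message, None) pair while this binding has
    # a single output component, so Gradio may render the raw tuple. Trimming those
    # returns to just the message, or adding a second output here, would make the
    # two agree.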
    load_btn.click(fn=load_model_from_hub, inputs=[load_repo_id, load_token, load_type],
                   outputs=[load_result, load_img1, load_img2, load_img3])
    surv_btn.click(fn=train_survival_analysis, inputs=[surv_samples, surv_test_size, surv_seed, use_deep_surv, deep_epochs, deep_lr],
                   outputs=[surv_result, surv_img1, surv_img2, surv_img3, surv_img4, surv_table])


if __name__ == "__main__":
    demo.launch()
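    # On Hugging Face Spaces the bare launch() above is all that is needed; for
    # self-hosting one might pass standard options such as
    # demo.launch(server_name="0.0.0.0", server_port=7860).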