Stephanwu commited on
Commit
b5c9d30
·
verified ·
1 Parent(s): 2f6bb01

Add model management (HF Hub save/load) and survival analysis (lifelines + DeepSurv)

Browse files
Files changed (1) hide show
  1. app.py +811 -395
app.py CHANGED
@@ -1,22 +1,25 @@
1
  """
2
- 保险APP 用户行为分析 - Gradio Space (完整版)
3
- 支持: 演示模式 | CSV上传 | 产品推荐(DIN) | 异常检测(TabBERT)
4
 
5
  参考文献:
6
  - DIN: Deep Interest Network (KDD 2018, arxiv:1706.06978)
7
  - TabBERT: Tabular Transformers (arxiv:2011.01843)
8
  - Focal Loss: RetinaNet (ICCV 2017, arxiv:1708.02002)
 
 
9
  """
10
- import os, io, math, warnings, datetime, random, json
11
  from collections import Counter, defaultdict
12
  from dataclasses import dataclass, field
13
- from typing import List, Dict, Optional
 
14
 
15
  warnings.filterwarnings('ignore')
16
  import numpy as np
17
  import pandas as pd
18
  from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
19
- from sklearn.preprocessing import StandardScaler, LabelEncoder
20
  from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
21
  from sklearn.metrics import (
22
  roc_auc_score, f1_score, confusion_matrix,
@@ -30,20 +33,39 @@ import seaborn as sns
30
 
31
  import gradio as gr
32
 
33
- # PyTorch (可选, 用于深度学习模型)
34
  try:
35
  import torch
36
  import torch.nn as nn
37
  import torch.nn.functional as F
38
- from torch.utils.data import Dataset, DataLoader
39
  TORCH_AVAILABLE = True
40
  except ImportError:
41
  TORCH_AVAILABLE = False
42
- print("PyTorch not available. Deep learning models disabled.")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
43
 
44
 
45
  # =============================================================================
46
- # 数据模型 & 特征工程 (保持原有)
47
  # =============================================================================
48
 
49
  INSURANCE_EVENT_TYPES = {
@@ -56,12 +78,6 @@ INSURANCE_EVENT_TYPES = {
56
  "policy_cancel", "app_uninstall", "login", "logout",
57
  }
58
 
59
- BROWSE = {"page_view","product_view","premium_calculator","article_read","faq_view","product_compare"}
60
- INTERACT = {"quote_request","form_submit","document_upload","chat_init","call_init","video_consult","quote_result_view"}
61
- CONVERT = {"policy_select","payment_init","payment_success","policy_issued"}
62
- CLAIM = {"claim_init","claim_doc_upload","claim_review","claim_approved","claim_rejected"}
63
- RENEW = {"renewal_reminder","renewal_click","renewal_complete","policy_cancel"}
64
-
65
  @dataclass
66
  class InsuranceAppEvent:
67
  event_id: str; user_id: str; session_id: str; timestamp: int
@@ -79,6 +95,10 @@ class UserBehaviorProfile:
79
  user_id: str; sessions: List[UserSession] = field(default_factory=list)
80
 
81
 
 
 
 
 
82
  class InsuranceFeatureEngineer:
83
  def extract_user_features(self, profile):
84
  sessions = profile.sessions
@@ -97,11 +117,8 @@ class InsuranceFeatureEngineer:
97
  has_renewed = any(e.event_type == "renewal_complete" for e in all_events)
98
  has_claimed = any(e.event_type in ("claim_init","claim_approved") for e in all_events)
99
  support = all_type_counts.get("chat_init", 0) + all_type_counts.get("call_init", 0)
100
-
101
- # 计算行为序列 (用于DIN)
102
  event_seq = [e.event_type for e in all_events]
103
  product_seq = [e.product_id or "none" for e in all_events]
104
-
105
  return {
106
  "total_sessions": len(sessions), "total_events": total,
107
  "days_active": days_active, "avg_events_per_session": total / len(sessions),
@@ -122,9 +139,7 @@ class InsuranceFeatureEngineer:
122
  "peak_active_hour": Counter(datetime.datetime.fromtimestamp(e.timestamp/1000).hour for e in all_events).most_common(1)[0][0],
123
  "recent_7day_events": sum(1 for e in all_events if (last_ts-e.timestamp)<7*24*3600*1000),
124
  "recent_30day_events": sum(1 for e in all_events if (last_ts-e.timestamp)<30*24*3600*1000),
125
- # 序列特征 (用于深度学习模型)
126
- "_event_sequence": event_seq,
127
- "_product_sequence": product_seq,
128
  "_user_id": profile.user_id,
129
  }
130
 
@@ -142,7 +157,6 @@ def parse_csv_to_profiles(df):
142
  df["timestamp"] = pd.to_numeric(df["timestamp"], errors="coerce")
143
  df = df.dropna(subset=["timestamp", "event_type"])
144
  df["timestamp"] = df["timestamp"].astype(int)
145
-
146
  profiles = {}
147
  for (uid, sid), group in df.groupby(["user_id", "session_id"]):
148
  if uid not in profiles:
@@ -193,108 +207,24 @@ def generate_synthetic_data(n_users=2000, n_events_per_user=50, seed=42):
193
  return data
194
 
195
 
196
- def generate_product_recommendation_data(n_users=1000, seed=42):
197
- """生成产品推荐训练数据"""
198
- random.seed(seed); np.random.seed(seed)
199
- products = ["health_basic","health_premium","critical_illness","term_life",
200
- "auto_compulsory","auto_commercial","home","travel_domestic"]
201
- event_types = list(INSURANCE_EVENT_TYPES)
202
-
203
- records = []
204
- for u in range(n_users):
205
- user_id = u
206
- n_behaviors = random.randint(5, 30)
207
- behavior_events = []
208
- behavior_products = []
209
-
210
- # 生成用户历史行为
211
- for i in range(n_behaviors):
212
- et = random.choice(["page_view","product_view","quote_request","article_read"])
213
- behavior_events.append(et)
214
- behavior_products.append(random.choice(products))
215
-
216
- # 生成候选产品和标签
217
- candidate = random.choice(products)
218
- # 如果候选产品出现过在历史中, 更可能购买
219
- label = 1 if candidate in behavior_products else random.choices([0,1], weights=[0.7,0.3])[0]
220
-
221
- records.append({
222
- 'user_id': user_id,
223
- 'behavior_events': behavior_events,
224
- 'behavior_products': behavior_products,
225
- 'candidate_product': candidate,
226
- 'label': label,
227
- 'user_features': np.random.randn(20).astype(np.float32), # 模拟用户统计特征
228
- })
229
-
230
- return pd.DataFrame(records)
231
-
232
-
233
- def generate_anomaly_data(n_normal=800, n_anomaly=200, seed=42):
234
- """生成异常检测数据 (理赔记录)"""
235
- random.seed(seed); np.random.seed(seed)
236
-
237
- normal_records = []
238
- for i in range(n_normal):
239
- record = {
240
- 'user_id': i,
241
- 'claim_amount': random.uniform(1000, 50000),
242
- 'claim_type': random.choice(["health","auto","property"]),
243
- 'days_since_policy': random.randint(30, 365),
244
- 'num_previous_claims': random.randint(0, 3),
245
- 'document_count': random.randint(3, 10),
246
- 'processing_time_days': random.uniform(1, 15),
247
- 'label': 0, # 正常
248
- }
249
- normal_records.append(record)
250
-
251
- anomaly_records = []
252
- for i in range(n_anomaly):
253
- # 异常特征: 高金额、刚投保、多理赔、少材料、快处理
254
- record = {
255
- 'user_id': n_normal + i,
256
- 'claim_amount': random.uniform(50000, 200000),
257
- 'claim_type': random.choice(["health","auto","property"]),
258
- 'days_since_policy': random.randint(1, 15), # 刚投保就理赔
259
- 'num_previous_claims': random.randint(5, 20), # 多次理赔
260
- 'document_count': random.randint(0, 2), # 材料极少
261
- 'processing_time_days': random.uniform(0.1, 2), # 异常快
262
- 'label': 1, # 异常
263
- }
264
- anomaly_records.append(record)
265
-
266
- df = pd.DataFrame(normal_records + anomaly_records)
267
- df = df.sample(frac=1, random_state=seed).reset_index(drop=True) # 打乱
268
- return df
269
-
270
-
271
  # =============================================================================
272
- # 通用训练函数 (sklearn)
273
  # =============================================================================
274
 
275
  def train_sklearn(features_list, labels, test_size=0.2, random_state=42, use_cv=False):
276
  df = pd.DataFrame(features_list)
277
  df_full = df.copy()
278
-
279
- # 移除非数值列 (内部字段)
280
  drop_cols = [c for c in df.columns if c.startswith('_')]
281
- for c in drop_cols:
282
- df.pop(c)
283
  for c in df.columns:
284
  if df[c].dtype == 'object':
285
  df[c] = pd.to_numeric(df[c], errors='coerce').fillna(0)
286
  df = df.fillna(0).replace([np.inf, -np.inf], 0)
287
-
288
  X = df.values; y = np.array(labels)
289
  feature_names = list(df.columns)
290
-
291
- X_train, X_test, y_train, y_test = train_test_split(
292
- X, y, test_size=test_size, random_state=random_state, stratify=y
293
- )
294
-
295
  scaler = StandardScaler()
296
- X_train_s = scaler.fit_transform(X_train)
297
- X_test_s = scaler.transform(X_test)
298
 
299
  gbdt = GradientBoostingClassifier(n_estimators=200, max_depth=5, learning_rate=0.1, subsample=0.8, random_state=random_state)
300
  gbdt.fit(X_train_s, y_train)
@@ -376,7 +306,6 @@ def train_sklearn(features_list, labels, test_size=0.2, random_state=42, use_cv=
376
  result_text = f"""=== 模型训练结果 ===
377
  样本数: {len(y)} | 特征数: {len(feature_names)}
378
  训练集: {len(y_train)} | 测试集: {len(y_test)}
379
- 流失率: {y.mean():.1%} | 流失数: {int(y.sum())}
380
 
381
  --- GBDT ---
382
  AUC: {auc_gbdt:.4f}
@@ -394,58 +323,77 @@ AP: {ap_rf:.4f}
394
  --- 分类报告 (GBDT) ---
395
  {report}"""
396
 
 
 
 
 
 
 
 
 
 
 
 
397
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4, df_full
398
 
399
 
400
  # =============================================================================
401
- # 产品推荐 (DIN 简化版)
402
  # =============================================================================
403
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
404
  def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, seed):
405
- """训练 DIN 风格的产品推荐模型 (简化版, 使用 PyTorch 模拟)"""
406
  if not TORCH_AVAILABLE:
407
  return "❌ PyTorch 未安装。请在 requirements.txt 中添加 torch 并重启 Space。", None, None, None, None, None
408
 
409
  torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
410
-
411
- # 生成数据
412
  df = generate_product_recommendation_data(n_users=n_users, seed=seed)
413
 
414
- # 构建 vocab
415
  all_events = sorted(set(e for seq in df['behavior_events'] for e in seq))
416
  event_vocab = {e: i+1 for i, e in enumerate(all_events)}
417
  all_products = sorted(set(p for seq in df['behavior_products'] for p in seq) | set(df['candidate_product']))
418
  product_vocab = {p: i+1 for i, p in enumerate(all_products)}
419
 
420
- # 准备序列数据
421
  max_seq_len = 20
422
- behavior_events_padded = []
423
- behavior_products_padded = []
424
- behavior_masks = []
425
-
426
  for _, row in df.iterrows():
427
  e_seq = [event_vocab[e] for e in row['behavior_events'][-max_seq_len:]]
428
  p_seq = [product_vocab[p] for p in row['behavior_products'][-max_seq_len:]]
429
  mask = [1] * len(e_seq)
430
  if len(e_seq) < max_seq_len:
431
  pad = max_seq_len - len(e_seq)
432
- e_seq = [0]*pad + e_seq
433
- p_seq = [0]*pad + p_seq
434
- mask = [0]*pad + mask
435
- behavior_events_padded.append(e_seq)
436
- behavior_products_padded.append(p_seq)
437
- behavior_masks.append(mask)
438
-
439
- df['be'] = behavior_events_padded
440
- df['bp'] = behavior_products_padded
441
- df['bm'] = behavior_masks
442
  df['cp'] = df['candidate_product'].map(product_vocab)
443
 
444
- # 划分
445
  train_df = df.sample(frac=0.8, random_state=seed)
446
  test_df = df.drop(train_df.index)
447
 
448
- # 简单的 PyTorch 训练 (使用 Attention 的 MLP)
449
  device = torch.device('cpu')
450
 
451
  class SimpleDIN(nn.Module):
@@ -454,33 +402,22 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
454
  self.event_emb = nn.Embedding(num_events+1, d_model//2, padding_idx=0)
455
  self.prod_emb = nn.Embedding(num_products+1, d_model//2, padding_idx=0)
456
  self.cand_emb = nn.Embedding(num_products+1, d_model)
457
- self.attn = nn.Sequential(
458
- nn.Linear(d_model*4, 128), nn.ReLU(), nn.Linear(128, 1)
459
- )
460
- self.mlp = nn.Sequential(
461
- nn.Linear(d_model*3, 256), nn.ReLU(), nn.Dropout(0.3),
462
- nn.Linear(256, 128), nn.ReLU(), nn.Dropout(0.3),
463
- nn.Linear(128, 1)
464
- )
465
-
466
  def forward(self, be, bp, bm, cp):
467
  B = be.size(0); L = be.size(1)
468
- e_emb = self.event_emb(be) # (B,L,D/2)
469
- p_emb = self.prod_emb(bp) # (B,L,D/2)
470
- beh_emb = torch.cat([e_emb, p_emb], dim=-1) # (B,L,D)
471
- cand_emb = self.cand_emb(cp) # (B,D)
472
-
473
- # Attention
474
  cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
475
- diff = cand_exp - beh_emb
476
- prod = cand_exp * beh_emb
477
  attn_in = torch.cat([cand_exp, beh_emb, diff, prod], dim=-1)
478
- attn_w = self.attn(attn_in).squeeze(-1) # (B,L)
479
  attn_w = attn_w.masked_fill(~bm.bool(), -1e9)
480
  attn_w = torch.softmax(attn_w, dim=1)
481
- interest = (beh_emb * attn_w.unsqueeze(-1)).sum(dim=1) # (B,D)
482
-
483
- # MLP
484
  x = torch.cat([interest, cand_emb, interest*cand_emb], dim=-1)
485
  return self.mlp(x).squeeze(-1)
486
 
@@ -488,10 +425,8 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
488
  criterion = nn.BCEWithLogitsLoss()
489
  optimizer = torch.optim.Adam(model.parameters(), lr=lr)
490
 
491
- # 训练
492
  for epoch in range(epochs):
493
- model.train()
494
- epoch_loss = 0
495
  for i in range(0, len(train_df), batch_size):
496
  batch = train_df.iloc[i:i+batch_size]
497
  be = torch.tensor(np.stack(batch['be'].values), dtype=torch.long).to(device)
@@ -499,18 +434,14 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
499
  bm = torch.tensor(np.stack(batch['bm'].values), dtype=torch.bool).to(device)
500
  cp = torch.tensor(batch['cp'].values, dtype=torch.long).to(device)
501
  labels = torch.tensor(batch['label'].values, dtype=torch.float32).to(device)
502
-
503
  optimizer.zero_grad()
504
  outputs = model(be, bp, bm, cp)
505
  loss = criterion(outputs, labels)
506
- loss.backward()
507
- optimizer.step()
508
  epoch_loss += loss.item()
509
-
510
  if (epoch+1) % max(1, epochs//5) == 0 or epoch == 0:
511
- print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/len(train_df)*batch_size:.4f}")
512
 
513
- # 评估
514
  model.eval()
515
  with torch.no_grad():
516
  be = torch.tensor(np.stack(test_df['be'].values), dtype=torch.long).to(device)
@@ -518,7 +449,6 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
518
  bm = torch.tensor(np.stack(test_df['bm'].values), dtype=torch.bool).to(device)
519
  cp = torch.tensor(test_df['cp'].values, dtype=torch.long).to(device)
520
  labels = test_df['label'].values
521
-
522
  preds = torch.sigmoid(model(be, bp, bm, cp)).cpu().numpy()
523
 
524
  auc = float(roc_auc_score(labels, preds))
@@ -526,26 +456,33 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
526
  f1 = float(f1_score(labels, preds > 0.5))
527
  acc = float(accuracy_score(labels, preds > 0.5))
528
 
529
- # 可视化
530
  os.makedirs("outputs", exist_ok=True)
531
 
532
- # 产品推荐效果
 
 
 
 
 
 
 
 
 
 
 
533
  fig, ax = plt.subplots(figsize=(10,6))
534
  product_perf = {}
535
  for _, row in test_df.iterrows():
536
  prod = row['candidate_product']
537
- if prod not in product_perf:
538
- product_perf[prod] = {'preds': [], 'labels': []}
539
  idx = test_df.index.get_loc(_)
540
  product_perf[prod]['preds'].append(preds[idx])
541
  product_perf[prod]['labels'].append(row['label'])
542
-
543
  prod_aucs = []
544
  for prod, data in product_perf.items():
545
  if len(set(data['labels'])) > 1 and len(data['labels']) >= 5:
546
  prod_auc = roc_auc_score(data['labels'], data['preds'])
547
  prod_aucs.append((prod, prod_auc, np.mean(data['labels'])))
548
-
549
  if prod_aucs:
550
  prod_aucs.sort(key=lambda x: x[1], reverse=True)
551
  prods, aucs, rates = zip(*prod_aucs)
@@ -554,46 +491,36 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
554
  ax2 = ax.twinx()
555
  ax2.plot(x, rates, 'ro-', label='Conversion Rate')
556
  ax.set_xticks(x); ax.set_xticklabels(prods, rotation=45, ha='right')
557
- ax.set_ylabel('AUC', color='steelblue')
558
- ax2.set_ylabel('Conversion Rate', color='red')
559
- ax.set_title('Product Recommendation Performance by Product', fontweight='bold')
560
  ax.legend(loc='upper left'); ax2.legend(loc='upper right')
561
  plt.tight_layout()
562
  fig_path1 = "outputs/din_product_performance.png"
563
  plt.savefig(fig_path1, dpi=150); plt.close()
564
 
565
- # 注意力可视化 (示例)
566
  fig, ax = plt.subplots(figsize=(10,6))
567
  sample_idx = 0
568
  with torch.no_grad():
569
- be_s = be[sample_idx:sample_idx+1]
570
- bp_s = bp[sample_idx:sample_idx+1]
571
- bm_s = bm[sample_idx:sample_idx+1]
572
- cp_s = cp[sample_idx:sample_idx+1]
573
-
574
  B, L = be_s.size()
575
- e_emb = model.event_emb(be_s)
576
- p_emb = model.prod_emb(bp_s)
577
  beh_emb = torch.cat([e_emb, p_emb], dim=-1)
578
  cand_emb = model.cand_emb(cp_s)
579
  cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
580
- diff = cand_exp - beh_emb
581
- prod_feat = cand_exp * beh_emb
582
  attn_in = torch.cat([cand_exp, beh_emb, diff, prod_feat], dim=-1)
583
  attn_w = torch.softmax(model.attn(attn_in).squeeze(-1).masked_fill(~bm_s, -1e9), dim=1)
584
  weights = attn_w[0].cpu().numpy()
585
-
586
  valid_len = bm_s[0].sum().item()
587
  valid_weights = weights[-valid_len:] if valid_len > 0 else weights
588
  ax.bar(range(len(valid_weights)), valid_weights, color='coral')
589
  ax.set_title('Attention Weights (Sample User)', fontweight='bold')
590
- ax.set_xlabel('Behavior Position')
591
- ax.set_ylabel('Attention Weight')
592
  plt.tight_layout()
593
  fig_path2 = "outputs/din_attention.png"
594
  plt.savefig(fig_path2, dpi=150); plt.close()
595
 
596
- # ROC曲线
597
  fig, ax = plt.subplots(figsize=(8,6))
598
  fpr, tpr, _ = roc_curve(labels, preds)
599
  ax.plot(fpr, tpr, label=f'DIN AUC={auc:.3f}', linewidth=2, color='#2E86AB')
@@ -605,7 +532,6 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
605
  fig_path3 = "outputs/din_roc.png"
606
  plt.savefig(fig_path3, dpi=150); plt.close()
607
 
608
- # PR曲线
609
  fig, ax = plt.subplots(figsize=(8,6))
610
  prec, rec, _ = precision_recall_curve(labels, preds)
611
  ax.plot(rec, prec, label=f'DIN AP={ap:.3f}', linewidth=2, color='#A23B72')
@@ -618,17 +544,17 @@ def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, see
618
 
619
  result_text = f"""=== DIN 保险产品推荐模型 ===
620
  样本数: {n_users} | 产品数: {len(all_products)}
 
621
  训练集: {len(train_df)} | 测试集: {len(test_df)}
622
 
623
  --- 模型架构 ---
624
  Embedding dim: {embedding_dim}
625
- Event vocab: {len(all_events)} | Product vocab: {len(all_products)}
626
- Attention: LocalActivationUnit (4路交互特征)
627
  MLP: [emb*3] → 256 → 128 → 1
628
 
629
  --- 训练配置 ---
630
  Epochs: {epochs} | Batch size: {batch_size} | LR: {lr}
631
- Optimizer: Adam
632
 
633
  --- 测试集效果 ---
634
  AUC: {auc:.4f}
@@ -639,26 +565,55 @@ Accuracy: {acc:.4f}
639
  --- 模型洞察 ---
640
  1. 注意力机制自动学习用户历史行为中对候选产品的相关度
641
  2. 高权重通常分配给同类产品的历史浏览/购买行为
642
- 3. 新用户(历史短)依赖统计特征, 老用户依赖行为序列"""
 
 
 
 
643
 
644
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4
645
 
646
 
647
  # =============================================================================
648
- # 异��检测 (TabBERT 简化版)
649
  # =============================================================================
650
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
651
  def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr, seed):
652
- """训练 TabularBERT 风格的异常检测模型"""
653
  if not TORCH_AVAILABLE:
654
- return "❌ PyTorch 未安装。请在 requirements.txt 中添加 torch 并重启 Space。", None, None, None, None, None
655
 
656
  torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
657
-
658
- # 生成数据
659
  df = generate_anomaly_data(n_normal=n_normal, n_anomaly=n_anomaly, seed=seed)
660
 
661
- # 特征编码
662
  claim_type_map = {"health": 0, "auto": 1, "property": 2}
663
  df['claim_type_enc'] = df['claim_type'].map(claim_type_map)
664
 
@@ -668,7 +623,6 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
668
  X = df[feature_cols].values.astype(np.float32)
669
  y = df['label'].values.astype(np.float32)
670
 
671
- # 标准化
672
  scaler = StandardScaler()
673
  X_s = scaler.fit_transform(X)
674
 
@@ -676,34 +630,21 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
676
  X_s, y, test_size=0.2, random_state=seed, stratify=y
677
  )
678
 
679
- # 简单的 Tabular MLP (模拟 TabBERT)
680
  device = torch.device('cpu')
681
 
682
  class SimpleTabBERT(nn.Module):
683
  def __init__(self, input_dim=6, d_model=128, n_layers=4):
684
  super().__init__()
685
  self.input_proj = nn.Linear(input_dim, d_model)
686
-
687
- # 模拟 Transformer layers
688
  layers = []
689
  for _ in range(n_layers):
690
  layers.extend([
691
- nn.Linear(d_model, d_model*4),
692
- nn.ReLU(),
693
- nn.Dropout(0.2),
694
- nn.Linear(d_model*4, d_model),
695
- nn.LayerNorm(d_model),
696
- nn.ReLU(),
697
- nn.Dropout(0.2),
698
  ])
699
  self.transformer = nn.Sequential(*layers)
700
-
701
- self.head = nn.Sequential(
702
- nn.Linear(d_model, 256), nn.ReLU(), nn.Dropout(0.3),
703
- nn.Linear(256, 64), nn.ReLU(),
704
- nn.Linear(64, 1)
705
- )
706
-
707
  def forward(self, x):
708
  x = self.input_proj(x)
709
  x = self.transformer(x)
@@ -711,12 +652,9 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
711
 
712
  model = SimpleTabBERT(input_dim=len(feature_cols), d_model=d_model).to(device)
713
 
714
- # Focal Loss (不平衡数据)
715
  class FocalLoss(nn.Module):
716
  def __init__(self, alpha=0.25, gamma=2.0):
717
- super().__init__()
718
- self.alpha = alpha; self.gamma = gamma
719
-
720
  def forward(self, inputs, targets):
721
  bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
722
  pt = torch.exp(-bce)
@@ -725,35 +663,24 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
725
  criterion = FocalLoss(alpha=0.25, gamma=2.0)
726
  optimizer = torch.optim.Adam(model.parameters(), lr=lr)
727
 
728
- # 转换为 tensor
729
  X_train_t = torch.tensor(X_train, dtype=torch.float32).to(device)
730
  y_train_t = torch.tensor(y_train, dtype=torch.float32).to(device)
731
  X_test_t = torch.tensor(X_test, dtype=torch.float32).to(device)
732
  y_test_t = torch.tensor(y_test, dtype=torch.float32).to(device)
733
 
734
- # 训练
735
  for epoch in range(epochs):
736
- model.train()
737
- epoch_loss = 0
738
  n_batches = math.ceil(len(X_train_t) / batch_size)
739
-
740
  for i in range(n_batches):
741
- start = i * batch_size
742
- end = min(start + batch_size, len(X_train_t))
743
- xb = X_train_t[start:end]
744
- yb = y_train_t[start:end]
745
-
746
  optimizer.zero_grad()
747
- outputs = model(xb)
748
- loss = criterion(outputs, yb)
749
- loss.backward()
750
- optimizer.step()
751
  epoch_loss += loss.item()
752
-
753
  if (epoch+1) % max(1, epochs//5) == 0 or epoch == 0:
754
  print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/n_batches:.4f}")
755
 
756
- # 评估
757
  model.eval()
758
  with torch.no_grad():
759
  preds = torch.sigmoid(model(X_test_t)).cpu().numpy()
@@ -762,16 +689,18 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
762
  ap = float(average_precision_score(y_test, preds))
763
  f1 = float(f1_score(y_test, preds > 0.5))
764
 
765
- # 可视化
766
- os.makedirs("outputs", exist_ok=True)
 
 
 
 
 
 
 
767
 
768
- # 特征重要性 (通过梯度近似)
769
- model.eval()
770
- X_test_grad = torch.tensor(X_test, dtype=torch.float32, requires_grad=True).to(device)
771
- with torch.no_grad():
772
- outputs = model(X_test_grad)
773
 
774
- # 使用 permutation importance 近似
775
  baseline_auc = auc
776
  importances = []
777
  for i in range(len(feature_cols)):
@@ -792,10 +721,8 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
792
  fig_path1 = "outputs/tabbert_feature_importance.png"
793
  plt.savefig(fig_path1, dpi=150); plt.close()
794
 
795
- # 异常分数分布
796
  fig, ax = plt.subplots(figsize=(10,6))
797
- normal_scores = preds[y_test == 0]
798
- anomaly_scores = preds[y_test == 1]
799
  ax.hist(normal_scores, bins=30, alpha=0.6, label=f'Normal (n={len(normal_scores)})', color='steelblue', edgecolor='white')
800
  ax.hist(anomaly_scores, bins=30, alpha=0.6, label=f'Anomaly (n={len(anomaly_scores)})', color='red', edgecolor='white')
801
  ax.axvline(x=0.5, color='black', linestyle='--', label='Threshold=0.5')
@@ -806,7 +733,6 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
806
  fig_path2 = "outputs/tabbert_distribution.png"
807
  plt.savefig(fig_path2, dpi=150); plt.close()
808
 
809
- # ROC曲线
810
  fig, ax = plt.subplots(figsize=(8,6))
811
  fpr, tpr, _ = roc_curve(y_test, preds)
812
  ax.plot(fpr, tpr, label=f'TabBERT AUC={auc:.3f}', linewidth=2, color='#2E86AB')
@@ -818,21 +744,16 @@ def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr,
818
  fig_path3 = "outputs/tabbert_roc.png"
819
  plt.savefig(fig_path3, dpi=150); plt.close()
820
 
821
- # 混淆矩阵 + 阈值分析
822
  fig, axs = plt.subplots(1, 2, figsize=(14,6))
823
-
824
- # 混淆矩阵 @ 0.5
825
  cm = confusion_matrix(y_test, preds > 0.5)
826
  sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=axs[0], cbar=False)
827
  axs[0].set_title(f'Confusion Matrix @ threshold=0.5\n(F1={f1:.3f})', fontweight='bold')
828
  axs[0].set_xlabel('Predicted'); axs[0].set_ylabel('Actual')
829
 
830
- # 阈值分析
831
  thresholds = np.linspace(0.1, 0.9, 50)
832
  f1s = [f1_score(y_test, preds > t) for t in thresholds]
833
  precs = [precision_score(y_test, preds > t, zero_division=0) for t in thresholds]
834
  recs = [recall_score(y_test, preds > t, zero_division=0) for t in thresholds]
835
-
836
  axs[1].plot(thresholds, f1s, label='F1', linewidth=2)
837
  axs[1].plot(thresholds, precs, label='Precision', linewidth=2)
838
  axs[1].plot(thresholds, recs, label='Recall', linewidth=2)
@@ -870,17 +791,518 @@ Best F1: {max(f1s):.4f} @ threshold={best_t:.2f}
870
  1. Focal Loss 自动聚焦难分异常样本, 解决类别不平衡
871
  2. 关键异常特征: claim_amount(高), days_since_policy(短), document_count(少)
872
  3. 建议阈值: {best_t:.2f} (平衡精确率与召回率)
873
- 4. 高AUC说明模型能很好区分正常与异常理赔"""
 
 
 
 
874
 
875
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4
876
 
877
 
878
  # =============================================================================
879
- # Gradio 回调函数
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
880
  # =============================================================================
881
 
882
  def demo_train(n_users, n_events, test_size, random_state, use_cv):
883
- """演示模式"""
884
  data = generate_synthetic_data(n_users=n_users, n_events_per_user=n_events, seed=random_state)
885
  engineer = InsuranceFeatureEngineer()
886
  features_list, labels = [], []
@@ -891,7 +1313,6 @@ def demo_train(n_users, n_events, test_size, random_state, use_cv):
891
 
892
 
893
  def csv_train(csv_file, label_col, test_size, random_state, use_cv):
894
- """CSV模式"""
895
  if csv_file is None:
896
  return "请先上传CSV文件", None, None, None, None, None
897
  try:
@@ -899,15 +1320,12 @@ def csv_train(csv_file, label_col, test_size, random_state, use_cv):
899
  df = pd.read_csv(csv_file)
900
  else:
901
  df = pd.read_csv(csv_file.name if hasattr(csv_file, 'name') else io.BytesIO(csv_file))
902
-
903
  label_col = label_col.strip() if label_col else None
904
  if label_col and label_col not in df.columns:
905
  return f"标签列 '{label_col}' 不存在。可用列: {list(df.columns)}", None, None, None, None, None
906
-
907
  profiles = parse_csv_to_profiles(df)
908
  engineer = InsuranceFeatureEngineer()
909
  features_list, labels = [], []
910
-
911
  for profile in profiles:
912
  f = engineer.extract_user_features(profile)
913
  if f:
@@ -918,10 +1336,8 @@ def csv_train(csv_file, label_col, test_size, random_state, use_cv):
918
  else:
919
  is_high_risk = (f["has_purchased"] == 0 and f["has_renewed"] == 0 and f["total_events"] < 20)
920
  labels.append(int(is_high_risk))
921
-
922
  if len(features_list) < 50:
923
  return f"有效样本数 {len(features_list)} 太少,需要至少50个", None, None, None, None, None
924
-
925
  return train_sklearn(features_list, labels, test_size, random_state, use_cv)
926
  except Exception as e:
927
  import traceback
@@ -954,26 +1370,21 @@ def show_csv_info(csv_file):
954
 
955
 
956
  # =============================================================================
957
- # Gradio 界面 (5 Tabs)
958
  # =============================================================================
959
 
960
- with gr.Blocks(title="🏥 保险APP 用户行为分析模型训练平台", theme=gr.themes.Soft()) as demo:
961
- gr.Markdown("""# 🏥 保险APP 用户行为分析模型训练平台
962
 
963
- 基于最新研究论文构建的工��级保险用户行为分析平台。
964
 
965
- **大功能模块:**
966
- - 🎲 **演示模式**: 合成数据体验完整训练流程
967
- - 📁 **CSV上传**: 上传真实用户行为数据
968
- - 🎯 **产品推荐 (DIN)**: Deep Interest Network 保险产品推荐
969
- - 🔍 **异常检测 (TabBERT)**: 层次化Transformer理赔欺诈检测
970
- - ❓ **帮助文档**: 完整使用指南
971
 
972
- **参考论文:** Deep Interest Network (KDD 2018) | Transformer Churn Prediction (arXiv 2309.14390) | TabBERT (arXiv 2011.01843) | Focal Loss (ICCV 2017)""")
973
 
974
  with gr.Tabs():
975
  # ===== Tab 1: 演示模式 =====
976
- with gr.Tab("🎲 演示模式"):
977
  with gr.Row():
978
  with gr.Column(scale=1):
979
  gr.Markdown("### 参数设置")
@@ -995,23 +1406,14 @@ with gr.Blocks(title="🏥 保险APP 用户行为分析模型训练平台", them
995
  demo_table = gr.Dataframe(label="特征数据样本")
996
 
997
  # ===== Tab 2: CSV上传 =====
998
- with gr.Tab("📁 CSV数据上传"):
999
  with gr.Row():
1000
  with gr.Column(scale=1):
1001
  gr.Markdown("""### 📤 上传数据
1002
-
1003
  **必需列:** `user_id`, `session_id`, `timestamp`, `event_type`, `page_id`
1004
-
1005
- **可选列:** `product_id`, `amount`, `label`(流失标签)
1006
-
1007
- **示例:**
1008
- ```
1009
- user_id,session_id,timestamp,event_type,page_id,product_id,amount
1010
- user_001,sess_001,1704067200000,page_view,home,,
1011
- user_001,sess_001,1704067230000,product_view,product,health_basic,
1012
- ```""")
1013
  csv_file = gr.File(label="上传CSV文件", file_types=[".csv"])
1014
- label_col_input = gr.Textbox(label="标签列名 (可选)", placeholder="如: churn, is_churned")
1015
  with gr.Row():
1016
  csv_test_size = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="测试集比例")
1017
  csv_random_seed = gr.Number(value=42, label="随机种子", precision=0)
@@ -1033,19 +1435,12 @@ user_001,sess_001,1704067230000,product_view,product,health_basic,
1033
  with gr.Row():
1034
  csv_table = gr.Dataframe(label="特征数据样本")
1035
 
1036
- # ===== Tab 3: 产品推荐 (DIN) =====
1037
  with gr.Tab("🎯 产品推荐 (DIN)"):
1038
  gr.Markdown("""### Deep Interest Network - 保险产品推荐
1039
-
1040
- 基于用户历史行为序列, 通过注意力机制动态计算对候选保险产品的兴趣度, 预测购买概率。
1041
-
1042
- **核心架构:**
1043
- - 用户历史行为 → Embedding → LocalActivationUnit → 动态兴趣向量
1044
- - 候选产品Embedding → 拼接交互特征 → MLP → 购买概率""")
1045
-
1046
  with gr.Row():
1047
  with gr.Column(scale=1):
1048
- gr.Markdown("### DIN 参数")
1049
  din_users = gr.Slider(500, 5000, value=2000, step=100, label="用户数量")
1050
  din_emb = gr.Slider(32, 256, value=64, step=32, label="Embedding维度")
1051
  din_epochs = gr.Slider(5, 50, value=20, step=5, label="训练轮数")
@@ -1053,13 +1448,10 @@ user_001,sess_001,1704067230000,product_view,product,health_basic,
1053
  din_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="学习率")
1054
  din_seed = gr.Number(value=42, label="随机种子", precision=0)
1055
  din_btn = gr.Button("🚀 训练DIN模型", variant="primary", size="lg")
1056
-
1057
  if not TORCH_AVAILABLE:
1058
- gr.Markdown("⚠️ **PyTorch 未安装**。请在 requirements.txt 中添加 `torch>=2.0.0` 并重启 Space。")
1059
-
1060
  with gr.Column(scale=2):
1061
  din_result = gr.Textbox(label="训练结果", lines=25, show_copy_button=True)
1062
-
1063
  with gr.Row():
1064
  din_img1 = gr.Image(label="产品推荐效果")
1065
  din_img2 = gr.Image(label="注意力权重示例")
@@ -1067,20 +1459,12 @@ user_001,sess_001,1704067230000,product_view,product,health_basic,
1067
  din_img3 = gr.Image(label="ROC曲线")
1068
  din_img4 = gr.Image(label="PR曲线")
1069
 
1070
- # ===== Tab 4: 异常检测 (TabBERT) =====
1071
  with gr.Tab("🔍 异常检测 (TabBERT)"):
1072
  gr.Markdown("""### TabularBERT - 理赔欺诈/异常检测
1073
-
1074
- 层次化Transformer架构, 学习理赔记录的多字段关联和时序模式, 自动识别异常理赔行为。
1075
-
1076
- **核心架构:**
1077
- - Field-level Transformer: 单条理赔记录内字段关联
1078
- - Sequence-level Transformer: 跨理赔记录时序模式
1079
- - Focal Loss: 解决异常样本极少的不平衡问题""")
1080
-
1081
  with gr.Row():
1082
  with gr.Column(scale=1):
1083
- gr.Markdown("### TabBERT 参数")
1084
  tab_normal = gr.Slider(500, 2000, value=800, step=100, label="正常样本数")
1085
  tab_anomaly = gr.Slider(100, 1000, value=200, step=50, label="异常样本数")
1086
  tab_dmodel = gr.Slider(64, 256, value=128, step=64, label="模型维度 d_model")
@@ -1089,13 +1473,10 @@ user_001,sess_001,1704067230000,product_view,product,health_basic,
1089
  tab_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="学习率")
1090
  tab_seed = gr.Number(value=42, label="随机种子", precision=0)
1091
  tab_btn = gr.Button("🚀 训练TabBERT模型", variant="primary", size="lg")
1092
-
1093
  if not TORCH_AVAILABLE:
1094
- gr.Markdown("⚠️ **PyTorch 未安装**。请在 requirements.txt 中添加 `torch>=2.0.0` 并重启 Space。")
1095
-
1096
  with gr.Column(scale=2):
1097
  tab_result = gr.Textbox(label="训练结果", lines=25, show_copy_button=True)
1098
-
1099
  with gr.Row():
1100
  tab_img1 = gr.Image(label="特征重要性")
1101
  tab_img2 = gr.Image(label="异常分数分布")
@@ -1103,107 +1484,142 @@ user_001,sess_001,1704067230000,product_view,product,health_basic,
1103
  tab_img3 = gr.Image(label="ROC曲线")
1104
  tab_img4 = gr.Image(label="混淆矩阵与阈值分析")
1105
 
1106
- # ===== Tab 5: 帮助文档 =====
1107
- with gr.Tab(" 帮助文档"):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1108
  gr.Markdown("""## 📚 完整使用指南
1109
 
1110
  ### 1. 演示模式
1111
- - 调整用户数量和事件数, 系统自动生成合成保险APP行为数据
1112
- - 高流失风险用户模拟: 低频浏览、无转化、短会话
1113
- - 低流失风险用户模拟: 完整行为漏斗、有保单、有续保
1114
-
1115
- ### 2. CSV数据上传
1116
- **必需列:**
1117
- | 列名 | 类型 | 说明 |
1118
- |------|------|------|
1119
- | user_id | string/int | 用户唯一标识 |
1120
- | session_id | string/int | 会话标识 |
1121
- | timestamp | int | Unix时间戳(毫秒或秒) |
1122
- | event_type | string | 见下方事件类型表 |
1123
- | page_id | string | 页面标识 |
1124
-
1125
- **可选列:**
1126
- | 列名 | 类型 | 说明 |
1127
- |------|------|------|
1128
- | product_id | string | 保险产品ID |
1129
- | amount | float | 金额/保额 |
1130
- | label | int(0/1) | 流失标签 |
1131
-
1132
- ### 3. 事件类型定义
1133
-
1134
- | 类别 | 事件 | 业务含义 |
1135
- |------|------|---------|
1136
- | **浏览** | page_view, product_view, premium_calculator, article_read, faq_view, product_compare | 用户浏览保险产品页面 |
1137
- | **交互** | quote_request, form_submit, document_upload, chat_init, call_init, video_consult, quote_result_view | 用户深度参与行为 |
1138
- | **转化** | policy_select, payment_init, payment_success, policy_issued | 核心KPI转化行为 |
1139
- | **理赔** | claim_init, claim_doc_upload, claim_review, claim_approved, claim_rejected | 理赔全流程 |
1140
- | **续保** | renewal_reminder, renewal_click, renewal_complete, policy_cancel | 续保/流失信号 |
1141
-
1142
- ### 4. 模型对比
1143
-
1144
- | 模型 | 适用场景 | 核心特点 |
1145
- |------|---------|---------|
1146
- | **GBDT** | 流失预测基线 | 高精度, 可解释, 训练快 |
1147
- | **Random Forest** | 特征筛选 | 抗过拟合, 特征重要性直观 |
1148
- | **DIN** | 产品推荐 | 注意力动态兴趣, 候选产品自适应 |
1149
- | **TabBERT** | 异常检测 | 层次化Transformer, Focal Loss |
1150
-
1151
- ### 5. 评估指标
1152
-
1153
- | 指标 | 说明 | 适用场景 |
1154
- |------|------|---------|
1155
- | **AUC-ROC** | 分类器整体区分能力 | 所有二分类任务 |
1156
- | **F1-Score** | 精确率与召回率调和平均 | 不平衡数据 |
1157
- | **AP** | PR曲线下面积 | 正样本极少时 |
1158
- | **交叉验证** | 5折StratifiedKFold | 评估模型稳定性 |
1159
-
1160
- ### 6. 参考文献
1161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1162
  | 论文 | 应用 | arXiv |
1163
  |------|------|-------|
1164
  | Deep Interest Network | 产品推荐 | [1706.06978](https://arxiv.org/abs/1706.06978) |
1165
  | SDIM | 长期行为建模 | [2205.10249](https://arxiv.org/abs/2205.10249) |
1166
  | TabBERT/TabFormer | 表格时序异常检测 | [2011.01843](https://arxiv.org/abs/2011.01843) |
1167
  | Transformer Churn | 非合约流失预测 | [2309.14390](https://arxiv.org/abs/2309.14390) |
 
 
1168
  | Focal Loss | 不平衡分类 | [1708.02002](https://arxiv.org/abs/1708.02002) |
1169
  """)
1170
 
1171
  gr.Markdown("""---
1172
  <div align="center">
1173
- <b>保险APP 用户行为分析模型训练平台</b> |
1174
- <a href="https://arxiv.org/abs/1706.06978">DIN</a> |
1175
- <a href="https://arxiv.org/abs/2309.14390">Churn Transformer</a> |
1176
- <a href="https://arxiv.org/abs/2011.01843">TabBERT</a> |
1177
- <a href="https://arxiv.org/abs/1708.02002">Focal Loss</a> |
1178
  作者: <a href="https://huggingface.co/Stephanwu">Stephanwu</a>
1179
  </div>""")
1180
 
1181
  # ===== 事件绑定 =====
1182
- train_btn.click(
1183
- fn=demo_train,
1184
- inputs=[n_users_slider, n_events_slider, test_size_slider, random_seed, use_cv_check],
1185
- outputs=[demo_result, demo_img1, demo_img2, demo_img3, demo_img4, demo_table]
1186
- )
1187
- info_btn.click(
1188
- fn=show_csv_info,
1189
- inputs=[csv_file],
1190
- outputs=[csv_info, csv_preview]
1191
- )
1192
- csv_train_btn.click(
1193
- fn=csv_train,
1194
- inputs=[csv_file, label_col_input, csv_test_size, csv_random_seed, csv_use_cv],
1195
- outputs=[csv_result, csv_img1, csv_img2, csv_img3, csv_img4, csv_table]
1196
- )
1197
- din_btn.click(
1198
- fn=train_din_recommendation,
1199
- inputs=[din_users, din_emb, din_epochs, din_batch, din_lr, din_seed],
1200
- outputs=[din_result, din_img1, din_img2, din_img3, din_img4]
1201
- )
1202
- tab_btn.click(
1203
- fn=train_tabbert_anomaly,
1204
- inputs=[tab_normal, tab_anomaly, tab_dmodel, tab_epochs, tab_batch, tab_lr, tab_seed],
1205
- outputs=[tab_result, tab_img1, tab_img2, tab_img3, tab_img4]
1206
- )
1207
 
1208
  if __name__ == "__main__":
1209
  demo.launch()
 
1
  """
2
+ 保险APP 用户行为分析 - Gradio Space (终极 v3.0)
3
+ 支持: 演示模式 | CSV上传 | 产品推荐(DIN) | 异常检测(TabBERT) | 模型管理 | 生存分析
4
 
5
  参考文献:
6
  - DIN: Deep Interest Network (KDD 2018, arxiv:1706.06978)
7
  - TabBERT: Tabular Transformers (arxiv:2011.01843)
8
  - Focal Loss: RetinaNet (ICCV 2017, arxiv:1708.02002)
9
+ - DeepSurv: Cox-PH Neural Network (JAMIA 2018, arxiv:1606.00931)
10
+ - RNN Survival: arxiv:2304.00575
11
  """
12
+ import os, io, math, warnings, datetime, random, json, tempfile, pickle
13
  from collections import Counter, defaultdict
14
  from dataclasses import dataclass, field
15
+ from typing import List, Dict, Optional, Tuple
16
+ from pathlib import Path
17
 
18
  warnings.filterwarnings('ignore')
19
  import numpy as np
20
  import pandas as pd
21
  from sklearn.model_selection import train_test_split, StratifiedKFold, cross_val_score
22
+ from sklearn.preprocessing import StandardScaler, MinMaxScaler
23
  from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
24
  from sklearn.metrics import (
25
  roc_auc_score, f1_score, confusion_matrix,
 
33
 
34
  import gradio as gr
35
 
36
+ # PyTorch
37
  try:
38
  import torch
39
  import torch.nn as nn
40
  import torch.nn.functional as F
 
41
  TORCH_AVAILABLE = True
42
  except ImportError:
43
  TORCH_AVAILABLE = False
44
+ print("⚠️ PyTorch not available. Deep learning models disabled.")
45
+
46
+ # Hugging Face Hub (模型保存/加载)
47
+ try:
48
+ from huggingface_hub import HfApi, create_repo, hf_hub_download, login
49
+ HFHUB_AVAILABLE = True
50
+ except ImportError:
51
+ HFHUB_AVAILABLE = False
52
+ print("⚠️ huggingface_hub not available. Model save/load disabled.")
53
+
54
+ # lifelines (生存分析)
55
+ try:
56
+ from lifelines import CoxPHFitter, KaplanMeierFitter, NelsonAalenFitter
57
+ from lifelines.statistics import logrank_test
58
+ LIFELINES_AVAILABLE = True
59
+ except ImportError:
60
+ LIFELINES_AVAILABLE = False
61
+ print("⚠️ lifelines not available. Statistical survival analysis disabled.")
62
+
63
+ # joblib
64
+ import joblib
65
 
66
 
67
  # =============================================================================
68
+ # 全局配置 & 数据模型
69
  # =============================================================================
70
 
71
  INSURANCE_EVENT_TYPES = {
 
78
  "policy_cancel", "app_uninstall", "login", "logout",
79
  }
80
 
 
 
 
 
 
 
81
  @dataclass
82
  class InsuranceAppEvent:
83
  event_id: str; user_id: str; session_id: str; timestamp: int
 
95
  user_id: str; sessions: List[UserSession] = field(default_factory=list)
96
 
97
 
98
+ # =============================================================================
99
+ # 特征工程
100
+ # =============================================================================
101
+
102
  class InsuranceFeatureEngineer:
103
  def extract_user_features(self, profile):
104
  sessions = profile.sessions
 
117
  has_renewed = any(e.event_type == "renewal_complete" for e in all_events)
118
  has_claimed = any(e.event_type in ("claim_init","claim_approved") for e in all_events)
119
  support = all_type_counts.get("chat_init", 0) + all_type_counts.get("call_init", 0)
 
 
120
  event_seq = [e.event_type for e in all_events]
121
  product_seq = [e.product_id or "none" for e in all_events]
 
122
  return {
123
  "total_sessions": len(sessions), "total_events": total,
124
  "days_active": days_active, "avg_events_per_session": total / len(sessions),
 
139
  "peak_active_hour": Counter(datetime.datetime.fromtimestamp(e.timestamp/1000).hour for e in all_events).most_common(1)[0][0],
140
  "recent_7day_events": sum(1 for e in all_events if (last_ts-e.timestamp)<7*24*3600*1000),
141
  "recent_30day_events": sum(1 for e in all_events if (last_ts-e.timestamp)<30*24*3600*1000),
142
+ "_event_sequence": event_seq, "_product_sequence": product_seq,
 
 
143
  "_user_id": profile.user_id,
144
  }
145
 
 
157
  df["timestamp"] = pd.to_numeric(df["timestamp"], errors="coerce")
158
  df = df.dropna(subset=["timestamp", "event_type"])
159
  df["timestamp"] = df["timestamp"].astype(int)
 
160
  profiles = {}
161
  for (uid, sid), group in df.groupby(["user_id", "session_id"]):
162
  if uid not in profiles:
 
207
  return data
208
 
209
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
210
  # =============================================================================
211
+ # 通用 sklearn 训练函数
212
  # =============================================================================
213
 
214
  def train_sklearn(features_list, labels, test_size=0.2, random_state=42, use_cv=False):
215
  df = pd.DataFrame(features_list)
216
  df_full = df.copy()
 
 
217
  drop_cols = [c for c in df.columns if c.startswith('_')]
218
+ for c in drop_cols: df.pop(c)
 
219
  for c in df.columns:
220
  if df[c].dtype == 'object':
221
  df[c] = pd.to_numeric(df[c], errors='coerce').fillna(0)
222
  df = df.fillna(0).replace([np.inf, -np.inf], 0)
 
223
  X = df.values; y = np.array(labels)
224
  feature_names = list(df.columns)
225
+ X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state=random_state, stratify=y)
 
 
 
 
226
  scaler = StandardScaler()
227
+ X_train_s = scaler.fit_transform(X_train); X_test_s = scaler.transform(X_test)
 
228
 
229
  gbdt = GradientBoostingClassifier(n_estimators=200, max_depth=5, learning_rate=0.1, subsample=0.8, random_state=random_state)
230
  gbdt.fit(X_train_s, y_train)
 
306
  result_text = f"""=== 模型训练结果 ===
307
  样本数: {len(y)} | 特征数: {len(feature_names)}
308
  训练集: {len(y_train)} | 测试集: {len(y_test)}
 
309
 
310
  --- GBDT ---
311
  AUC: {auc_gbdt:.4f}
 
323
  --- 分类报告 (GBDT) ---
324
  {report}"""
325
 
326
+ # 保存模型到内存供后续保存到Hub
327
+ model_artifacts = {
328
+ 'gbdt': gbdt,
329
+ 'rf': rf,
330
+ 'scaler': scaler,
331
+ 'feature_names': feature_names,
332
+ 'metrics': {'auc_gbdt': auc_gbdt, 'f1_gbdt': f1_gbdt, 'auc_rf': auc_rf, 'ap_gbdt': ap_gbdt, 'ap_rf': ap_rf}
333
+ }
334
+ # 保存到本地临时文件
335
+ joblib.dump(model_artifacts, 'outputs/sklearn_model_artifacts.joblib')
336
+
337
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4, df_full
338
 
339
 
340
  # =============================================================================
341
+ # DIN 产品推荐
342
  # =============================================================================
343
 
344
+ def generate_product_recommendation_data(n_users=1000, seed=42):
345
+ random.seed(seed); np.random.seed(seed)
346
+ products = ["health_basic","health_premium","critical_illness","term_life",
347
+ "auto_compulsory","auto_commercial","home","travel_domestic"]
348
+ records = []
349
+ for u in range(n_users):
350
+ n_behaviors = random.randint(5, 30)
351
+ behavior_events = []
352
+ behavior_products = []
353
+ for i in range(n_behaviors):
354
+ et = random.choice(["page_view","product_view","quote_request","article_read"])
355
+ behavior_events.append(et)
356
+ behavior_products.append(random.choice(products))
357
+ candidate = random.choice(products)
358
+ label = 1 if candidate in behavior_products else random.choices([0,1], weights=[0.7,0.3])[0]
359
+ records.append({
360
+ 'user_id': u, 'behavior_events': behavior_events,
361
+ 'behavior_products': behavior_products,
362
+ 'candidate_product': candidate, 'label': label,
363
+ 'user_features': np.random.randn(20).astype(np.float32),
364
+ })
365
+ return pd.DataFrame(records)
366
+
367
+
368
  def train_din_recommendation(n_users, embedding_dim, epochs, batch_size, lr, seed):
 
369
  if not TORCH_AVAILABLE:
370
  return "❌ PyTorch 未安装。请在 requirements.txt 中添加 torch 并重启 Space。", None, None, None, None, None
371
 
372
  torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
 
 
373
  df = generate_product_recommendation_data(n_users=n_users, seed=seed)
374
 
 
375
  all_events = sorted(set(e for seq in df['behavior_events'] for e in seq))
376
  event_vocab = {e: i+1 for i, e in enumerate(all_events)}
377
  all_products = sorted(set(p for seq in df['behavior_products'] for p in seq) | set(df['candidate_product']))
378
  product_vocab = {p: i+1 for i, p in enumerate(all_products)}
379
 
 
380
  max_seq_len = 20
381
+ behavior_events_padded = []; behavior_products_padded = []; behavior_masks = []
 
 
 
382
  for _, row in df.iterrows():
383
  e_seq = [event_vocab[e] for e in row['behavior_events'][-max_seq_len:]]
384
  p_seq = [product_vocab[p] for p in row['behavior_products'][-max_seq_len:]]
385
  mask = [1] * len(e_seq)
386
  if len(e_seq) < max_seq_len:
387
  pad = max_seq_len - len(e_seq)
388
+ e_seq = [0]*pad + e_seq; p_seq = [0]*pad + p_seq; mask = [0]*pad + mask
389
+ behavior_events_padded.append(e_seq); behavior_products_padded.append(p_seq); behavior_masks.append(mask)
390
+
391
+ df['be'] = behavior_events_padded; df['bp'] = behavior_products_padded; df['bm'] = behavior_masks
 
 
 
 
 
 
392
  df['cp'] = df['candidate_product'].map(product_vocab)
393
 
 
394
  train_df = df.sample(frac=0.8, random_state=seed)
395
  test_df = df.drop(train_df.index)
396
 
 
397
  device = torch.device('cpu')
398
 
399
  class SimpleDIN(nn.Module):
 
402
  self.event_emb = nn.Embedding(num_events+1, d_model//2, padding_idx=0)
403
  self.prod_emb = nn.Embedding(num_products+1, d_model//2, padding_idx=0)
404
  self.cand_emb = nn.Embedding(num_products+1, d_model)
405
+ self.attn = nn.Sequential(nn.Linear(d_model*4, 128), nn.ReLU(), nn.Linear(128, 1))
406
+ self.mlp = nn.Sequential(nn.Linear(d_model*3, 256), nn.ReLU(), nn.Dropout(0.3),
407
+ nn.Linear(256, 128), nn.ReLU(), nn.Dropout(0.3), nn.Linear(128, 1))
 
 
 
 
 
 
408
  def forward(self, be, bp, bm, cp):
409
  B = be.size(0); L = be.size(1)
410
+ e_emb = self.event_emb(be)
411
+ p_emb = self.prod_emb(bp)
412
+ beh_emb = torch.cat([e_emb, p_emb], dim=-1)
413
+ cand_emb = self.cand_emb(cp)
 
 
414
  cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
415
+ diff = cand_exp - beh_emb; prod = cand_exp * beh_emb
 
416
  attn_in = torch.cat([cand_exp, beh_emb, diff, prod], dim=-1)
417
+ attn_w = self.attn(attn_in).squeeze(-1)
418
  attn_w = attn_w.masked_fill(~bm.bool(), -1e9)
419
  attn_w = torch.softmax(attn_w, dim=1)
420
+ interest = (beh_emb * attn_w.unsqueeze(-1)).sum(dim=1)
 
 
421
  x = torch.cat([interest, cand_emb, interest*cand_emb], dim=-1)
422
  return self.mlp(x).squeeze(-1)
423
 
 
425
  criterion = nn.BCEWithLogitsLoss()
426
  optimizer = torch.optim.Adam(model.parameters(), lr=lr)
427
 
 
428
  for epoch in range(epochs):
429
+ model.train(); epoch_loss = 0
 
430
  for i in range(0, len(train_df), batch_size):
431
  batch = train_df.iloc[i:i+batch_size]
432
  be = torch.tensor(np.stack(batch['be'].values), dtype=torch.long).to(device)
 
434
  bm = torch.tensor(np.stack(batch['bm'].values), dtype=torch.bool).to(device)
435
  cp = torch.tensor(batch['cp'].values, dtype=torch.long).to(device)
436
  labels = torch.tensor(batch['label'].values, dtype=torch.float32).to(device)
 
437
  optimizer.zero_grad()
438
  outputs = model(be, bp, bm, cp)
439
  loss = criterion(outputs, labels)
440
+ loss.backward(); optimizer.step()
 
441
  epoch_loss += loss.item()
 
442
  if (epoch+1) % max(1, epochs//5) == 0 or epoch == 0:
443
+ print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss*batch_size/len(train_df):.4f}")
444
 
 
445
  model.eval()
446
  with torch.no_grad():
447
  be = torch.tensor(np.stack(test_df['be'].values), dtype=torch.long).to(device)
 
449
  bm = torch.tensor(np.stack(test_df['bm'].values), dtype=torch.bool).to(device)
450
  cp = torch.tensor(test_df['cp'].values, dtype=torch.long).to(device)
451
  labels = test_df['label'].values
 
452
  preds = torch.sigmoid(model(be, bp, bm, cp)).cpu().numpy()
453
 
454
  auc = float(roc_auc_score(labels, preds))
 
456
  f1 = float(f1_score(labels, preds > 0.5))
457
  acc = float(accuracy_score(labels, preds > 0.5))
458
 
 
459
  os.makedirs("outputs", exist_ok=True)
460
 
461
+ # 保存 PyTorch 模型
462
+ torch.save({
463
+ 'model_state_dict': model.state_dict(),
464
+ 'event_vocab': event_vocab,
465
+ 'product_vocab': product_vocab,
466
+ 'embedding_dim': embedding_dim,
467
+ 'max_seq_len': max_seq_len,
468
+ 'num_events': len(all_events),
469
+ 'num_products': len(all_products),
470
+ 'metrics': {'auc': auc, 'ap': ap, 'f1': f1, 'acc': acc}
471
+ }, 'outputs/din_model.pt')
472
+
473
  fig, ax = plt.subplots(figsize=(10,6))
474
  product_perf = {}
475
  for _, row in test_df.iterrows():
476
  prod = row['candidate_product']
477
+ if prod not in product_perf: product_perf[prod] = {'preds': [], 'labels': []}
 
478
  idx = test_df.index.get_loc(_)
479
  product_perf[prod]['preds'].append(preds[idx])
480
  product_perf[prod]['labels'].append(row['label'])
 
481
  prod_aucs = []
482
  for prod, data in product_perf.items():
483
  if len(set(data['labels'])) > 1 and len(data['labels']) >= 5:
484
  prod_auc = roc_auc_score(data['labels'], data['preds'])
485
  prod_aucs.append((prod, prod_auc, np.mean(data['labels'])))
 
486
  if prod_aucs:
487
  prod_aucs.sort(key=lambda x: x[1], reverse=True)
488
  prods, aucs, rates = zip(*prod_aucs)
 
491
  ax2 = ax.twinx()
492
  ax2.plot(x, rates, 'ro-', label='Conversion Rate')
493
  ax.set_xticks(x); ax.set_xticklabels(prods, rotation=45, ha='right')
494
+ ax.set_ylabel('AUC', color='steelblue'); ax2.set_ylabel('Conversion Rate', color='red')
495
+ ax.set_title('Product Recommendation Performance', fontweight='bold')
 
496
  ax.legend(loc='upper left'); ax2.legend(loc='upper right')
497
  plt.tight_layout()
498
  fig_path1 = "outputs/din_product_performance.png"
499
  plt.savefig(fig_path1, dpi=150); plt.close()
500
 
 
501
  fig, ax = plt.subplots(figsize=(10,6))
502
  sample_idx = 0
503
  with torch.no_grad():
504
+ be_s = be[sample_idx:sample_idx+1]; bp_s = bp[sample_idx:sample_idx+1]
505
+ bm_s = bm[sample_idx:sample_idx+1]; cp_s = cp[sample_idx:sample_idx+1]
 
 
 
506
  B, L = be_s.size()
507
+ e_emb = model.event_emb(be_s); p_emb = model.prod_emb(bp_s)
 
508
  beh_emb = torch.cat([e_emb, p_emb], dim=-1)
509
  cand_emb = model.cand_emb(cp_s)
510
  cand_exp = cand_emb.unsqueeze(1).expand(B, L, -1)
511
+ diff = cand_exp - beh_emb; prod_feat = cand_exp * beh_emb
 
512
  attn_in = torch.cat([cand_exp, beh_emb, diff, prod_feat], dim=-1)
513
  attn_w = torch.softmax(model.attn(attn_in).squeeze(-1).masked_fill(~bm_s, -1e9), dim=1)
514
  weights = attn_w[0].cpu().numpy()
 
515
  valid_len = bm_s[0].sum().item()
516
  valid_weights = weights[-valid_len:] if valid_len > 0 else weights
517
  ax.bar(range(len(valid_weights)), valid_weights, color='coral')
518
  ax.set_title('Attention Weights (Sample User)', fontweight='bold')
519
+ ax.set_xlabel('Behavior Position'); ax.set_ylabel('Attention Weight')
 
520
  plt.tight_layout()
521
  fig_path2 = "outputs/din_attention.png"
522
  plt.savefig(fig_path2, dpi=150); plt.close()
523
 
 
524
  fig, ax = plt.subplots(figsize=(8,6))
525
  fpr, tpr, _ = roc_curve(labels, preds)
526
  ax.plot(fpr, tpr, label=f'DIN AUC={auc:.3f}', linewidth=2, color='#2E86AB')
 
532
  fig_path3 = "outputs/din_roc.png"
533
  plt.savefig(fig_path3, dpi=150); plt.close()
534
 
 
535
  fig, ax = plt.subplots(figsize=(8,6))
536
  prec, rec, _ = precision_recall_curve(labels, preds)
537
  ax.plot(rec, prec, label=f'DIN AP={ap:.3f}', linewidth=2, color='#A23B72')
 
544
 
545
  result_text = f"""=== DIN 保险产品推荐模型 ===
546
  样本数: {n_users} | 产品数: {len(all_products)}
547
+ Event vocab: {len(all_events)} | Product vocab: {len(all_products)}
548
  训练集: {len(train_df)} | 测试集: {len(test_df)}
549
 
550
  --- 模型架构 ---
551
  Embedding dim: {embedding_dim}
552
+ Attention: LocalActivationUnit (4路交互: [c, b, c-b, c*b])
 
553
  MLP: [emb*3] → 256 → 128 → 1
554
 
555
  --- 训练配置 ---
556
  Epochs: {epochs} | Batch size: {batch_size} | LR: {lr}
557
+ Optimizer: Adam | Loss: BCEWithLogitsLoss
558
 
559
  --- 测试集效果 ---
560
  AUC: {auc:.4f}
 
565
  --- 模型洞察 ---
566
  1. 注意力机制自动学习用户历史行为中对候选产品的相关度
567
  2. 高权重通常分配给同类产品的历史浏览/购买行为
568
+ 3. 新用户(历史短)依赖统计特征, 老用户依赖行为序列
569
+
570
+ --- 模型文件 ---
571
+ 模型已保存至: outputs/din_model.pt
572
+ 可使用"模型管理"Tab上传至Hugging Face Hub"""
573
 
574
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4
575
 
576
 
577
  # =============================================================================
578
+ # TabBERT 检测
579
  # =============================================================================
580
 
581
+ def generate_anomaly_data(n_normal=800, n_anomaly=200, seed=42):
582
+ random.seed(seed); np.random.seed(seed)
583
+ normal_records = []
584
+ for i in range(n_normal):
585
+ normal_records.append({
586
+ 'user_id': i, 'claim_amount': random.uniform(1000, 50000),
587
+ 'claim_type': random.choice(["health","auto","property"]),
588
+ 'days_since_policy': random.randint(30, 365),
589
+ 'num_previous_claims': random.randint(0, 3),
590
+ 'document_count': random.randint(3, 10),
591
+ 'processing_time_days': random.uniform(1, 15),
592
+ 'label': 0,
593
+ })
594
+ anomaly_records = []
595
+ for i in range(n_anomaly):
596
+ anomaly_records.append({
597
+ 'user_id': n_normal + i, 'claim_amount': random.uniform(50000, 200000),
598
+ 'claim_type': random.choice(["health","auto","property"]),
599
+ 'days_since_policy': random.randint(1, 15),
600
+ 'num_previous_claims': random.randint(5, 20),
601
+ 'document_count': random.randint(0, 2),
602
+ 'processing_time_days': random.uniform(0.1, 2),
603
+ 'label': 1,
604
+ })
605
+ df = pd.DataFrame(normal_records + anomaly_records)
606
+ df = df.sample(frac=1, random_state=seed).reset_index(drop=True)
607
+ return df
608
+
609
+
610
  def train_tabbert_anomaly(n_normal, n_anomaly, d_model, epochs, batch_size, lr, seed):
 
611
  if not TORCH_AVAILABLE:
612
+ return "❌ PyTorch 未安装。请在 requirements.txt 中添加 torch 并重启 Space。", None, None, None, None
613
 
614
  torch.manual_seed(seed); np.random.seed(seed); random.seed(seed)
 
 
615
  df = generate_anomaly_data(n_normal=n_normal, n_anomaly=n_anomaly, seed=seed)
616
 
 
617
  claim_type_map = {"health": 0, "auto": 1, "property": 2}
618
  df['claim_type_enc'] = df['claim_type'].map(claim_type_map)
619
 
 
623
  X = df[feature_cols].values.astype(np.float32)
624
  y = df['label'].values.astype(np.float32)
625
 
 
626
  scaler = StandardScaler()
627
  X_s = scaler.fit_transform(X)
628
 
 
630
  X_s, y, test_size=0.2, random_state=seed, stratify=y
631
  )
632
 
 
633
  device = torch.device('cpu')
634
 
635
  class SimpleTabBERT(nn.Module):
636
  def __init__(self, input_dim=6, d_model=128, n_layers=4):
637
  super().__init__()
638
  self.input_proj = nn.Linear(input_dim, d_model)
 
 
639
  layers = []
640
  for _ in range(n_layers):
641
  layers.extend([
642
+ nn.Linear(d_model, d_model*4), nn.ReLU(), nn.Dropout(0.2),
643
+ nn.Linear(d_model*4, d_model), nn.LayerNorm(d_model), nn.ReLU(), nn.Dropout(0.2),
 
 
 
 
 
644
  ])
645
  self.transformer = nn.Sequential(*layers)
646
+ self.head = nn.Sequential(nn.Linear(d_model, 256), nn.ReLU(), nn.Dropout(0.3),
647
+ nn.Linear(256, 64), nn.ReLU(), nn.Linear(64, 1))
 
 
 
 
 
648
  def forward(self, x):
649
  x = self.input_proj(x)
650
  x = self.transformer(x)
 
652
 
653
  model = SimpleTabBERT(input_dim=len(feature_cols), d_model=d_model).to(device)
654
 
 
655
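  # Focal Loss (arxiv:1708.02002): FL(p_t) = -alpha * (1 - p_t)^gamma * log(p_t).
  # The (1 - p_t)^gamma factor shrinks the loss on well-classified samples,
  # concentrating the gradient signal on hard, rare anomaly cases.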
  class FocalLoss(nn.Module):
656
  def __init__(self, alpha=0.25, gamma=2.0):
657
+ super().__init__(); self.alpha = alpha; self.gamma = gamma
 
 
658
  def forward(self, inputs, targets):
659
  bce = F.binary_cross_entropy_with_logits(inputs, targets, reduction='none')
660
  pt = torch.exp(-bce)
 
663
  criterion = FocalLoss(alpha=0.25, gamma=2.0)
664
  optimizer = torch.optim.Adam(model.parameters(), lr=lr)
665
 
 
666
  X_train_t = torch.tensor(X_train, dtype=torch.float32).to(device)
667
  y_train_t = torch.tensor(y_train, dtype=torch.float32).to(device)
668
  X_test_t = torch.tensor(X_test, dtype=torch.float32).to(device)
669
  y_test_t = torch.tensor(y_test, dtype=torch.float32).to(device)
670
 
 
671
  for epoch in range(epochs):
672
+ model.train(); epoch_loss = 0
 
673
  n_batches = math.ceil(len(X_train_t) / batch_size)
 
674
  for i in range(n_batches):
675
+ start = i * batch_size; end = min(start + batch_size, len(X_train_t))
676
+ xb = X_train_t[start:end]; yb = y_train_t[start:end]
 
 
 
677
  optimizer.zero_grad()
678
+ outputs = model(xb); loss = criterion(outputs, yb)
679
+ loss.backward(); optimizer.step()
 
 
680
  epoch_loss += loss.item()
 
681
  if (epoch+1) % max(1, epochs//5) == 0 or epoch == 0:
682
  print(f"Epoch {epoch+1}/{epochs}, Loss: {epoch_loss/n_batches:.4f}")
683
 
 
684
  model.eval()
685
  with torch.no_grad():
686
  preds = torch.sigmoid(model(X_test_t)).cpu().numpy()
 
689
  ap = float(average_precision_score(y_test, preds))
690
  f1 = float(f1_score(y_test, preds > 0.5))
691
 
692
+ # Save the model (create outputs/ first so torch.save does not fail on a fresh run)
693
+ os.makedirs("outputs", exist_ok=True)
694
+ torch.save({
695
+ 'model_state_dict': model.state_dict(),
696
+ 'feature_cols': feature_cols,
697
+ 'd_model': d_model,
698
+ 'scaler_mean': scaler.mean_,
699
+ 'scaler_scale': scaler.scale_,
700
+ 'metrics': {'auc': auc, 'ap': ap, 'f1': f1}
701
+ }, 'outputs/tabbert_model.pt')
702
 
 
 
 
703
 
 
704
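  # Feature importance relative to baseline_auc: each feature is scored by how
  # much the test AUC degrades when that feature's signal is removed.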
  baseline_auc = auc
705
  importances = []
706
  for i in range(len(feature_cols)):
 
721
  fig_path1 = "outputs/tabbert_feature_importance.png"
722
  plt.savefig(fig_path1, dpi=150); plt.close()
723
 
 
724
  fig, ax = plt.subplots(figsize=(10,6))
725
+ normal_scores = preds[y_test == 0]; anomaly_scores = preds[y_test == 1]
 
726
  ax.hist(normal_scores, bins=30, alpha=0.6, label=f'Normal (n={len(normal_scores)})', color='steelblue', edgecolor='white')
727
  ax.hist(anomaly_scores, bins=30, alpha=0.6, label=f'Anomaly (n={len(anomaly_scores)})', color='red', edgecolor='white')
728
  ax.axvline(x=0.5, color='black', linestyle='--', label='Threshold=0.5')
 
733
  fig_path2 = "outputs/tabbert_distribution.png"
734
  plt.savefig(fig_path2, dpi=150); plt.close()
735
 
 
736
  fig, ax = plt.subplots(figsize=(8,6))
737
  fpr, tpr, _ = roc_curve(y_test, preds)
738
  ax.plot(fpr, tpr, label=f'TabBERT AUC={auc:.3f}', linewidth=2, color='#2E86AB')
 
744
  fig_path3 = "outputs/tabbert_roc.png"
745
  plt.savefig(fig_path3, dpi=150); plt.close()
746
 
 
747
  fig, axs = plt.subplots(1, 2, figsize=(14,6))
 
 
748
  cm = confusion_matrix(y_test, preds > 0.5)
749
  sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', ax=axs[0], cbar=False)
750
  axs[0].set_title(f'Confusion Matrix @ threshold=0.5\n(F1={f1:.3f})', fontweight='bold')
751
  axs[0].set_xlabel('Predicted'); axs[0].set_ylabel('Actual')
752
 
 
753
  thresholds = np.linspace(0.1, 0.9, 50)
754
  f1s = [f1_score(y_test, preds > t) for t in thresholds]
755
  precs = [precision_score(y_test, preds > t, zero_division=0) for t in thresholds]
756
  recs = [recall_score(y_test, preds > t, zero_division=0) for t in thresholds]
 
757
  axs[1].plot(thresholds, f1s, label='F1', linewidth=2)
758
  axs[1].plot(thresholds, precs, label='Precision', linewidth=2)
759
  axs[1].plot(thresholds, recs, label='Recall', linewidth=2)
 
791
  1. Focal Loss automatically focuses on hard-to-classify anomaly samples, mitigating class imbalance
792
  2. Key anomaly signals: claim_amount (high), days_since_policy (short), document_count (low)
793
  3. Suggested threshold: {best_t:.2f} (balances precision and recall)
794
+ 4. A high AUC indicates the model separates normal from anomalous claims well
795
+
796
+ --- Model files ---
797
+ Model saved to: outputs/tabbert_model.pt
798
+ Use the "Model Management" tab to upload it to the Hugging Face Hub"""
799
 
800
  return result_text, fig_path1, fig_path2, fig_path3, fig_path4
801
 
802
 
803
  # =============================================================================
804
+ # Model management — save/load via the Hugging Face Hub
805
+ # =============================================================================
806
+
807
+ def save_model_to_hub(repo_id, token, model_type, notes):
808
+ """将训练好的模型保存到 Hugging Face Hub"""
809
+ if not HFHUB_AVAILABLE:
810
+ return "❌ huggingface_hub 未安装。无法保存到 Hub。", None
811
+
812
+ if not token or not token.strip():
813
+ return "❌ 需要提供 Hugging Face Token。在 https://huggingface.co/settings/tokens 获取。", None
814
+
815
+ try:
816
+ api = HfApi(token=token.strip())
817
+ create_repo(repo_id, repo_type="model", exist_ok=True, token=token.strip())
818
+
819
+ with tempfile.TemporaryDirectory() as tmpdir:
820
+ tmpdir = Path(tmpdir)
821
+
822
+ # Collect all model files
823
+ model_files = []
824
+ artifacts = {}
825
+
826
+ # Check for the sklearn model
827
+ sklearn_path = Path("outputs/sklearn_model_artifacts.joblib")
828
+ if sklearn_path.exists():
829
+ artifacts['sklearn'] = joblib.load(sklearn_path)
830
+ joblib.dump(artifacts['sklearn'], tmpdir / "sklearn_model.joblib")
831
+ model_files.append("sklearn_model.joblib")
832
+
833
+ # Check for the DIN model
834
+ din_path = Path("outputs/din_model.pt")
835
+ if din_path.exists():
836
+ artifacts['din'] = torch.load(din_path, map_location='cpu')
837
+ torch.save(artifacts['din'], tmpdir / "din_model.pt")
838
+ model_files.append("din_model.pt")
839
+
840
+ # Check for the TabBERT model
841
+ tab_path = Path("outputs/tabbert_model.pt")
842
+ if tab_path.exists():
843
+ artifacts['tabbert'] = torch.load(tab_path, map_location='cpu')
844
+ torch.save(artifacts['tabbert'], tmpdir / "tabbert_model.pt")
845
+ model_files.append("tabbert_model.pt")
846
+
847
+ if not model_files:
848
+ return "❌ 未找到训练好的模型。请先在其他Tab训练模型。", None
849
+
850
+ # Save run metadata
851
+ metadata = {
852
+ "model_type": model_type,
853
+ "notes": notes,
854
+ "files": model_files,
855
+ "timestamp": datetime.datetime.now().isoformat(),
856
+ "insurance_app_behavior": True,
857
+ "version": "3.0"
858
+ }
859
+ with open(tmpdir / "model_metadata.json", "w") as f:
860
+ json.dump(metadata, f, indent=2, ensure_ascii=False)
861
+
862
+ # Write the model card (README)
863
+ readme = f"""# Insurance App Behavior Model
864
+
865
+ **Model Type:** {model_type}
866
+ **Notes:** {notes}
867
+ **Date:** {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}
868
+
869
+ ## Files
870
+
871
+ | File | Description |
872
+ |------|-------------|
873
+ | `sklearn_model.joblib` | GBDT + Random Forest + Scaler (sklearn) |
874
+ | `din_model.pt` | Deep Interest Network (PyTorch) |
875
+ | `tabbert_model.pt` | TabularBERT Anomaly Detection (PyTorch) |
876
+ | `model_metadata.json` | Model metadata |
877
+
878
+ ## Usage
879
+
880
+ ```python
881
+ from huggingface_hub import hf_hub_download
882
+ import joblib
883
+ import torch
884
+
885
+ # Load sklearn models
886
+ model_path = hf_hub_download(repo_id="{repo_id}", filename="sklearn_model.joblib")
887
+ artifacts = joblib.load(model_path)
888
+ # artifacts['gbdt'], artifacts['rf'], artifacts['scaler']
889
+
890
+ # Load DIN
891
+ din_path = hf_hub_download(repo_id="{repo_id}", filename="din_model.pt")
892
+ din_ckpt = torch.load(din_path)
893
+ # din_ckpt['model_state_dict'], din_ckpt['event_vocab'], din_ckpt['product_vocab']
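+
+ # Load TabBERT (checkpoint keys mirror the save format used by this app)
+ tab_path = hf_hub_download(repo_id="{repo_id}", filename="tabbert_model.pt")
+ tab_ckpt = torch.load(tab_path)
+ # tab_ckpt['model_state_dict'], tab_ckpt['feature_cols'], tab_ckpt['scaler_mean'], tab_ckpt['scaler_scale']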
894
+ ```
895
+
896
+ ## Reference
897
+
898
+ - Deep Interest Network (KDD 2018): https://arxiv.org/abs/1706.06978
899
+ - TabBERT (arXiv 2011.01843): https://arxiv.org/abs/2011.01843
900
+ """
901
+ with open(tmpdir / "README.md", "w") as f:
902
+ f.write(readme)
903
+
904
+ api.upload_folder(
905
+ folder_path=str(tmpdir),
906
+ repo_id=repo_id,
907
+ repo_type="model",
908
+ token=token.strip()
909
+ )
910
+
911
+ return f"✅ 模型已成功保存到 https://huggingface.co/{repo_id}", None
912
+
913
+ except Exception as e:
914
+ import traceback
915
+ return f"❌ 保存失败: {str(e)}\n\n{traceback.format_exc()}", None
916
+
917
+
918
+ def load_model_from_hub(repo_id, token, model_type):
919
+ """从 Hugging Face Hub 加载模型"""
920
+ if not HFHUB_AVAILABLE:
921
+ return "❌ huggingface_hub 未安装。无法从 Hub 加载。", None, None, None
922
+
923
+ if not token or not token.strip():
924
+ return "❌ 需要提供 Hugging Face Token。", None, None, None
925
+
926
+ try:
927
+ token = token.strip()
928
+
929
+ # Fetch the metadata file first
930
+ metadata_path = hf_hub_download(repo_id=repo_id, filename="model_metadata.json", token=token, repo_type="model")
931
+ with open(metadata_path) as f:
932
+ metadata = json.load(f)
933
+
934
+ results = [f"✅ 成功加载模型: {repo_id}", f"模型类型: {metadata.get('model_type', 'Unknown')}",
935
+ f"备注: {metadata.get('notes', 'N/A')}", f"时间: {metadata.get('timestamp', 'N/A')}",
936
+ f"文件列表: {', '.join(metadata.get('files', []))}", "---"]
937
+
938
+ images = []
939
+
940
+ # Load the sklearn models
941
+ if "sklearn_model.joblib" in metadata.get('files', []):
942
+ sklearn_path = hf_hub_download(repo_id=repo_id, filename="sklearn_model.joblib", token=token, repo_type="model")
943
+ artifacts = joblib.load(sklearn_path)
944
+ metrics = artifacts.get('metrics', {})
945
+ results.append(f"📦 sklearn 模型已加载")
946
+ results.append(f" GBDT AUC: {metrics.get('auc_gbdt', 'N/A')}")
947
+ results.append(f" RF AUC: {metrics.get('auc_rf', 'N/A')}")
948
+ results.append(f" 特征数: {len(artifacts.get('feature_names', []))}")
949
+
950
+ # Feature-importance plot
951
+ if 'rf' in artifacts:
952
+ fig, ax = plt.subplots(figsize=(10,6))
953
+ fi = pd.DataFrame({'feature': artifacts['feature_names'], 'importance': artifacts['rf'].feature_importances_})
954
+ fi = fi.sort_values('importance', ascending=False).head(10)
955
+ ax.barh(fi['feature'][::-1], fi['importance'][::-1], color='steelblue')
956
+ ax.set_title('Loaded Model - Feature Importance', fontweight='bold')
957
+ plt.tight_layout()
958
+ img_path = "outputs/loaded_feature_importance.png"
959
+ plt.savefig(img_path, dpi=150); plt.close()
960
+ images.append(img_path)
961
+
962
+ # Load DIN (requires torch)
963
+ if "din_model.pt" in metadata.get('files', []) and TORCH_AVAILABLE:
964
+ din_path = hf_hub_download(repo_id=repo_id, filename="din_model.pt", token=token, repo_type="model")
965
+ din_ckpt = torch.load(din_path, map_location='cpu')
966
+ metrics = din_ckpt.get('metrics', {})
967
+ results.append(f"📦 DIN 模型已加载")
968
+ results.append(f" AUC: {metrics.get('auc', 'N/A')}")
969
+ results.append(f" Embedding dim: {din_ckpt.get('embedding_dim', 'N/A')}")
970
+ results.append(f" Event vocab: {len(din_ckpt.get('event_vocab', {}))}")
971
+ results.append(f" Product vocab: {len(din_ckpt.get('product_vocab', {}))}")
972
+
973
+ # Load TabBERT (requires torch)
974
+ if "tabbert_model.pt" in metadata.get('files', []) and TORCH_AVAILABLE:
975
+ tab_path = hf_hub_download(repo_id=repo_id, filename="tabbert_model.pt", token=token, repo_type="model")
976
+ tab_ckpt = torch.load(tab_path, map_location='cpu')
977
+ metrics = tab_ckpt.get('metrics', {})
978
+ results.append(f"📦 TabBERT 模型已加载")
979
+ results.append(f" AUC: {metrics.get('auc', 'N/A')}")
980
+ results.append(f" d_model: {tab_ckpt.get('d_model', 'N/A')}")
981
+ results.append(f" 特征: {', '.join(tab_ckpt.get('feature_cols', []))}")
982
+
983
+ return "\n".join(results), images[0] if images else None, images[1] if len(images) > 1 else None, images[2] if len(images) > 2 else None
984
+
985
+ except Exception as e:
986
+ import traceback
987
+ return f"❌ 加载失败: {str(e)}\n\n{traceback.format_exc()}", None, None, None
988
+
989
+
990
+ # =============================================================================
991
+ # Survival analysis — lifelines + DeepSurv
992
+ # =============================================================================
993
+
994
+ def generate_survival_data(n_samples=2000, seed=42):
995
+ """生成保险生存分析合成数据"""
996
+ random.seed(seed); np.random.seed(seed)
997
+
998
+ records = []
999
+ for i in range(n_samples):
1000
+ age = random.randint(18, 75)
1001
+ gender = random.choice([0, 1]) # 0=female, 1=male
1002
+ income = random.uniform(30000, 200000)
1003
+ policy_type = random.choice(["term_life", "whole_life", "health", "auto", "property"])
1004
+ premium_amount = random.uniform(1000, 50000)
1005
+ coverage_amount = premium_amount * random.uniform(10, 100)
1006
+ risk_score = random.uniform(0, 1)
1007
+
1008
+ # Base hazard derived from the covariates
1009
+ base_hazard = (
1010
+ 0.001 * (age - 18) + # older → higher hazard
1011
+ 0.05 * gender + # gender effect
1012
+ 0.00001 * (200000 - income) + # lower income → higher hazard
1013
+ 0.1 * risk_score + # behavioral risk score
1014
+ random.gauss(0, 0.05) # noise
1015
+ )
1016
+
1017
+ # Policy-type hazard adjustment
1018
+ policy_hazard = {"term_life": 0.02, "whole_life": 0.01, "health": 0.05,
1019
+ "auto": 0.03, "property": 0.01}[policy_type]
1020
+
1021
+ total_hazard = base_hazard + policy_hazard
1022
+ total_hazard = max(total_hazard, 0.001) # hazard floor
1023
+
1024
+ # Exponential event times: time ~ Exp(total_hazard)
1025
+ time_to_event = random.expovariate(total_hazard)
1026
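+ # Mean time-to-event is 1/total_hazard days; the true survival curve is S(t) = exp(-total_hazard * t)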
+
1027
+ # Right censoring: maximum observation window of 3650 days (10 years)
1028
+ max_observation = 3650
1029
+ event_observed = 1 if time_to_event < max_observation else 0
1030
+ duration = min(time_to_event, max_observation)
1031
+
1032
+ records.append({
1033
+ 'user_id': f"user_{i:04d}",
1034
+ 'age': age,
1035
+ 'gender': gender,
1036
+ 'income': income,
1037
+ 'policy_type': policy_type,
1038
+ 'premium_amount': premium_amount,
1039
+ 'coverage_amount': coverage_amount,
1040
+ 'risk_score': risk_score,
1041
+ 'duration': duration,
1042
+ 'event_observed': event_observed,
1043
+ })
1044
+
1045
+ return pd.DataFrame(records)
1046
+
1047
+
1048
+ def train_survival_analysis(n_samples, test_size, seed, use_deep_surv, epochs, lr):
1049
+ """训练生存分析模型"""
1050
+ df = generate_survival_data(n_samples=n_samples, seed=seed)
1051
+
1052
+ # Encode the categorical variable
1053
+ df['policy_type_enc'] = pd.Categorical(df['policy_type']).codes
1054
+
1055
+ # Feature columns
1056
+ feature_cols = ['age', 'gender', 'income', 'policy_type_enc',
1057
+ 'premium_amount', 'coverage_amount', 'risk_score']
1058
+
1059
+ # Train/test split
1060
+ train_df = df.sample(frac=1-test_size, random_state=seed)
1061
+ test_df = df.drop(train_df.index)
1062
+
1063
+ os.makedirs("outputs", exist_ok=True)
1064
+
1065
+ # ===== 1. lifelines Cox-PH =====
1066
+ results = ["=== 保险理赔/购买时序生存分析 ===", f"总样本: {len(df)} | 训练: {len(train_df)} | 测试: {len(test_df)}",
1067
+ f"事件发生率: {df['event_observed'].mean():.1%} ({df['event_observed'].sum()}/{len(df)})",
1068
+ f"平均观察时长: {df['duration'].mean():.0f} 天", "---"]
1069
+
1070
+ cph_figures = []
1071
+
1072
+ if LIFELINES_AVAILABLE:
1073
+ # Kaplan-Meier curves
1074
+ fig, ax = plt.subplots(figsize=(10,6))
1075
+ kmf = KaplanMeierFitter()
1076
+
1077
+ # Overall cohort
1078
+ kmf.fit(df['duration'], df['event_observed'], label='Overall')
1079
+ kmf.plot_survival_function(ax=ax, ci_show=True, color='steelblue', linewidth=2)
1080
+
1081
+ # Stratified by gender
1082
+ for gender, color in [(0, '#E74C3C'), (1, '#2ECC71')]:
1083
+ sub = df[df['gender'] == gender]
1084
+ kmf.fit(sub['duration'], sub['event_observed'], label=f'{"Female" if gender==0 else "Male"}')
1085
+ kmf.plot_survival_function(ax=ax, ci_show=False, color=color, linestyle='--', linewidth=2)
1086
+
1087
+ ax.set_title('Kaplan-Meier Survival Curve', fontsize=14, fontweight='bold')
1088
+ ax.set_xlabel('Duration (days)', fontsize=12)
1089
+ ax.set_ylabel('Survival Probability S(t)', fontsize=12)
1090
+ ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
1091
+ plt.tight_layout()
1092
+ km_path = "outputs/survival_kaplan_meier.png"
1093
+ plt.savefig(km_path, dpi=150); plt.close()
1094
+ cph_figures.append(km_path)
1095
+
1096
+ # Cox proportional hazards model
1097
+ cph = CoxPHFitter(penalizer=0.1)
1098
+ cph_train = train_df[feature_cols + ['duration', 'event_observed']].copy()
1099
+
1100
+ try:
1101
+ cph.fit(cph_train, duration_col='duration', event_col='event_observed')
1102
+
1103
+ # Coefficient plot
1104
+ fig, ax = plt.subplots(figsize=(10,6))
1105
+ summary = cph.summary.copy()
1106
+ summary['coef'] = summary['coef'].astype(float)
1107
+ summary['exp(coef)'] = summary['exp(coef)'].astype(float)
1108
+ summary = summary.sort_values('coef')
1109
+
1110
+ colors = ['green' if c < 0 else 'red' for c in summary['coef']]
1111
+ ax.barh(summary.index, summary['coef'], color=colors, alpha=0.7, edgecolor='white')
1112
+ ax.axvline(x=0, color='black', linestyle='-', linewidth=0.5)
1113
+ ax.set_title('Cox-PH Coefficients (log Hazard Ratio)', fontsize=14, fontweight='bold')
1114
+ ax.set_xlabel('Coefficient')
1115
+ plt.tight_layout()
1116
+ coef_path = "outputs/survival_cox_coefficients.png"
1117
+ plt.savefig(coef_path, dpi=150); plt.close()
1118
+ cph_figures.append(coef_path)
1119
+
1120
+ # Predicted survival functions (first 5 test samples)
1121
+ fig, ax = plt.subplots(figsize=(10,6))
1122
+ test_subset = test_df.head(5)
1123
+ predictions = cph.predict_survival_function(test_subset[feature_cols])
1124
+ for i, col in enumerate(predictions.columns):
1125
+ ax.plot(predictions.index, predictions[col], label=f'Sample {i+1}', linewidth=2, alpha=0.8)
1126
+ ax.set_title('Predicted Survival Functions (Test Samples)', fontsize=14, fontweight='bold')
1127
+ ax.set_xlabel('Duration (days)', fontsize=12)
1128
+ ax.set_ylabel('Survival Probability', fontsize=12)
1129
+ ax.legend(fontsize=10); ax.grid(True, alpha=0.3)
1130
+ plt.tight_layout()
1131
+ pred_path = "outputs/survival_predictions.png"
1132
+ plt.savefig(pred_path, dpi=150); plt.close()
1133
+ cph_figures.append(pred_path)
1134
+
1135
+ # Concordance Index
1136
+ from lifelines.utils import concordance_index
1137
+ pred_risk = cph.predict_partial_hazard(test_df[feature_cols])
1138
+ c_index = concordance_index(test_df['duration'], -pred_risk, test_df['event_observed'])
1139
+
1140
+ results.append("--- lifelines Cox-PH ---")
1141
+ results.append(f"Concordance Index: {c_index:.4f}")
1142
+ results.append(f"Log-likelihood: {cph.log_likelihood_:.2f}")
1143
+ results.append(f"AIC: {cph.AIC_partial_:.2f}")
1144
+ results.append("")
1145
+ results.append("--- Cox-PH 系数 (Top 影响因子) ---")
1146
+ for idx, row in cph.summary.head(7).iterrows():
1147
+ hr = float(row['exp(coef)'])
1148
+ results.append(f" {idx}: HR={hr:.3f} (p={row['p']:.4f})")
1149
+
1150
+ results.append("")
1151
+ results.append("HR > 1: 风险增加 | HR < 1: 风险降低")
1152
+
1153
+ except Exception as e:
1154
+ results.append(f"⚠️ Cox-PH 拟合失败: {str(e)}")
1155
+ else:
1156
+ results.append("⚠️ lifelines 未安装。统计生存分析功能禁用。")
1157
+
1158
+ # ===== 2. DeepSurv (PyTorch) =====
1159
+ deep_surv_result = ""
1160
+ if use_deep_surv and TORCH_AVAILABLE and LIFELINES_AVAILABLE:  # evaluation below uses lifelines' concordance_index and KM plots
1161
+ results.append("--- DeepSurv (Neural Cox-PH) ---")
1162
+
1163
+ X_train = train_df[feature_cols].values.astype(np.float32)
1164
+ X_test = test_df[feature_cols].values.astype(np.float32)
1165
+
1166
+ scaler = StandardScaler()
1167
+ X_train_s = scaler.fit_transform(X_train)
1168
+ X_test_s = scaler.transform(X_test)
1169
+
1170
+ T_train = train_df['duration'].values.astype(np.float32)
1171
+ E_train = train_df['event_observed'].values.astype(np.float32)
1172
+ T_test = test_df['duration'].values.astype(np.float32)
1173
+ E_test = test_df['event_observed'].values.astype(np.float32)
1174
+
1175
+ device = torch.device('cpu')
1176
+
1177
+ class DeepSurv(nn.Module):
1178
+ def __init__(self, input_dim, hidden_dims=[128, 64, 32], dropout=0.3):
1179
+ super().__init__()
1180
+ layers = []
1181
+ prev = input_dim
1182
+ for h in hidden_dims:
1183
+ layers.extend([nn.Linear(prev, h), nn.ReLU(), nn.Dropout(dropout)])
1184
+ prev = h
1185
+ layers.append(nn.Linear(prev, 1))
1186
+ self.net = nn.Sequential(*layers)
1187
+
1188
+ def forward(self, x):
1189
+ return self.net(x).squeeze(-1)
1190
+
1191
+ model = DeepSurv(input_dim=len(feature_cols), hidden_dims=[128, 64, 32]).to(device)
1192
+ optimizer = torch.optim.Adam(model.parameters(), lr=lr)
1193
+
1194
+ # Cox partial likelihood loss
1195
+ def cox_ph_loss(pred, time, event):
1196
+ """Negative Cox partial likelihood"""
1197
+ # Sort by time descending
1198
+ idx = torch.argsort(time, descending=True)
1199
+ pred_sorted = pred[idx]
1200
+ event_sorted = event[idx]
1201
+
1202
+ # logcumsumexp for numerical stability
1203
+ log_cumsum_h = torch.logcumsumexp(pred_sorted, dim=0)
1204
+
1205
+ # Only event samples contribute
1206
+ loss = -torch.sum(event_sorted * (pred_sorted - log_cumsum_h)) / event_sorted.sum().clamp(min=1)
1207
+ return loss
1208
+
1209
+ X_train_t = torch.tensor(X_train_s, dtype=torch.float32).to(device)
1210
+ T_train_t = torch.tensor(T_train, dtype=torch.float32).to(device)
1211
+ E_train_t = torch.tensor(E_train, dtype=torch.float32).to(device)
1212
+
1213
+ # Training
1214
+ model.train()
1215
+ for epoch in range(epochs):
1216
+ optimizer.zero_grad()
1217
+ pred = model(X_train_t)
1218
+ loss = cox_ph_loss(pred, T_train_t, E_train_t)
1219
+ loss.backward()
1220
+ optimizer.step()
1221
+
1222
+ if (epoch+1) % max(1, epochs//5) == 0 or epoch == 0:
1223
+ print(f"DeepSurv Epoch {epoch+1}/{epochs}, Loss: {loss.item():.4f}")
1224
+
1225
+ # Evaluation
1226
+ model.eval()
1227
+ with torch.no_grad():
1228
+ X_test_t = torch.tensor(X_test_s, dtype=torch.float32).to(device)
1229
+ pred_test = model(X_test_t).cpu().numpy()
1230
+
1231
+ # Concordance Index
1232
+ from lifelines.utils import concordance_index
1233
+ deep_c_index = concordance_index(T_test, -pred_test, E_test)
1234
+
1235
+ results.append(f"Concordance Index: {deep_c_index:.4f}")
1236
+ results.append(f"Training epochs: {epochs} | LR: {lr}")
1237
+ results.append("")
1238
+ results.append("--- DeepSurv 洞察 ---")
1239
+ results.append("1. 神经网络学习非线性特征交互, 捕捉复杂风险模式")
1240
+ results.append("2. 相比线性Cox-PH, 能建模年龄×收入×风险评分的组合效应")
1241
+ results.append("3. 输出log hazard ratio: 正值=高风险, 负值=低风险")
1242
+
1243
+ # Save the model
1244
+ torch.save({
1245
+ 'model_state_dict': model.state_dict(),
1246
+ 'feature_cols': feature_cols,
1247
+ 'hidden_dims': [128, 64, 32],
1248
+ 'scaler_mean': scaler.mean_,
1249
+ 'scaler_scale': scaler.scale_,
1250
+ 'metrics': {'concordance_index': deep_c_index}
1251
+ }, 'outputs/deepsurv_model.pt')
1252
+
1253
+ # Risk-stratification plot
1254
+ fig, ax = plt.subplots(figsize=(10,6))
1255
+ risk_scores = pred_test
1256
+ risk_percentiles = np.percentile(risk_scores, [33, 66])
1257
+
1258
+ low_risk = test_df[risk_scores < risk_percentiles[0]]
1259
+ mid_risk = test_df[(risk_scores >= risk_percentiles[0]) & (risk_scores < risk_percentiles[1])]
1260
+ high_risk = test_df[risk_scores >= risk_percentiles[1]]
1261
+
1262
+ colors = ['#2ECC71', '#F39C12', '#E74C3C']
1263
+ labels = ['Low Risk (bottom 33%)', 'Medium Risk (33-66%)', 'High Risk (top 33%)']
1264
+
1265
+ for subset, color, label in [(low_risk, colors[0], labels[0]),
1266
+ (mid_risk, colors[1], labels[1]),
1267
+ (high_risk, colors[2], labels[2])]:
1268
+ if len(subset) > 0:
1269
+ kmf = KaplanMeierFitter()
1270
+ kmf.fit(subset['duration'], subset['event_observed'], label=label)
1271
+ kmf.plot_survival_function(ax=ax, ci_show=False, color=color, linewidth=2.5)
1272
+
1273
+ ax.set_title('Survival by DeepSurv Risk Strata', fontsize=14, fontweight='bold')
1274
+ ax.set_xlabel('Duration (days)', fontsize=12)
1275
+ ax.set_ylabel('Survival Probability', fontsize=12)
1276
+ ax.legend(fontsize=11); ax.grid(True, alpha=0.3)
1277
+ plt.tight_layout()
1278
+ risk_path = "outputs/survival_risk_strata.png"
1279
+ plt.savefig(risk_path, dpi=150); plt.close()
1280
+ cph_figures.append(risk_path)
1281
+
1282
+ deep_surv_result = f"DeepSurv C-index: {deep_c_index:.4f}"
1283
+
1284
+ elif use_deep_surv:
1285
+ results.append("⚠️ PyTorch and/or lifelines is not installed; DeepSurv is disabled.")
1286
+
1287
+ # Final summary lines
1288
+ results.append("---")
1289
+ results.append("All figures have been saved to the outputs/ directory")
1290
+ results.append("Model saved to: outputs/deepsurv_model.pt (when DeepSurv is enabled)")
1291
+
1292
+ result_text = "\n".join(results)
1293
+
1294
+ return result_text, cph_figures[0] if len(cph_figures) > 0 else None, \
1295
+ cph_figures[1] if len(cph_figures) > 1 else None, \
1296
+ cph_figures[2] if len(cph_figures) > 2 else None, \
1297
+ cph_figures[3] if len(cph_figures) > 3 else None, \
1298
+ df.head(20)
1299
+
1300
+
1301
+ # =============================================================================
1302
+ # Gradio callbacks
1303
  # =============================================================================
1304
 
1305
  def demo_train(n_users, n_events, test_size, random_state, use_cv):
 
1306
  data = generate_synthetic_data(n_users=n_users, n_events_per_user=n_events, seed=random_state)
1307
  engineer = InsuranceFeatureEngineer()
1308
  features_list, labels = [], []
 
1313
 
1314
 
1315
  def csv_train(csv_file, label_col, test_size, random_state, use_cv):
 
1316
  if csv_file is None:
1317
  return "请先上传CSV文件", None, None, None, None, None
1318
  try:
 
1320
  df = pd.read_csv(csv_file)
1321
  else:
1322
  df = pd.read_csv(csv_file.name if hasattr(csv_file, 'name') else io.BytesIO(csv_file))
 
1323
  label_col = label_col.strip() if label_col else None
1324
  if label_col and label_col not in df.columns:
1325
  return f"标签列 '{label_col}' 不存在。可用列: {list(df.columns)}", None, None, None, None, None
 
1326
  profiles = parse_csv_to_profiles(df)
1327
  engineer = InsuranceFeatureEngineer()
1328
  features_list, labels = [], []
 
1329
  for profile in profiles:
1330
  f = engineer.extract_user_features(profile)
1331
  if f:
 
1336
  else:
1337
  is_high_risk = (f["has_purchased"] == 0 and f["has_renewed"] == 0 and f["total_events"] < 20)
1338
  labels.append(int(is_high_risk))
 
1339
  if len(features_list) < 50:
1340
  return f"有效样本数 {len(features_list)} 太少,需要至少50个", None, None, None, None, None
 
1341
  return train_sklearn(features_list, labels, test_size, random_state, use_cv)
1342
  except Exception as e:
1343
  import traceback
 
1370
 
1371
 
1372
  # =============================================================================
1373
+ # Gradio UI (7 tabs)
1374
  # =============================================================================
1375
 
1376
+ with gr.Blocks(title="🏥 保险APP 用户行为分析模型训练平台 v3.0", theme=gr.themes.Soft()) as demo:
1377
+ gr.Markdown("""# 🏥 保险APP 用户行为分析模型训练平台 v3.0
1378
 
1379
+ 基于最新研究论文构建的**级保险用户行为分析平台**
1380
 
1381
+ **大功能模块:** 🎲演示 | 📁CSV上传 | 🎯产品推荐(DIN) | 🔍异常检测(TabBERT) | 💾模型管理 | ⏱️生存分析 | ❓帮助
 
 
 
 
 
1382
 
1383
+ **参考论文:** [DIN](https://arxiv.org/abs/1706.06978) | [Churn Transformer](https://arxiv.org/abs/2309.14390) | [TabBERT](https://arxiv.org/abs/2011.01843) | [DeepSurv](https://arxiv.org/abs/1606.00931) | [RNN Survival](https://arxiv.org/abs/2304.00575)""")
1384
 
1385
  with gr.Tabs():
1386
  # ===== Tab 1: Demo mode =====
1387
+ with gr.Tab("🎲 Demo"):
1388
  with gr.Row():
1389
  with gr.Column(scale=1):
1390
  gr.Markdown("### Parameters")
 
1406
  demo_table = gr.Dataframe(label="Feature data sample")
1407
 
1408
  # ===== Tab 2: CSV upload =====
1409
+ with gr.Tab("📁 CSV Upload"):
1410
  with gr.Row():
1411
  with gr.Column(scale=1):
1412
  gr.Markdown("""### 📤 Upload data
 
1413
  **Required columns:** `user_id`, `session_id`, `timestamp`, `event_type`, `page_id`
1414
+ **Optional:** `product_id`, `amount`, `label`""")
 
1415
  csv_file = gr.File(label="Upload CSV file", file_types=[".csv"])
1416
+ label_col_input = gr.Textbox(label="Label column (optional)", placeholder="e.g. churn")
1417
  with gr.Row():
1418
  csv_test_size = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="Test split ratio")
1419
  csv_random_seed = gr.Number(value=42, label="Random seed", precision=0)
 
1435
  with gr.Row():
1436
  csv_table = gr.Dataframe(label="Feature data sample")
1437
 
1438
+ # ===== Tab 3: DIN product recommendation =====
1439
  with gr.Tab("🎯 Product Recommendation (DIN)"):
1440
  gr.Markdown("""### Deep Interest Network - Insurance Product Recommendation
1441
+ Scores a user's interest in a candidate insurance product by applying attention over the user's historical behavior sequence.""")
 
 
 
 
 
 
1442
  with gr.Row():
1443
  with gr.Column(scale=1):
 
1444
  din_users = gr.Slider(500, 5000, value=2000, step=100, label="Number of users")
1445
  din_emb = gr.Slider(32, 256, value=64, step=32, label="Embedding dim")
1446
  din_epochs = gr.Slider(5, 50, value=20, step=5, label="Epochs")
 
1448
  din_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="Learning rate")
1449
  din_seed = gr.Number(value=42, label="Random seed", precision=0)
1450
  din_btn = gr.Button("🚀 Train DIN model", variant="primary", size="lg")
 
1451
  if not TORCH_AVAILABLE:
1452
+ gr.Markdown("⚠️ **PyTorch is not installed**. Add `torch>=2.0.0` to requirements.txt and restart.")
 
1453
  with gr.Column(scale=2):
1454
  din_result = gr.Textbox(label="Training results", lines=25, show_copy_button=True)
 
1455
  with gr.Row():
1456
  din_img1 = gr.Image(label="Recommendation performance")
1457
  din_img2 = gr.Image(label="Attention weight examples")
 
1459
  din_img3 = gr.Image(label="ROC curve")
1460
  din_img4 = gr.Image(label="PR curve")
1461
 
1462
+ # ===== Tab 4: TabBERT anomaly detection =====
1463
  with gr.Tab("🔍 Anomaly Detection (TabBERT)"):
1464
  gr.Markdown("""### TabularBERT - Claim Fraud/Anomaly Detection
1465
+ A hierarchical Transformer-style architecture that learns cross-field correlations and temporal patterns in claim records.""")
 
 
 
 
 
 
 
1466
  with gr.Row():
1467
  with gr.Column(scale=1):
 
1468
  tab_normal = gr.Slider(500, 2000, value=800, step=100, label="Normal samples")
1469
  tab_anomaly = gr.Slider(100, 1000, value=200, step=50, label="Anomaly samples")
1470
  tab_dmodel = gr.Slider(64, 256, value=128, step=64, label="Model dim d_model")
 
1473
  tab_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="Learning rate")
1474
  tab_seed = gr.Number(value=42, label="Random seed", precision=0)
1475
  tab_btn = gr.Button("🚀 Train TabBERT model", variant="primary", size="lg")
 
1476
  if not TORCH_AVAILABLE:
1477
+ gr.Markdown("⚠️ **PyTorch is not installed**. Add `torch>=2.0.0` to requirements.txt and restart.")
 
1478
  with gr.Column(scale=2):
1479
  tab_result = gr.Textbox(label="Training results", lines=25, show_copy_button=True)
 
1480
  with gr.Row():
1481
  tab_img1 = gr.Image(label="Feature importance")
1482
  tab_img2 = gr.Image(label="Anomaly score distribution")
 
1484
  tab_img3 = gr.Image(label="ROC curve")
1485
  tab_img4 = gr.Image(label="Confusion matrix & threshold analysis")
1486
 
1487
+ # ===== Tab 5: Model management =====
1488
+ with gr.Tab("💾 Model Management"):
1489
+ gr.Markdown("""### Hugging Face Hub Model Management
1490
+ Save trained models to the Hub, or load existing models from the Hub.
1491
+
1492
+ **Get a token:** https://huggingface.co/settings/tokens""")
1493
+ with gr.Row():
1494
+ with gr.Column(scale=1):
1495
+ gr.Markdown("#### Save models to the Hub")
1496
+ save_repo_id = gr.Textbox(label="Hub Repo ID", placeholder="e.g. yourname/insurance-model-v1")
1497
+ save_token = gr.Textbox(label="HF Token", placeholder="hf_xxxxx", type="password")
1498
+ save_type = gr.Dropdown(["churn_prediction", "product_recommendation", "anomaly_detection", "all"],
1499
+ value="all", label="Model type")
1500
+ save_notes = gr.Textbox(label="Notes", placeholder="Model description...")
1501
+ save_btn = gr.Button("📤 Save to Hub", variant="primary")
1502
+ save_result = gr.Textbox(label="Save result", lines=10)
1503
+
1504
+ with gr.Column(scale=1):
1505
+ gr.Markdown("#### Load models from the Hub")
1506
+ load_repo_id = gr.Textbox(label="Hub Repo ID", placeholder="e.g. yourname/insurance-model-v1")
1507
+ load_token = gr.Textbox(label="HF Token", placeholder="hf_xxxxx", type="password")
1508
+ load_type = gr.Dropdown(["churn_prediction", "product_recommendation", "anomaly_detection", "all"],
1509
+ value="all", label="Model type")
1510
+ load_btn = gr.Button("📥 Load from Hub", variant="primary")
1511
+ load_result = gr.Textbox(label="Load result", lines=15, show_copy_button=True)
1512
+ with gr.Row():
1513
+ load_img1 = gr.Image(label="Loaded model visualization 1")
1514
+ load_img2 = gr.Image(label="Loaded model visualization 2")
1515
+ load_img3 = gr.Image(label="Loaded model visualization 3")
1516
+
1517
+ # ===== Tab 6: Survival analysis =====
1518
+ with gr.Tab("⏱️ Survival Analysis"):
1519
+ gr.Markdown("""### Survival Analysis of Insurance Claim/Purchase Timing
1520
+ Predicts the time from policy start to claim/purchase/churn, handling right-censored data (users who have not yet experienced the event).
1521
+
1522
+ **Statistical method:** lifelines Cox-PH + Kaplan-Meier | **Deep method:** DeepSurv (Neural Cox-PH)""")
1523
+ with gr.Row():
1524
+ with gr.Column(scale=1):
1525
+ surv_samples = gr.Slider(500, 5000, value=2000, step=100, label="Number of samples")
1526
+ surv_test_size = gr.Slider(0.1, 0.4, value=0.2, step=0.05, label="Test split ratio")
1527
+ surv_seed = gr.Number(value=42, label="Random seed", precision=0)
1528
+ use_deep_surv = gr.Checkbox(value=True, label="Enable DeepSurv (PyTorch)")
1529
+ deep_epochs = gr.Slider(10, 200, value=50, step=10, label="DeepSurv Epochs")
1530
+ deep_lr = gr.Slider(0.0001, 0.01, value=0.001, step=0.0001, label="DeepSurv LR")
1531
+ surv_btn = gr.Button("🚀 Train survival models", variant="primary", size="lg")
1532
+
1533
+ if not LIFELINES_AVAILABLE:
1534
+ gr.Markdown("⚠️ **lifelines is not installed**. Statistical survival analysis is disabled.")
1535
+ if not TORCH_AVAILABLE:
1536
+ gr.Markdown("⚠️ **PyTorch is not installed**. DeepSurv is disabled.")
1537
+
1538
+ with gr.Column(scale=2):
1539
+ surv_result = gr.Textbox(label="Training results", lines=30, show_copy_button=True)
1540
+ with gr.Row():
1541
+ surv_img1 = gr.Image(label="Kaplan-Meier survival curves")
1542
+ surv_img2 = gr.Image(label="Cox-PH coefficients")
1543
+ with gr.Row():
1544
+ surv_img3 = gr.Image(label="Predicted survival functions")
1545
+ surv_img4 = gr.Image(label="DeepSurv risk strata")
1546
+ with gr.Row():
1547
+ surv_table = gr.Dataframe(label="Data sample")
1548
+
1549
+ # ===== Tab 7: Help =====
1550
+ with gr.Tab("❓ Help"):
1551
  gr.Markdown("""## 📚 Complete User Guide
1552
 
1553
  ### 1. Demo mode
1554
+ Synthesizes insurance-app behavior data, auto-labels churn/retention, and trains GBDT + RF.
1555
 
1556
+ ### 2. CSV upload
1557
+ **Required columns:** `user_id`, `session_id`, `timestamp`, `event_type`, `page_id`
1558
+ **Optional:** `product_id`, `amount`, `label`
1559
+
1560
+ ### 3. DIN product recommendation
1561
+ - Input: the user's historical behavior sequence + a candidate insurance product
1562
+ - Core: LocalActivationUnit attention mechanism (see the sketch below)
1563
+ - Output: purchase probability + attention-weight visualization
1564
+
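+ A minimal sketch of the activation unit (hidden width and activation here are illustrative; the in-app module uses the same [c, b, c-b, c*b] interaction):
+
+ ```python
+ import torch, torch.nn as nn
+
+ class LocalActivationUnit(nn.Module):
+     # Scores each history behavior b against the candidate c via [c, b, c-b, c*b]
+     def __init__(self, emb_dim=64):
+         super().__init__()
+         self.mlp = nn.Sequential(nn.Linear(emb_dim * 4, 36), nn.PReLU(), nn.Linear(36, 1))
+
+     def forward(self, cand, hist):             # cand: (B, D), hist: (B, T, D)
+         c = cand.unsqueeze(1).expand_as(hist)  # broadcast candidate over time steps
+         x = torch.cat([c, hist, c - hist, c * hist], dim=-1)
+         return self.mlp(x).squeeze(-1)         # (B, T) attention logits
+ ```
+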
1565
+ ### 4. TabBERT anomaly detection
1566
+ - Input: multi-field claim-record features
1567
+ - Loss: Focal Loss (addresses the 1:4 class imbalance)
1568
+ - Output: anomaly scores + threshold analysis
1569
+
1570
+ ### 5. Model management
1571
+ - Save: models are written to `outputs/` after training and can be uploaded to the Hugging Face Hub in one click
1572
+ - Load: download existing models from the Hub and inspect their metrics and feature importance
1573
+
1574
+ ### 6. Survival analysis
1575
+ - **lifelines Cox-PH**: statistical baseline with interpretable coefficients and Kaplan-Meier curves (see the minimal sketch below)
1576
+ - **DeepSurv**: neural Cox-PH that learns nonlinear interactions and supports risk stratification
1577
+ - **Right censoring**: users who have not yet experienced the event are handled automatically
1578
+
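+ A minimal lifelines sketch mirroring what this tab runs (assumes a DataFrame `df` with the synthetic columns used here):
+
+ ```python
+ from lifelines import KaplanMeierFitter, CoxPHFitter
+
+ kmf = KaplanMeierFitter().fit(df['duration'], df['event_observed'])  # KM estimate
+ cph = CoxPHFitter(penalizer=0.1).fit(
+     df[['age', 'gender', 'risk_score', 'duration', 'event_observed']],
+     duration_col='duration', event_col='event_observed')
+ cph.print_summary()  # hazard ratios exp(coef) per covariate
+ ```
+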
1579
+ ### Event types (30)
1580
+ Browse | Interact | Convert | Claim | Renew | Other
1581
+ ---|---|---|---|---|---
1582
+ page_view | quote_request | payment_success | claim_init | renewal_click | login
1583
+ product_view | form_submit | policy_issued | claim_doc_upload | renewal_complete | logout
1584
+ premium_calculator | document_upload | policy_select | claim_review | policy_cancel | app_uninstall
1585
+ article_read | chat_init | payment_init | claim_approved | renewal_reminder |
1586
+ faq_view | call_init | | claim_rejected | |
1587
+ product_compare | video_consult | | | |
1588
+
1589
+ ### References
1590
  | Paper | Application | arXiv |
1591
  |------|------|-------|
1592
  | Deep Interest Network | Product recommendation | [1706.06978](https://arxiv.org/abs/1706.06978) |
1593
  | SDIM | Long-term behavior modeling | [2205.10249](https://arxiv.org/abs/2205.10249) |
1594
  | TabBERT/TabFormer | Tabular/temporal anomaly detection | [2011.01843](https://arxiv.org/abs/2011.01843) |
1595
  | Transformer Churn | Non-contractual churn prediction | [2309.14390](https://arxiv.org/abs/2309.14390) |
1596
+ | DeepSurv | Survival analysis | [1606.00931](https://arxiv.org/abs/1606.00931) |
1597
+ | RNN Survival | Purchase-timing prediction | [2304.00575](https://arxiv.org/abs/2304.00575) |
1598
  | Focal Loss | Imbalanced classification | [1708.02002](https://arxiv.org/abs/1708.02002) |
1599
  """)
1600
 
1601
  gr.Markdown("""---
1602
  <div align="center">
1603
+ <b>保险APP 用户行为分析模型训练平台 v3.0</b> |
 
 
 
 
1604
  作者: <a href="https://huggingface.co/Stephanwu">Stephanwu</a>
1605
  </div>""")
1606
 
1607
  # ===== Event bindings =====
1608
+ train_btn.click(fn=demo_train, inputs=[n_users_slider, n_events_slider, test_size_slider, random_seed, use_cv_check],
1609
+ outputs=[demo_result, demo_img1, demo_img2, demo_img3, demo_img4, demo_table])
1610
+ info_btn.click(fn=show_csv_info, inputs=[csv_file], outputs=[csv_info, csv_preview])
1611
+ csv_train_btn.click(fn=csv_train, inputs=[csv_file, label_col_input, csv_test_size, csv_random_seed, csv_use_cv],
1612
+ outputs=[csv_result, csv_img1, csv_img2, csv_img3, csv_img4, csv_table])
1613
+ din_btn.click(fn=train_din_recommendation, inputs=[din_users, din_emb, din_epochs, din_batch, din_lr, din_seed],
1614
+ outputs=[din_result, din_img1, din_img2, din_img3, din_img4])
1615
+ tab_btn.click(fn=train_tabbert_anomaly, inputs=[tab_normal, tab_anomaly, tab_dmodel, tab_epochs, tab_batch, tab_lr, tab_seed],
1616
+ outputs=[tab_result, tab_img1, tab_img2, tab_img3, tab_img4])
1617
+ save_btn.click(fn=save_model_to_hub, inputs=[save_repo_id, save_token, save_type, save_notes],
1618
+ outputs=[save_result])
1619
+ load_btn.click(fn=load_model_from_hub, inputs=[load_repo_id, load_token, load_type],
1620
+ outputs=[load_result, load_img1, load_img2, load_img3])
1621
+ surv_btn.click(fn=train_survival_analysis, inputs=[surv_samples, surv_test_size, surv_seed, use_deep_surv, deep_epochs, deep_lr],
1622
+ outputs=[surv_result, surv_img1, surv_img2, surv_img3, surv_img4, surv_table])
 
 
 
 
 
 
 
 
 
 
1623
 
1624
  if __name__ == "__main__":
1625
  demo.launch()