| """ |
| 阶段4+5:实体追踪 + 仓位方向推断。 |
| |
| 跨日追踪行为指纹实体,推断主力仓位变化方向。 |
| """ |
|
|
| from __future__ import annotations |
|
|
| from collections import defaultdict |
| from typing import Dict, List, Optional, Tuple |
|
|
| import json |
| import os |
| import pickle |
| from typing import Dict, List, Tuple |
|
|
| import numpy as np |
| import pandas as pd |
|
|
|
|
class EntityTracker:
    """Cross-day entity tracker: maps each day's clusters onto persistent entities.

    Persistence notes:
    - save_state() pickles the full tracker state (reload it to resume tracking)
    - data tables are exported as parquet via the get_*() methods (for browsing/analysis)
    """

    def __init__(self, inactive_threshold: int = 5):
        self.inactive_threshold = inactive_threshold

        # entity_id -> entity record (lifecycle, amounts, centroids, ...)
        self.entities: Dict[int, dict] = {}

        # (date, cluster_id) -> entity_id
        self.cluster_registry: Dict[Tuple[int, int], int] = {}

        # date -> set of entity ids active on that day
        self.daily_active: Dict[int, set] = defaultdict(set)

        # date -> raw match tuples recorded for that day
        self.daily_matches: Dict[int, list] = defaultdict(list)

        self._next_eid = 0

        # dates (YYYYMMDD) in processing order
        self._processed_dates: List[int] = []

        self.meta: dict = {
            "version": 1,
            "stock": "600809.SH",
            "inactive_threshold": inactive_threshold,
        }

    def process_day(
        self,
        date: int,
        clusters: Dict[int, dict],
        matches: List[Tuple[int, int, int, float]],
    ) -> Dict[int, int]:
        """
        Process one trading day.

        Args:
            date: current date as YYYYMMDD
            clusters: {cid: centroid_info_dict}, the day's clustering result
            matches: list of cross-day match tuples

        Returns:
            {cid: entity_id} mapping from the day's clusters to entities
        """
        cid_to_entity: Dict[int, int] = {}
        matched_cids: set = set()

        # Matched clusters inherit the entity of their previous-day counterpart.
        for prev_date, prev_cid, curr_cid, cost in matches:
            eid = self.cluster_registry.get((prev_date, prev_cid))
            if eid is None:
                # The previous cluster was never registered; start a fresh entity.
                eid = self._create_entity(date, clusters[curr_cid])
            else:
                self._update_entity(eid, date, clusters[curr_cid], cost)
            self.cluster_registry[(date, curr_cid)] = eid
            cid_to_entity[curr_cid] = eid
            matched_cids.add(curr_cid)

        # Unmatched clusters spawn new entities.
        for cid, info in clusters.items():
            if cid not in matched_cids:
                eid = self._create_entity(date, info)
                self.cluster_registry[(date, cid)] = eid
                cid_to_entity[cid] = eid

        # Record the day's active entity set and its raw matches (persisted by save_state).
        active = set(cid_to_entity.values())
        self.daily_active[date] = active
        self.daily_matches[date] = list(matches)

        self._processed_dates.append(date)
        return cid_to_entity
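
    # Expected shapes of process_day() inputs, inferred from the reads above; any
    # cluster fields beyond those accessed here are assumptions:
    #   clusters: {cid: {"total_amount": float, "centroid": np.ndarray of shape (7,),
    #                    "dominant_side": "bid" | "ask", "bid_ratio": float}}
    #   matches:  [(prev_date, prev_cid, curr_cid, cost), ...]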
    def compute_position_signal(
        self,
        date: int,
        daily_volume: float | None = None,
    ) -> dict:
        """
        Infer the position-change direction for the given day.

        Args:
            date: date as YYYYMMDD
            daily_volume: accepted for interface compatibility but currently unused

        Returns:
            {"score": float, "bid_entities": int, "ask_entities": int,
             "accumulation_entities": list[int], "distribution_entities": list[int]}
        """
        active = self.daily_active.get(date, set())
        if not active:
            return {"score": 0.0, "bid_entities": 0, "ask_entities": 0,
                    "accumulation_entities": [], "distribution_entities": []}

        bid_score = 0.0
        ask_score = 0.0
        bid_entities = []
        ask_entities = []

        for eid in active:
            e = self.entities[eid]
            # Side the entity has traded on most often over its history.
            side = self._dominant_side(eid)
            # Log-space amount growth since the entity was first seen.
            growth = self._amount_growth(eid)
            # Larger entities get more weight (log of latest traded amount).
            weight = np.log1p(e.get("total_amount_latest", 0))

            if side == "bid":
                bid_score += growth * weight
                if growth > 0:
                    bid_entities.append(eid)
            else:
                ask_score += growth * weight
                if growth > 0:
                    ask_entities.append(eid)

        # Positive score: growing bid-side entities dominate (accumulation).
        raw = bid_score - ask_score
        score = float(raw)

        return {
            "score": score,
            "bid_entities": len([e for e in active if self._dominant_side(e) == "bid"]),
            "ask_entities": len([e for e in active if self._dominant_side(e) == "ask"]),
            "accumulation_entities": bid_entities,
            "distribution_entities": ask_entities,
        }
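
    # Score construction, as implemented above:
    #   score = sum over bid-side entities of growth_i * log1p(latest_amount_i)
    #         - sum over ask-side entities of growth_i * log1p(latest_amount_i)
    # where growth_i = log1p(latest_amount_i) - log1p(first_amount_i), so a positive
    # score means growing bid-side (accumulating) entities dominate.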
    def get_active_entities(self, date: int) -> List[int]:
        """Return the sorted entity IDs active on the given date."""
        return sorted(self.daily_active.get(date, set()))

    def get_entity_timeline(self) -> pd.DataFrame:
        """Export the entity lifecycle table."""
        rows = []
        for eid, e in self.entities.items():
            rows.append({
                "entity_id": eid,
                "first_seen": e["first_seen"],
                "last_seen": e["last_seen"],
                "active_days": e["active_days"],
                "total_amount_latest": e.get("total_amount_latest", 0),
                "total_amount_first": e.get("total_amount_first", 0),
                "amount_growth": self._amount_growth(eid),
                "dominant_side": self._dominant_side(eid),
                "bid_ratio": e.get("bid_ratio", 0),
                "avg_cost": e.get("avg_match_cost", 0),
                "status": "active" if self._is_active(e["last_seen"]) else "inactive",
            })
        return pd.DataFrame(rows).sort_values("entity_id")

    def get_daily_signals(self) -> pd.DataFrame:
        """Export the daily position-signal table."""
        rows = []
        for date in sorted(self._processed_dates):
            sig = self.compute_position_signal(date)
            sig["date"] = date
            sig["n_active_entities"] = len(self.daily_active.get(date, set()))
            sig["n_total_entities"] = len(self.entities)
            rows.append(sig)
        df = pd.DataFrame(rows)

        if len(df) > 20:
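            # 20-day rolling z-score of the raw score (needs at least 5 observations);
            # a zero rolling std is replaced by 1 to avoid division by zero.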
| df["score_z"] = ( |
| (df["score"] - df["score"].rolling(20, min_periods=5).mean()) |
| / df["score"].rolling(20, min_periods=5).std().replace(0, 1) |
| ) |
| else: |
| df["score_z"] = 0.0 |
| return df |
|
|
| |
|
|
    def _create_entity(self, date: int, cluster_info: dict) -> int:
        eid = self._next_eid
        self._next_eid += 1

        amount = cluster_info.get("total_amount", 0)
        self.entities[eid] = {
            "id": eid,
            "first_seen": date,
            "last_seen": date,
            "active_days": 1,
            "total_amount_latest": amount,
            "total_amount_first": amount,
            "amounts": [(date, amount)],
            "centroids": [(date, cluster_info.get("centroid", np.zeros(7)))],
            "dominant_sides": [cluster_info.get("dominant_side", "unknown")],
            "bid_ratio": cluster_info.get("bid_ratio", 0.5),
            "match_costs": [],
            "cluster_count": 1,
        }
        return eid

    def _update_entity(self, eid: int, date: int, cluster_info: dict, cost: float):
        e = self.entities[eid]
        e["last_seen"] = date
        e["active_days"] += 1
        amount = cluster_info.get("total_amount", 0)
        e["total_amount_latest"] = amount
        e["amounts"].append((date, amount))
        e["centroids"].append((date, cluster_info.get("centroid", np.zeros(7))))
        e["dominant_sides"].append(cluster_info.get("dominant_side", "unknown"))
        # Exponentially weighted update of the entity's bid ratio.
        e["bid_ratio"] = (
            0.7 * e["bid_ratio"] + 0.3 * cluster_info.get("bid_ratio", 0.5)
        )
        e["match_costs"].append(cost)
        e["cluster_count"] += 1
        e["avg_match_cost"] = float(np.mean(e["match_costs"])) if e["match_costs"] else 0.0

    def _dominant_side(self, eid: int) -> str:
        e = self.entities[eid]
        sides = e.get("dominant_sides", [])
        if not sides:
            return "unknown"
        bid_count = sum(1 for s in sides if s == "bid")
        ask_count = sum(1 for s in sides if s == "ask")
        return "bid" if bid_count >= ask_count else "ask"

    def _amount_growth(self, eid: int) -> float:
        """Amount growth trend: latest vs. first observation, as a log-space difference."""
        e = self.entities[eid]
        first = e.get("total_amount_first", 0)
        latest = e.get("total_amount_latest", 0)
        if first <= 0:
            return 0.0
        return float(np.log1p(latest) - np.log1p(first))

    def _is_active(self, last_seen: int) -> bool:
        """Whether an entity is still active, i.e. seen within the last N *processed* days."""
        if not self._processed_dates:
            return True
        try:
            idx = self._processed_dates.index(last_seen)
            return (len(self._processed_dates) - 1 - idx) <= self.inactive_threshold
        except ValueError:
            return False

    def save_state(self, path: str):
        """Save the full tracker state as a pickle; load_state() restores it to resume tracking."""
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        state = {
            "entities": self.entities,
            "cluster_registry": dict(self.cluster_registry),
            "daily_active": {int(k): list(v) for k, v in self.daily_active.items()},
            "daily_matches": {int(k): v for k, v in self.daily_matches.items()},
            "_next_eid": self._next_eid,
            "_processed_dates": self._processed_dates,
            "inactive_threshold": self.inactive_threshold,
            "meta": self.meta,
        }
        with open(path, "wb") as f:
            pickle.dump(state, f, protocol=5)
        print(f"Tracker state saved to {path} ({os.path.getsize(path)/1024/1024:.1f} MB)")
    @classmethod
    def load_state(cls, path: str) -> "EntityTracker":
        """Load a pickled tracker state and resume tracking from it."""
        with open(path, "rb") as f:
            state = pickle.load(f)

        tracker = cls(inactive_threshold=state.get("inactive_threshold", 5))
        tracker.entities = state["entities"]
        tracker.cluster_registry = state["cluster_registry"]
        tracker.daily_active = defaultdict(
            set, {int(k): set(v) for k, v in state["daily_active"].items()}
        )
        tracker.daily_matches = defaultdict(
            list, {int(k): v for k, v in state["daily_matches"].items()}
        )
        tracker._next_eid = state["_next_eid"]
        tracker._processed_dates = state["_processed_dates"]
        tracker.meta = state.get("meta", {})
        return tracker

    def get_cluster_registry_table(self) -> pd.DataFrame:
        """Export the cluster registry as a DataFrame."""
        rows = []
        for (date, cid), eid in sorted(self.cluster_registry.items()):
            e = self.entities.get(eid, {})
            rows.append({
                "date": date,
                "cluster_id": cid,
                "entity_id": eid,
                "entity_first_seen": e.get("first_seen"),
                "entity_dominant_side": self._dominant_side(eid) if eid in self.entities else "unknown",
            })
        return pd.DataFrame(rows)
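

# Minimal usage sketch (synthetic data; not part of the tracker). The cluster dicts
# below only carry the fields process_day() actually reads, with made-up values, and
# the output path is illustrative. Real clusters and matches come from the upstream
# clustering / matching stages.
if __name__ == "__main__":
    tracker = EntityTracker(inactive_threshold=5)

    # Day 1: two clusters, nothing to match against yet.
    tracker.process_day(
        20240102,
        clusters={
            0: {"total_amount": 1.2e7, "centroid": np.ones(7), "dominant_side": "bid", "bid_ratio": 0.8},
            1: {"total_amount": 8.0e6, "centroid": np.zeros(7), "dominant_side": "ask", "bid_ratio": 0.2},
        },
        matches=[],
    )

    # Day 2: cluster 0 matches day-1 cluster 0 (cost 0.1); cluster 1 is new.
    tracker.process_day(
        20240103,
        clusters={
            0: {"total_amount": 2.5e7, "centroid": np.ones(7), "dominant_side": "bid", "bid_ratio": 0.85},
            1: {"total_amount": 5.0e6, "centroid": np.zeros(7), "dominant_side": "ask", "bid_ratio": 0.3},
        },
        matches=[(20240102, 0, 0, 0.1)],
    )

    print(tracker.compute_position_signal(20240103))
    print(tracker.get_entity_timeline())

    # Round-trip the pickled state.
    tracker.save_state("output/tracker_state.pkl")
    restored = EntityTracker.load_state("output/tracker_state.pkl")
    print(restored.get_active_entities(20240103))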