| """ |
| Per-field tokenizers for domain-specific data. |
| |
| Each tokenizer converts a single field value into one or more token strings. |
| These are the building blocks assembled by DomainTokenizerBuilder. |
| |
| References: |
| - Nubank nuFormer: sign(2) + amount_bucket(21) + calendar(74) tokenization |
| - TP-BERTa (arXiv:2403.01841): Relative Magnitude Tokenization for numbers |
| - Banking TF (arXiv:2410.08243): date + amount + wording composite tokens |
| - Temporal Tokenization (arXiv:2512.13618): log-based bins for skewed financial data |
| """ |
|
|
import math
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

import numpy as np
|
|
|
|
class BaseFieldTokenizer:
    """Base class for all field tokenizers."""

    def __init__(self, prefix: str):
        self.prefix = prefix

    @property
    def vocab(self) -> List[str]:
        """All possible token strings this tokenizer can produce."""
        raise NotImplementedError

    def __call__(self, value: Any) -> Union[str, List[str]]:
        """Tokenize a single value. Returns one token string or a list."""
        raise NotImplementedError

    @property
    def vocab_size(self) -> int:
        return len(self.vocab)

    def to_dict(self) -> Dict:
        """Serialize tokenizer state for saving."""
        return {"type": self.__class__.__name__, "prefix": self.prefix}

    @classmethod
    def from_dict(cls, d: Dict) -> "BaseFieldTokenizer":
        """Deserialize tokenizer state."""
        raise NotImplementedError
|
|
|
|
class SignTokenizer(BaseFieldTokenizer):
    """Tokenizes the sign of a numerical value.

    Nubank uses this for credit/debit distinction (2 tokens).
    Can be generalized to inflow/outflow, buy/sell, etc.

    Example:
        >>> tok = SignTokenizer("AMT_SIGN")
        >>> tok(79.99)  # -> "[AMT_SIGN_POS]"
        >>> tok(-50.0)  # -> "[AMT_SIGN_NEG]"
    """

    def __init__(self, prefix: str = "SIGN", pos_label: str = "POS", neg_label: str = "NEG"):
        super().__init__(prefix)
        self.pos_label = pos_label
        self.neg_label = neg_label
        self._pos_token = f"[{prefix}_{pos_label}]"
        self._neg_token = f"[{prefix}_{neg_label}]"

    @property
    def vocab(self) -> List[str]:
        return [self._pos_token, self._neg_token]

    def __call__(self, value: float) -> str:
        # Missing values (None/NaN) default to the positive token; zero is
        # also treated as positive.
        if value is None or (isinstance(value, float) and math.isnan(value)):
            return self._pos_token
        return self._pos_token if value >= 0 else self._neg_token

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "pos_label": self.pos_label, "neg_label": self.neg_label}
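
    # A minimal from_dict sketch, mirroring MagnitudeBucketTokenizer.from_dict
    # below and assuming the to_dict() keys above are stable, so that sign
    # tokenizers also round-trip through serialization.
    @classmethod
    def from_dict(cls, d: Dict) -> "SignTokenizer":
        return cls(prefix=d["prefix"], pos_label=d["pos_label"], neg_label=d["neg_label"])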
|
|
|
|
class MagnitudeBucketTokenizer(BaseFieldTokenizer):
    """Quantizes continuous values into bins using quantile-based binning.

    Follows Nubank's 21-bin quantization and TP-BERTa's Relative Magnitude
    Tokenization principle. Uses absolute values so sign and magnitude are
    tokenized independently.

    Must be fit on training data before use.

    Example:
        >>> tok = MagnitudeBucketTokenizer("AMT", n_bins=21)
        >>> tok.fit(np.array([1.0, 5.0, 10.0, 50.0, 100.0, 500.0]))
        >>> tok(79.99)  # -> "[AMT_15]" (some bin in the upper range)
    """

    def __init__(self, prefix: str = "AMT", n_bins: int = 21):
        super().__init__(prefix)
        self.n_bins = n_bins
        self.bin_edges: Optional[np.ndarray] = None
        self._is_fitted = False

    @property
    def vocab(self) -> List[str]:
        return [f"[{self.prefix}_{i:02d}]" for i in range(self.n_bins)]
|
|
    def fit(self, values: np.ndarray) -> "MagnitudeBucketTokenizer":
        """Compute bin edges from training data using quantiles on absolute values."""
        values = np.asarray(values, dtype=np.float64)
        valid = values[~np.isnan(values)]
        abs_vals = np.abs(valid)

        if len(abs_vals) == 0:
            raise ValueError("Cannot fit on empty array")

        # Quantile edges give roughly equal-population bins, which suits
        # skewed financial amounts better than equal-width bins.
        quantiles = np.linspace(0, 100, self.n_bins + 1)
        self.bin_edges = np.unique(np.percentile(abs_vals, quantiles))

        # Heavily repeated values can collapse quantile edges; fall back to
        # equal-width bins when too few distinct edges survive.
        if len(self.bin_edges) < 3:
            self.bin_edges = np.linspace(abs_vals.min(), abs_vals.max(), self.n_bins + 1)

        self._is_fitted = True
        return self
|
|
    def __call__(self, value: float) -> str:
        if not self._is_fitted:
            raise RuntimeError(f"MagnitudeBucketTokenizer({self.prefix}) not fitted. Call .fit() first.")

        if value is None or (isinstance(value, float) and math.isnan(value)):
            return f"[{self.prefix}_00]"

        abs_val = abs(float(value))
        # Search against interior edges only, so values below the first edge
        # map to bin 0 and values above the last edge clamp to the top bin.
        bin_idx = int(np.searchsorted(self.bin_edges[1:-1], abs_val))
        bin_idx = min(bin_idx, self.n_bins - 1)
        return f"[{self.prefix}_{bin_idx:02d}]"
|
|
    def to_dict(self) -> Dict:
        d = {**super().to_dict(), "n_bins": self.n_bins, "is_fitted": self._is_fitted}
        if self._is_fitted:
            d["bin_edges"] = self.bin_edges.tolist()
        return d

    @classmethod
    def from_dict(cls, d: Dict) -> "MagnitudeBucketTokenizer":
        tok = cls(prefix=d["prefix"], n_bins=d["n_bins"])
        if d.get("is_fitted") and "bin_edges" in d:
            tok.bin_edges = np.array(d["bin_edges"])
            tok._is_fitted = True
        return tok
|
|
|
|
class DiscreteNumericalTokenizer(BaseFieldTokenizer):
    """Tokenizes small discrete numerical values (quantities, counts).

    Maps integers 0..max_value to individual tokens, with an overflow token
    for values exceeding max_value.

    Example:
        >>> tok = DiscreteNumericalTokenizer("QTY", max_value=10)
        >>> tok(3)   # -> "[QTY_03]"
        >>> tok(15)  # -> "[QTY_OVER]"
    """

    def __init__(self, prefix: str = "QTY", max_value: int = 10):
        super().__init__(prefix)
        self.max_value = max_value
        self._overflow_token = f"[{prefix}_OVER]"

    @property
    def vocab(self) -> List[str]:
        tokens = [f"[{self.prefix}_{i:02d}]" for i in range(self.max_value + 1)]
        tokens.append(self._overflow_token)
        return tokens

    def __call__(self, value: Any) -> str:
        # Missing values (None/NaN) map to the zero token; negative values
        # are clamped to zero. The NaN guard also keeps int() from raising.
        if value is None or (isinstance(value, float) and math.isnan(value)):
            return f"[{self.prefix}_00]"
        v = int(value)
        if v < 0:
            v = 0
        if v > self.max_value:
            return self._overflow_token
        return f"[{self.prefix}_{v:02d}]"

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "max_value": self.max_value}
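
    # A minimal from_dict sketch, mirroring CategoricalTokenizer.from_dict
    # below and assuming the to_dict() keys above are stable.
    @classmethod
    def from_dict(cls, d: Dict) -> "DiscreteNumericalTokenizer":
        return cls(prefix=d["prefix"], max_value=d["max_value"])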
|
|
|
|
class CalendarTokenizer(BaseFieldTokenizer):
    """Decomposes timestamps into calendar component tokens.

    Follows Nubank's approach: month(12) + dow(7) + dom(31) + hour(24) = 74 tokens.
    Accepts datetime objects or ISO-format strings.

    Example:
        >>> tok = CalendarTokenizer("TS", fields=["month", "dow", "dom", "hour"])
        >>> tok(datetime(2025, 3, 15, 14, 30))
        ['[TS_MON_03]', '[TS_DOW_5]', '[TS_DOM_15]', '[TS_HOUR_14]']
    """

    # field name -> (token formatter, zero-based index extractor, index count)
    FIELD_REGISTRY = {
        "month": (lambda p, i: f"[{p}_MON_{i+1:02d}]", lambda dt: dt.month - 1, 12),
        "dow": (lambda p, i: f"[{p}_DOW_{i}]", lambda dt: dt.weekday(), 7),
        "dom": (lambda p, i: f"[{p}_DOM_{i+1:02d}]", lambda dt: dt.day - 1, 31),
        "hour": (lambda p, i: f"[{p}_HOUR_{i:02d}]", lambda dt: dt.hour, 24),
        "quarter": (lambda p, i: f"[{p}_Q{i+1}]", lambda dt: (dt.month - 1) // 3, 4),
        "minute_bin": (lambda p, i: f"[{p}_MINBIN_{i}]", lambda dt: dt.minute // 15, 4),
    }

    def __init__(self, prefix: str = "TS", fields: Optional[List[str]] = None):
        super().__init__(prefix)
        self.fields = fields or ["month", "dow", "dom", "hour"]
        for f in self.fields:
            if f not in self.FIELD_REGISTRY:
                raise ValueError(f"Unknown calendar field: '{f}'. Available: {list(self.FIELD_REGISTRY.keys())}")

    @property
    def vocab(self) -> List[str]:
        tokens = []
        for field_name in self.fields:
            fmt_fn, _, count = self.FIELD_REGISTRY[field_name]
            tokens.extend(fmt_fn(self.prefix, i) for i in range(count))
        return tokens

    def _parse_datetime(self, value: Any) -> datetime:
        if isinstance(value, datetime):
            return value
        if isinstance(value, str):
            # Try common ISO-like formats in order.
            for fmt in ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d"):
                try:
                    return datetime.strptime(value, fmt)
                except ValueError:
                    continue
            raise ValueError(f"Cannot parse datetime string: {value}")
        raise TypeError(f"Expected datetime or str, got {type(value)}")

    def __call__(self, value: Any) -> List[str]:
        dt = self._parse_datetime(value)
        tokens = []
        for field_name in self.fields:
            fmt_fn, extract_fn, count = self.FIELD_REGISTRY[field_name]
            # Clamp defensively so an extractor can never index outside the vocab.
            idx = max(0, min(extract_fn(dt), count - 1))
            tokens.append(fmt_fn(self.prefix, idx))
        return tokens

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "fields": self.fields}
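
    # A minimal from_dict sketch, mirroring CategoricalTokenizer.from_dict
    # below and assuming the to_dict() keys above are stable.
    @classmethod
    def from_dict(cls, d: Dict) -> "CalendarTokenizer":
        return cls(prefix=d["prefix"], fields=d["fields"])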
|
|
|
|
class CategoricalTokenizer(BaseFieldTokenizer):
    """Maps categorical string values to fixed vocabulary tokens.

    Unknown values map to a [PREFIX_UNK] token.

    Example:
        >>> tok = CategoricalTokenizer("EVT", ["view", "purchase", "return"])
        >>> tok("purchase")  # -> "[EVT_001]"
        >>> tok("unknown")   # -> "[EVT_UNK]"
    """

    def __init__(self, prefix: str, categories: List[str]):
        super().__init__(prefix)
        self.categories = list(categories)
        self._token_map = {cat: f"[{prefix}_{i:03d}]" for i, cat in enumerate(categories)}
        self._unk_token = f"[{prefix}_UNK]"
        # Reverse map lets decode_token recover the original category string.
        self._reverse_map = {v: k for k, v in self._token_map.items()}
        self._reverse_map[self._unk_token] = "<unknown>"

    @property
    def vocab(self) -> List[str]:
        return list(self._token_map.values()) + [self._unk_token]

    def __call__(self, value: Any) -> str:
        if value is None:
            return self._unk_token
        return self._token_map.get(str(value), self._unk_token)

    def decode_token(self, token: str) -> str:
        """Map a token string back to its category value."""
        return self._reverse_map.get(token, "<unknown>")

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "categories": self.categories}

    @classmethod
    def from_dict(cls, d: Dict) -> "CategoricalTokenizer":
        return cls(prefix=d["prefix"], categories=d["categories"])
|
|
|
|
|
|
def create_field_tokenizer(spec) -> Optional[BaseFieldTokenizer]:
    """Create the appropriate field tokenizer from a FieldSpec.

    Args:
        spec: A FieldSpec instance from schema.py

    Returns:
        An initialized BaseFieldTokenizer subclass, or None for TEXT fields,
        which have no per-field tokenizer in this module.
    """
    from ..schema import FieldType

    if spec.field_type == FieldType.SIGN:
        return SignTokenizer(prefix=spec.prefix)
    elif spec.field_type == FieldType.NUMERICAL_CONTINUOUS:
        return MagnitudeBucketTokenizer(prefix=spec.prefix, n_bins=spec.n_bins)
    elif spec.field_type == FieldType.NUMERICAL_DISCRETE:
        return DiscreteNumericalTokenizer(prefix=spec.prefix, max_value=spec.max_value)
    elif spec.field_type == FieldType.CATEGORICAL_FIXED:
        return CategoricalTokenizer(prefix=spec.prefix, categories=spec.categories)
    elif spec.field_type == FieldType.TEMPORAL:
        return CalendarTokenizer(prefix=spec.prefix, fields=spec.calendar_fields)
    elif spec.field_type == FieldType.TEXT:
        # TEXT fields have no per-field tokenizer here.
        return None
    else:
        raise ValueError(f"Unknown field type: {spec.field_type}")
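

# Minimal usage sketch (illustrative values; runnable standalone without the
# schema module). Shows the Nubank-style composition the module docstring
# describes -- sign + amount bucket + calendar tokens for one transaction --
# plus a to_dict()/from_dict() round-trip through JSON.
if __name__ == "__main__":
    import json

    rng = np.random.default_rng(0)
    amount_sign = SignTokenizer("AMT_SIGN")
    amount_bucket = MagnitudeBucketTokenizer("AMT", n_bins=21).fit(
        rng.lognormal(mean=3.0, sigma=1.5, size=1_000)  # skewed, amount-like data
    )
    calendar = CalendarTokenizer("TS")

    tokens = [
        amount_sign(-79.99),
        amount_bucket(-79.99),
        *calendar(datetime(2025, 3, 15, 14, 30)),
    ]
    print(tokens)  # e.g. ['[AMT_SIGN_NEG]', '[AMT_..]', '[TS_MON_03]', ...]

    # Fitted state survives a JSON round-trip.
    restored = MagnitudeBucketTokenizer.from_dict(
        json.loads(json.dumps(amount_bucket.to_dict()))
    )
    assert restored(-79.99) == amount_bucket(-79.99)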
|
|