Add field_tokenizers.py — Sign, MagnitudeBucket, Calendar, Categorical, DiscreteNumerical tokenizers
src/domain_tokenizer/tokenizers/field_tokenizers.py
ADDED
@@ -0,0 +1,335 @@
"""
Per-field tokenizers for domain-specific data.

Each tokenizer converts a single field value into one or more token strings.
These are the building blocks assembled by DomainTokenizerBuilder.

References:
- Nubank nuFormer: sign(2) + amount_bucket(21) + calendar(74) tokenization
- TP-BERTa (arXiv:2403.01841): Relative Magnitude Tokenization for numbers
- Banking TF (arXiv:2410.08243): date + amount + wording composite tokens
- Temporal Tokenization (arXiv:2512.13618): log-based bins for skewed financial data
"""

import math
from datetime import datetime
from typing import Any, Dict, List, Optional, Union

import numpy as np


class BaseFieldTokenizer:
    """Base class for all field tokenizers."""

    def __init__(self, prefix: str):
        self.prefix = prefix

    @property
    def vocab(self) -> List[str]:
        """All possible token strings this tokenizer can produce."""
        raise NotImplementedError

    def __call__(self, value: Any) -> Union[str, List[str]]:
        """Tokenize a single value. Returns one token string or a list."""
        raise NotImplementedError

    @property
    def vocab_size(self) -> int:
        return len(self.vocab)

    def to_dict(self) -> Dict:
        """Serialize tokenizer state for saving."""
        return {"type": self.__class__.__name__, "prefix": self.prefix}

    @classmethod
    def from_dict(cls, d: Dict) -> "BaseFieldTokenizer":
        """Deserialize tokenizer state."""
        raise NotImplementedError


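# Illustrative only (not part of this module): a minimal subclass showing the
# contract above: define `vocab` and `__call__`; override `to_dict` /
# `from_dict` when the tokenizer carries extra state.
#
#     class BoolTokenizer(BaseFieldTokenizer):
#         @property
#         def vocab(self) -> List[str]:
#             return [f"[{self.prefix}_T]", f"[{self.prefix}_F]"]
#
#         def __call__(self, value: Any) -> str:
#             return self.vocab[0] if value else self.vocab[1]

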
class SignTokenizer(BaseFieldTokenizer):
    """Tokenizes the sign of a numerical value.

    Nubank uses this for credit/debit distinction (2 tokens).
    Can be generalized to inflow/outflow, buy/sell, etc.

    Example:
        >>> tok = SignTokenizer("AMT_SIGN")
        >>> tok(79.99)   # -> "[AMT_SIGN_POS]"
        >>> tok(-50.0)   # -> "[AMT_SIGN_NEG]"
    """

    def __init__(self, prefix: str = "SIGN", pos_label: str = "POS", neg_label: str = "NEG"):
        super().__init__(prefix)
        self.pos_label = pos_label
        self.neg_label = neg_label
        self._pos_token = f"[{prefix}_{pos_label}]"
        self._neg_token = f"[{prefix}_{neg_label}]"

    @property
    def vocab(self) -> List[str]:
        return [self._pos_token, self._neg_token]

    def __call__(self, value: float) -> str:
        if value is None or (isinstance(value, float) and math.isnan(value)):
            return self._pos_token  # default to positive for missing values
        return self._pos_token if value >= 0 else self._neg_token

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "pos_label": self.pos_label, "neg_label": self.neg_label}

    @classmethod
    def from_dict(cls, d: Dict) -> "SignTokenizer":
        return cls(prefix=d["prefix"], pos_label=d["pos_label"], neg_label=d["neg_label"])


class MagnitudeBucketTokenizer(BaseFieldTokenizer):
    """Quantizes continuous values into bins using quantile-based binning.

    Follows Nubank's 21-bin quantization and TP-BERTa's Relative Magnitude
    Tokenization principle. Uses absolute values so sign and magnitude are
    tokenized independently.

    Must be fit on training data before use.

    Example:
        >>> tok = MagnitudeBucketTokenizer("AMT", n_bins=21)
        >>> tok.fit(np.array([1.0, 5.0, 10.0, 50.0, 100.0, 500.0]))
        >>> tok(79.99)   # -> "[AMT_15]" (some bin in the upper range)
    """

    def __init__(self, prefix: str = "AMT", n_bins: int = 21):
        super().__init__(prefix)
        self.n_bins = n_bins
        self.bin_edges: Optional[np.ndarray] = None
        self._is_fitted = False

    @property
    def vocab(self) -> List[str]:
        return [f"[{self.prefix}_{i:02d}]" for i in range(self.n_bins)]

    def fit(self, values: np.ndarray) -> "MagnitudeBucketTokenizer":
        """Compute bin edges from training data using quantiles on absolute values."""
        values = np.asarray(values, dtype=np.float64)
        # Filter NaN and take absolute values
        valid = values[~np.isnan(values)]
        abs_vals = np.abs(valid)

        if len(abs_vals) == 0:
            raise ValueError("Cannot fit on empty array")

        # Compute quantile edges
        quantiles = np.linspace(0, 100, self.n_bins + 1)
        self.bin_edges = np.unique(np.percentile(abs_vals, quantiles))

        # If too few unique edges (degenerate distribution), fall back to linspace
        if len(self.bin_edges) < 3:
            self.bin_edges = np.linspace(abs_vals.min(), abs_vals.max(), self.n_bins + 1)

        self._is_fitted = True
        return self

    def __call__(self, value: float) -> str:
        if not self._is_fitted:
            raise RuntimeError(f"MagnitudeBucketTokenizer({self.prefix}) not fitted. Call .fit() first.")

        if value is None or (isinstance(value, float) and math.isnan(value)):
            return f"[{self.prefix}_00]"  # default to lowest bin for missing values

        abs_val = abs(float(value))
        # searchsorted on interior edges (exclude first and last)
        bin_idx = int(np.searchsorted(self.bin_edges[1:-1], abs_val))
        # Clamp to valid range
        bin_idx = min(bin_idx, self.n_bins - 1)
        return f"[{self.prefix}_{bin_idx:02d}]"

    def to_dict(self) -> Dict:
        d = {**super().to_dict(), "n_bins": self.n_bins, "is_fitted": self._is_fitted}
        if self._is_fitted:
            d["bin_edges"] = self.bin_edges.tolist()
        return d

    @classmethod
    def from_dict(cls, d: Dict) -> "MagnitudeBucketTokenizer":
        tok = cls(prefix=d["prefix"], n_bins=d["n_bins"])
        if d.get("is_fitted") and "bin_edges" in d:
            tok.bin_edges = np.array(d["bin_edges"])
            tok._is_fitted = True
        return tok


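# Illustrative usage sketch: fit, serialize, and restore. The `amounts` array
# stands in for a hypothetical training-data column.
#
#     amounts = np.array([1.0, 5.0, 10.0, 50.0, 100.0, 500.0])
#     amt_tok = MagnitudeBucketTokenizer("AMT", n_bins=21).fit(amounts)
#     state = amt_tok.to_dict()                         # JSON-serializable
#     restored = MagnitudeBucketTokenizer.from_dict(state)
#     assert restored(79.99) == amt_tok(79.99)          # same binning after reload

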
class DiscreteNumericalTokenizer(BaseFieldTokenizer):
    """Tokenizes small discrete numerical values (quantities, counts).

    Maps integers 0..max_value to individual tokens, with an overflow token
    for values exceeding max_value.

    Example:
        >>> tok = DiscreteNumericalTokenizer("QTY", max_value=10)
        >>> tok(3)    # -> "[QTY_03]"
        >>> tok(15)   # -> "[QTY_OVER]"
    """

    def __init__(self, prefix: str = "QTY", max_value: int = 10):
        super().__init__(prefix)
        self.max_value = max_value
        self._overflow_token = f"[{prefix}_OVER]"

    @property
    def vocab(self) -> List[str]:
        tokens = [f"[{self.prefix}_{i:02d}]" for i in range(self.max_value + 1)]
        tokens.append(self._overflow_token)
        return tokens

    def __call__(self, value: Any) -> str:
        if value is None or (isinstance(value, float) and math.isnan(value)):
            return f"[{self.prefix}_00]"  # default to zero for missing values
        v = int(value)
        if v < 0:
            v = 0
        if v > self.max_value:
            return self._overflow_token
        return f"[{self.prefix}_{v:02d}]"

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "max_value": self.max_value}

    @classmethod
    def from_dict(cls, d: Dict) -> "DiscreteNumericalTokenizer":
        return cls(prefix=d["prefix"], max_value=d["max_value"])


class CalendarTokenizer(BaseFieldTokenizer):
    """Decomposes timestamps into calendar component tokens.

    Follows Nubank's approach: month(12) + dow(7) + dom(31) + hour(24) = 74 tokens.
    Accepts datetime objects or ISO format strings.

    Example:
        >>> tok = CalendarTokenizer("TS", fields=["month", "dow", "dom", "hour"])
        >>> tok(datetime(2025, 3, 15, 14, 30))
        ['[TS_MON_03]', '[TS_DOW_5]', '[TS_DOM_15]', '[TS_HOUR_14]']
    """

    # Maps field name -> (token format fn, extraction fn, count)
    FIELD_REGISTRY = {
        "month": (lambda p, i: f"[{p}_MON_{i + 1:02d}]", lambda dt: dt.month - 1, 12),
        "dow": (lambda p, i: f"[{p}_DOW_{i}]", lambda dt: dt.weekday(), 7),
        "dom": (lambda p, i: f"[{p}_DOM_{i + 1:02d}]", lambda dt: dt.day - 1, 31),
        "hour": (lambda p, i: f"[{p}_HOUR_{i:02d}]", lambda dt: dt.hour, 24),
        "quarter": (lambda p, i: f"[{p}_Q{i + 1}]", lambda dt: (dt.month - 1) // 3, 4),
        "minute_bin": (lambda p, i: f"[{p}_MINBIN_{i}]", lambda dt: dt.minute // 15, 4),
    }

    def __init__(self, prefix: str = "TS", fields: Optional[List[str]] = None):
        super().__init__(prefix)
        self.fields = fields or ["month", "dow", "dom", "hour"]
        # Validate field names up front
        for f in self.fields:
            if f not in self.FIELD_REGISTRY:
                raise ValueError(f"Unknown calendar field: '{f}'. Available: {list(self.FIELD_REGISTRY.keys())}")

    @property
    def vocab(self) -> List[str]:
        tokens = []
        for field_name in self.fields:
            fmt_fn, _, count = self.FIELD_REGISTRY[field_name]
            tokens.extend(fmt_fn(self.prefix, i) for i in range(count))
        return tokens

    def _parse_datetime(self, value: Any) -> datetime:
        if isinstance(value, datetime):
            return value
        if isinstance(value, str):
            # Try common formats
            for fmt in ("%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S", "%Y-%m-%d"):
                try:
                    return datetime.strptime(value, fmt)
                except ValueError:
                    continue
            raise ValueError(f"Cannot parse datetime string: {value}")
        raise TypeError(f"Expected datetime or str, got {type(value)}")

    def __call__(self, value: Any) -> List[str]:
        dt = self._parse_datetime(value)
        tokens = []
        for field_name in self.fields:
            fmt_fn, extract_fn, count = self.FIELD_REGISTRY[field_name]
            idx = extract_fn(dt)
            idx = max(0, min(idx, count - 1))  # clamp to valid range
            tokens.append(fmt_fn(self.prefix, idx))
        return tokens

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "fields": self.fields}

    @classmethod
    def from_dict(cls, d: Dict) -> "CalendarTokenizer":
        return cls(prefix=d["prefix"], fields=d["fields"])


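# Illustrative sketch: with the default fields, the vocabulary size matches
# the 74-token figure cited in the module docstring (12 + 7 + 31 + 24).
#
#     ts_tok = CalendarTokenizer("TS")
#     assert ts_tok.vocab_size == 12 + 7 + 31 + 24     # 74 tokens
#     ts_tok("2025-03-15 14:30:00")                    # one token per field

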
class CategoricalTokenizer(BaseFieldTokenizer):
    """Maps categorical string values to fixed vocabulary tokens.

    Unknown values map to a [PREFIX_UNK] token.

    Example:
        >>> tok = CategoricalTokenizer("EVT", ["view", "purchase", "return"])
        >>> tok("purchase")   # -> "[EVT_001]"
        >>> tok("unknown")    # -> "[EVT_UNK]"
    """

    def __init__(self, prefix: str, categories: List[str]):
        super().__init__(prefix)
        self.categories = list(categories)
        self._token_map = {cat: f"[{prefix}_{i:03d}]" for i, cat in enumerate(categories)}
        self._unk_token = f"[{prefix}_UNK]"
        # Also build a reverse map for decoding
        self._reverse_map = {v: k for k, v in self._token_map.items()}
        self._reverse_map[self._unk_token] = "<unknown>"

    @property
    def vocab(self) -> List[str]:
        return list(self._token_map.values()) + [self._unk_token]

    def __call__(self, value: Any) -> str:
        if value is None:
            return self._unk_token
        return self._token_map.get(str(value), self._unk_token)

    def decode_token(self, token: str) -> str:
        """Map a token string back to its category value."""
        return self._reverse_map.get(token, "<unknown>")

    def to_dict(self) -> Dict:
        return {**super().to_dict(), "categories": self.categories}

    @classmethod
    def from_dict(cls, d: Dict) -> "CategoricalTokenizer":
        return cls(prefix=d["prefix"], categories=d["categories"])


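# Illustrative sketch of the encode/decode round trip:
#
#     evt_tok = CategoricalTokenizer("EVT", ["view", "purchase", "return"])
#     token = evt_tok("purchase")                      # "[EVT_001]"
#     assert evt_tok.decode_token(token) == "purchase"
#     assert evt_tok.decode_token("[EVT_UNK]") == "<unknown>"

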
# =============================================================================
# Factory function to create a field tokenizer from a FieldSpec
# =============================================================================

def create_field_tokenizer(spec) -> Optional[BaseFieldTokenizer]:
    """Create the appropriate field tokenizer from a FieldSpec.

    Args:
        spec: A FieldSpec instance from schema.py

    Returns:
        An initialized BaseFieldTokenizer subclass, or None for TEXT fields,
        which are handled by the BPE tokenizer in DomainTokenizerBuilder.
    """
    from ..schema import FieldType  # local import to avoid a circular dependency

    if spec.field_type == FieldType.SIGN:
        return SignTokenizer(prefix=spec.prefix)

    elif spec.field_type == FieldType.NUMERICAL_CONTINUOUS:
        return MagnitudeBucketTokenizer(prefix=spec.prefix, n_bins=spec.n_bins)

    elif spec.field_type == FieldType.NUMERICAL_DISCRETE:
        return DiscreteNumericalTokenizer(prefix=spec.prefix, max_value=spec.max_value)

    elif spec.field_type == FieldType.CATEGORICAL_FIXED:
        return CategoricalTokenizer(prefix=spec.prefix, categories=spec.categories)

    elif spec.field_type == FieldType.TEMPORAL:
        return CalendarTokenizer(prefix=spec.prefix, fields=spec.calendar_fields)

    elif spec.field_type == FieldType.TEXT:
        return None  # Text is handled by the BPE tokenizer in DomainTokenizerBuilder

    else:
        raise ValueError(f"Unknown field type: {spec.field_type}")


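if __name__ == "__main__":
    # Smoke-test demo (illustrative only): tokenize one hypothetical
    # transaction record by composing field tokenizers directly. The sample
    # values and the fitted amounts below are made up for the demo.
    sign_tok = SignTokenizer("AMT_SIGN")
    amt_tok = MagnitudeBucketTokenizer("AMT", n_bins=21).fit(
        np.array([1.0, 5.0, 10.0, 50.0, 100.0, 500.0])
    )
    ts_tok = CalendarTokenizer("TS")
    evt_tok = CategoricalTokenizer("EVT", ["view", "purchase", "return"])

    amount, ts, event = -79.99, datetime(2025, 3, 15, 14, 30), "purchase"
    tokens = [sign_tok(amount), amt_tok(amount), *ts_tok(ts), evt_tok(event)]
    # One sign token + one magnitude bucket + four calendar tokens + one
    # event token = 7 tokens for this record.
    print(tokens)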