| """ |
| FinJEPA: Financial Joint-Embedding Predictive Architecture |
| A JEPA-based world model for portfolio optimization over a separated action space. |
| """ |
|
|
| import math |
| from typing import Optional, Tuple, Dict, List |
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import numpy as np |
|
|
|
|
def off_diagonal(x):
    """Return the off-diagonal elements of a square matrix as a flat tensor."""
    n, m = x.shape
    assert n == m
    return x.flatten()[:-1].view(n - 1, n + 1)[:, 1:].flatten()

def sigreg_loss(z_a, z_b, sim_coeff=0.0, std_coeff=16.0, cov_coeff=8.0):
    """VICReg-style variance/covariance regularizer over two batches of embeddings.

    z_a and z_b are [N, D]. The invariance term is kept for API completeness but
    is disabled by default (sim_coeff=0.0); the latent prediction loss plays that
    role in FinJEPA. Returns (total, sim, std, cov).
    """
    sim_loss = torch.tensor(0.0, device=z_a.device)
    # Variance term: keep the per-dimension std of each embedding batch above 1.
    std_z_a = torch.sqrt(z_a.var(dim=0) + 1e-4)
    std_z_b = torch.sqrt(z_b.var(dim=0) + 1e-4)
    std_loss = torch.mean(F.relu(1 - std_z_a)) + torch.mean(F.relu(1 - std_z_b))
    # Covariance term: push off-diagonal covariance between dimensions toward zero.
    N = z_a.size(0)
    z_a = z_a - z_a.mean(dim=0)
    z_b = z_b - z_b.mean(dim=0)
    cov_z_a = (z_a.T @ z_a) / (N - 1)
    cov_z_b = (z_b.T @ z_b) / (N - 1)
    cov_loss = off_diagonal(cov_z_a).pow(2).sum() / z_a.size(1) + off_diagonal(cov_z_b).pow(2).sum() / z_b.size(1)
    loss = sim_coeff * sim_loss + std_coeff * std_loss + cov_coeff * cov_loss
    return loss, sim_loss, std_loss, cov_loss

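# Example (illustrative shapes only, not part of the training code): for z_a, z_b of
# shape [256, 128],
#   loss, sim, std, cov = sigreg_loss(z_a, z_b, std_coeff=2.0, cov_coeff=1.0)
# penalizes any embedding dimension whose std falls below 1 and any non-zero
# off-diagonal covariance between dimensions.
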
class TimeSeriesTokenizer(nn.Module):
    """Patchify a multivariate time series into a sequence of embedded tokens.

    A strided Conv1d maps non-overlapping patches of `patch_size` timesteps
    (each with `in_features` channels) to `embed_dim`-dimensional tokens, to
    which a learned absolute position embedding is added.
    """

    def __init__(self, in_features: int, embed_dim: int = 128, patch_size: int = 4):
        super().__init__()
        self.patch_size = patch_size
        self.embed_dim = embed_dim
        self.proj = nn.Conv1d(in_features, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.max_patches = 1024
        self.pos_embed = nn.Parameter(torch.zeros(1, self.max_patches, embed_dim))
        nn.init.normal_(self.pos_embed, std=0.02)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [B, T, in_features] with T a multiple of patch_size.
        B, T, C = x.shape  # avoid shadowing torch.nn.functional (imported as F)
        x = x.transpose(1, 2)   # [B, in_features, T]
        x = self.proj(x)        # [B, embed_dim, T // patch_size]
        x = x.transpose(1, 2)   # [B, N, embed_dim]
        N = x.size(1)
        pos = self.pos_embed[:, :N]
        x = x + pos
        return x

class RotaryPositionEmbedding(nn.Module):
    """Rotary position embedding (RoPE) producing per-position cos/sin tables.

    The tables are laid out as [freqs, freqs] along the last dimension
    ("rotate-half" convention), matching `apply_rotary` below.
    """

    def __init__(self, dim: int, max_seq_len: int = 2048, base: float = 10000.0):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        self.register_buffer("inv_freq", inv_freq)
        self.max_seq_len = max_seq_len
        self.dim = dim

    def forward(self, x: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:
        device = x.device
        t = torch.arange(seq_len, device=device).type_as(self.inv_freq)
        freqs = torch.einsum("i,j->ij", t, self.inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)   # [seq_len, dim]
        cos = emb.cos()[None, :, :]               # [1, seq_len, dim], broadcast over batch and heads
        sin = emb.sin()[None, :, :]
        return cos, sin

def apply_rotary(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    """Apply RoPE using the rotate-half convention.

    The cos/sin tables from RotaryPositionEmbedding are concatenated as
    [freqs, freqs], so dimension i must be paired with dimension i + dim/2
    (not interleaved even/odd pairs).
    """
    x1, x2 = x.chunk(2, dim=-1)
    rotated = torch.cat((-x2, x1), dim=-1)
    return x * cos + rotated * sin

class AdaLN(nn.Module):
    """Adaptive LayerNorm conditioned on an action embedding (AdaLN-Zero style).

    The conditioning projection is zero-initialized so the modulation starts as
    the identity: scale = shift = 0 gives x * (1 + 0) + 0 = x.
    """

    def __init__(self, dim: int, action_dim: int):
        super().__init__()
        self.norm = nn.LayerNorm(dim, elementwise_affine=False)
        self.scale_shift = nn.Linear(action_dim, dim * 2)
        nn.init.zeros_(self.scale_shift.weight)
        nn.init.zeros_(self.scale_shift.bias)

    def forward(self, x: torch.Tensor, a_emb: torch.Tensor) -> torch.Tensor:
        x = self.norm(x)
        scale, shift = self.scale_shift(a_emb).chunk(2, dim=-1)
        return x * (1 + scale) + shift

class FinJEPATransformerBlock(nn.Module):
    """Pre-norm transformer block with RoPE attention and AdaLN action conditioning."""

    def __init__(self, dim: int, num_heads: int, mlp_ratio: float = 4.0, action_dim: int = 128, dropout: float = 0.0):
        super().__init__()
        assert dim % num_heads == 0, "dim must be divisible by num_heads"
        self.dim = dim
        self.num_heads = num_heads
        self.head_dim = dim // num_heads
        self.adaln1 = AdaLN(dim, action_dim)
        self.qkv = nn.Linear(dim, dim * 3)
        self.proj = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(dropout)
        self.adaln2 = AdaLN(dim, action_dim)
        mlp_dim = int(dim * mlp_ratio)
        self.mlp = nn.Sequential(
            nn.Linear(dim, mlp_dim), nn.GELU(), nn.Dropout(dropout),
            nn.Linear(mlp_dim, dim), nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor, a_emb: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor, mask=None):
        # x: [B, N, D]; a_emb: action conditioning broadcastable to [B, N, action_dim];
        # cos/sin: RoPE tables for the current sequence length, computed by the caller.
        B, N, D = x.shape
        x_norm = self.adaln1(x, a_emb)
        qkv = self.qkv(x_norm).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
        q, k, v = qkv[0], qkv[1], qkv[2]
        q = apply_rotary(q, cos, sin)
        k = apply_rotary(k, cos, sin)
        attn = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)
        if mask is not None:
            attn = attn.masked_fill(mask == 0, float('-inf'))
        attn = F.softmax(attn, dim=-1)
        attn = self.dropout(attn)
        out = (attn @ v).transpose(1, 2).reshape(B, N, D)
        out = self.proj(out)
        x = x + out
        x_norm = self.adaln2(x, a_emb)
        x = x + self.mlp(x_norm)
        return x

class FinJEPATransformerEncoder(nn.Module):
    """Stack of FinJEPA transformer blocks used for the context/target encoders.

    The encoders are action-free, so a fixed zero "dummy action" is fed to the
    AdaLN layers; only the predictor conditions on real actions.
    """

    def __init__(self, dim=128, depth=4, num_heads=4, mlp_ratio=4.0, dropout=0.0):
        super().__init__()
        self.blocks = nn.ModuleList([
            FinJEPATransformerBlock(dim, num_heads, mlp_ratio, action_dim=dim, dropout=dropout)
            for _ in range(depth)
        ])
        self.register_buffer("dummy_action", torch.zeros(1, 1, dim))
        self.norm = nn.LayerNorm(dim)
        self.rope = RotaryPositionEmbedding(dim // num_heads)

    def forward(self, x, mask=None):
        B, N, D = x.shape
        cos, sin = self.rope(x, N)
        a = self.dummy_action.expand(B, N, D)
        for block in self.blocks:
            x = block(x, a, cos, sin, mask)
        x = self.norm(x)
        return x

class ActionEmbedder(nn.Module):
    """Embed a portfolio action: target weights, per-asset discrete signals, optional hedge flag.

    The three components are embedded separately and fused by an MLP into a
    single `out_dim`-dimensional action embedding.
    """

    def __init__(self, n_assets=10, signal_vocab_size=3, hidden_dim=128, out_dim=128):
        super().__init__()
        self.n_assets = n_assets
        self.weight_proj = nn.Sequential(
            nn.Linear(n_assets, hidden_dim), nn.GELU(), nn.Linear(hidden_dim, hidden_dim),
        )
        self.signal_embed = nn.Embedding(signal_vocab_size, hidden_dim // n_assets)
        self.hedge_embed = nn.Embedding(2, hidden_dim // 2)
        fusion_in = hidden_dim + n_assets * (hidden_dim // n_assets) + hidden_dim // 2
        self.fusion = nn.Sequential(
            nn.Linear(fusion_in, out_dim), nn.GELU(), nn.Linear(out_dim, out_dim),
        )

    def forward(self, weights, signals, hedge=None):
        # weights: [B, n_assets] floats; signals: [B, n_assets] ints in [0, signal_vocab_size);
        # hedge: [B] or [B, 1] ints in {0, 1}, or None.
        w_emb = self.weight_proj(weights)
        s_emb = self.signal_embed(signals).flatten(1)
        if hedge is None:
            # No hedge provided: fill the hedge slot of the fusion input with zeros.
            h_dim = self.fusion[0].in_features - w_emb.size(1) - s_emb.size(1)
            h_emb = torch.zeros(weights.size(0), h_dim, device=weights.device, dtype=weights.dtype)
        else:
            h_emb = self.hedge_embed(hedge.reshape(-1))
        a_emb = self.fusion(torch.cat([w_emb, s_emb, h_emb], dim=-1))
        return a_emb

class FinJEPAPredictor(nn.Module):
    """Action-conditioned predictor mapping context tokens + action to predicted target tokens.

    Learned target queries are appended to the context sequence; after the
    transformer stack, only the query positions are returned as predictions.
    """

    def __init__(self, dim=128, depth=6, num_heads=4, mlp_ratio=4.0, action_dim=128, dropout=0.0, max_target_tokens=64):
        super().__init__()
        self.dim = dim
        self.max_target_tokens = max_target_tokens
        self.target_queries = nn.Parameter(torch.zeros(1, max_target_tokens, dim))
        nn.init.normal_(self.target_queries, std=0.02)
        self.blocks = nn.ModuleList([
            FinJEPATransformerBlock(dim, num_heads, mlp_ratio, action_dim, dropout)
            for _ in range(depth)
        ])
        self.norm = nn.LayerNorm(dim)
        self.action_proj = nn.Linear(action_dim, dim)
        self.rope = RotaryPositionEmbedding(dim // num_heads)

    def forward(self, x, action_emb, n_target_tokens=None, mask=None):
        # x: [B, N_ctx, D] encoded context; action_emb: [B, action_dim].
        # If a mask is given it must cover the concatenated context + query sequence.
        B, N_ctx, D = x.shape
        n_target = n_target_tokens if n_target_tokens is not None else 1
        queries = self.target_queries[:, :n_target].expand(B, -1, -1)
        x = torch.cat([x, queries], dim=1)
        N_total = x.size(1)
        a_seq = self.action_proj(action_emb).unsqueeze(1).expand(B, N_total, D)
        cos, sin = self.rope(x, N_total)
        for block in self.blocks:
            x = block(x, a_seq, cos, sin, mask)
        x = self.norm(x)
        target_pred = x[:, N_ctx:]
        return target_pred

class InverseDynamicsModel(nn.Module):
    """Recover the action (weights + discrete signals) that connects two latent states."""

    def __init__(self, z_dim, n_assets, signal_vocab_size=3, hidden_dim=256):
        super().__init__()
        self.n_assets = n_assets
        self.signal_vocab_size = signal_vocab_size
        self.mlp = nn.Sequential(
            nn.Linear(z_dim * 2, hidden_dim), nn.GELU(),
            nn.Linear(hidden_dim, hidden_dim), nn.GELU(),
        )
        self.weight_head = nn.Linear(hidden_dim, n_assets)
        self.signal_head = nn.Linear(hidden_dim, n_assets * signal_vocab_size)

    def forward(self, z_t, z_tp1):
        h = self.mlp(torch.cat([z_t, z_tp1], dim=-1))
        weights = self.weight_head(h)
        # Use the configured vocabulary size rather than a hard-coded 3.
        signals_logits = self.signal_head(h).reshape(-1, self.n_assets, self.signal_vocab_size)
        return {"weights": weights, "signals_logits": signals_logits}

class FinJEPA(nn.Module):
    """Action-conditioned JEPA world model over tokenized financial time series.

    A context encoder embeds the observed window, an EMA target encoder embeds the
    future window, and an action-conditioned predictor regresses the target
    embeddings from the context embeddings and the portfolio action. An optional
    inverse dynamics model (IDM) and an optional auxiliary task branch can be attached.
    """

    def __init__(
        self, in_features=7, n_assets=10, patch_size=4, embed_dim=128,
        encoder_depth=4, encoder_heads=4, predictor_depth=6, predictor_heads=4,
        action_hidden_dim=128, signal_vocab_size=3, mlp_ratio=4.0,
        dropout=0.0, ema_decay=0.996, use_idm=True, use_td_branch=False,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.ema_decay = ema_decay
        self.use_idm = use_idm
        self.use_td_branch = use_td_branch
        self.n_assets = n_assets

        self.tokenizer = TimeSeriesTokenizer(in_features, embed_dim, patch_size)
        self.context_encoder = FinJEPATransformerEncoder(embed_dim, encoder_depth, encoder_heads, mlp_ratio, dropout)
        self.target_encoder = FinJEPATransformerEncoder(embed_dim, encoder_depth, encoder_heads, mlp_ratio, dropout)

        self.action_embedder = ActionEmbedder(n_assets, signal_vocab_size, action_hidden_dim, embed_dim)
        self.predictor = FinJEPAPredictor(embed_dim, predictor_depth, predictor_heads, mlp_ratio, embed_dim, dropout)

        self.idm = InverseDynamicsModel(embed_dim, n_assets, signal_vocab_size, hidden_dim=256) if use_idm else None

        if use_td_branch:
            # Optional auxiliary task branch (frozen encoder plus its own predictor);
            # constructed here but not used in the default forward pass.
            self.task_encoder = FinJEPATransformerEncoder(embed_dim, encoder_depth, encoder_heads, mlp_ratio, dropout)
            for p in self.task_encoder.parameters():
                p.requires_grad = False
            self.task_predictor = FinJEPAPredictor(embed_dim, predictor_depth, predictor_heads, mlp_ratio, embed_dim, dropout)
        else:
            self.task_encoder = None
            self.task_predictor = None

        self._init_target_encoder()

    def _init_target_encoder(self):
        # Start the EMA target encoder as an exact copy of the context encoder and freeze it.
        self.target_encoder.load_state_dict(self.context_encoder.state_dict())
        for p in self.target_encoder.parameters():
            p.requires_grad = False

    def update_target(self):
        """EMA update of the target encoder; call once per optimizer step."""
        with torch.no_grad():
            for param_s, param_t in zip(self.context_encoder.parameters(), self.target_encoder.parameters()):
                param_t.data.mul_(self.ema_decay).add_(param_s.data, alpha=1 - self.ema_decay)

    def encode_context(self, x, mask=None):
        tokens = self.tokenizer(x)
        return self.context_encoder(tokens, mask)

    @torch.no_grad()
    def encode_target(self, x, mask=None):
        tokens = self.tokenizer(x)
        return self.target_encoder(tokens, mask)

    def forward(self, context_series, target_series, weights, signals, hedge=None, context_mask=None, target_mask=None):
        """One JEPA step: encode context and target windows, predict target embeddings from (context, action)."""
        # context_series / target_series: [B, T, in_features]; weights: [B, n_assets] floats;
        # signals: [B, n_assets] ints; hedge: optional [B] ints in {0, 1}.
        z_context = self.encode_context(context_series, context_mask)
        z_target = self.encode_target(target_series, target_mask)  # no grad (EMA branch)
        action_emb = self.action_embedder(weights, signals, hedge)
        n_target = z_target.size(1)
        z_pred = self.predictor(z_context, action_emb, n_target_tokens=n_target, mask=context_mask)
        idm_out = None
        if self.use_idm and self.idm is not None:
            # Recover the action from the last context token and the first target token.
            z_t = z_context[:, -1]
            z_next = z_target[:, 0]
            idm_out = self.idm(z_t, z_next)
        return {"z_pred": z_pred, "z_target": z_target, "action_emb": action_emb, "idm": idm_out}

    def predict_next_state(self, state_series, weights, signals, hedge=None, mask=None):
        """Predict the latent embedding of the next window given a candidate action."""
        z = self.encode_context(state_series, mask)
        action_emb = self.action_embedder(weights, signals, hedge)
        z_next = self.predictor(z, action_emb, n_target_tokens=1, mask=mask)
        return z_next

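# Hedged sketch of a single training step using the pieces above. `model = FinJEPA(...)`,
# `criterion = FinJEPALoss(...)` (defined below), `optimizer`, and the batch tensors
# (ctx, tgt, weights, signals, hedge) are assumed to exist in the surrounding training
# code and are not part of this module:
#
#   out  = model(ctx, tgt, weights, signals, hedge)
#   loss = criterion(out, actions_gt={"weights": weights, "signals": signals})["loss"]
#   optimizer.zero_grad()
#   loss.backward()
#   optimizer.step()
#   model.update_target()  # EMA update of the frozen target encoder, once per step
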
class FinJEPALoss(nn.Module):
    """Composite FinJEPA training loss.

    total = latent prediction + VICReg-style regularization (std/cov weighted by
    alpha/beta) + delta * temporal smoothness + omega * inverse dynamics +
    gamma * multi-step rollout. `rollout_outputs`, if given, is a list of dicts
    with "z_pred"/"z_target" entries from additional rollout steps.
    """

    def __init__(self, pred_loss="l1", alpha=2.0, beta=1.0, delta=4.0, omega=0.5, gamma=0.5, use_sigreg=True):
        super().__init__()
        self.pred_loss = pred_loss
        self.alpha = alpha
        self.beta = beta
        self.delta = delta
        self.omega = omega
        self.gamma = gamma
        self.use_sigreg = use_sigreg

    def forward(self, outputs, actions_gt=None, rollout_outputs=None):
        z_pred = outputs["z_pred"]
        z_target = outputs["z_target"]
        idm_out = outputs.get("idm")

        # Prediction loss in latent space (L1 by default, MSE otherwise).
        if self.pred_loss == "l1":
            l_pred = F.l1_loss(z_pred, z_target)
        else:
            l_pred = F.mse_loss(z_pred, z_target)

        # Variance/covariance regularization against representation collapse
        # (honors the use_sigreg flag, which was previously ignored).
        if self.use_sigreg:
            B, N, D = z_pred.shape
            z_pred_flat = z_pred.reshape(B * N, D)
            z_target_flat = z_target.reshape(B * N, D)
            l_reg, l_sim, l_std, l_cov = sigreg_loss(
                z_pred_flat, z_target_flat, sim_coeff=0.0, std_coeff=self.alpha, cov_coeff=self.beta
            )
        else:
            zero = torch.tensor(0.0, device=z_pred.device)
            l_reg, l_sim, l_std, l_cov = zero, zero, zero, zero

        # Temporal smoothness over consecutive predicted target tokens.
        if z_pred.size(1) > 1:
            z_diff = (z_pred[:, 1:] - z_pred[:, :-1]).pow(2).mean()
        else:
            z_diff = torch.tensor(0.0, device=z_pred.device)
        l_temporal = self.delta * z_diff

        # Inverse dynamics: recover the ground-truth action from (z_t, z_{t+1}).
        l_idm = torch.tensor(0.0, device=z_pred.device)
        if idm_out is not None and actions_gt is not None:
            l_w = F.mse_loss(idm_out["weights"], actions_gt["weights"])
            vocab = idm_out["signals_logits"].size(-1)
            l_s = F.cross_entropy(idm_out["signals_logits"].reshape(-1, vocab), actions_gt["signals"].reshape(-1))
            l_idm = self.omega * (l_w + l_s)

        # Optional multi-step rollout consistency.
        l_rollout = torch.tensor(0.0, device=z_pred.device)
        if rollout_outputs is not None and len(rollout_outputs) > 0:
            for ro in rollout_outputs:
                if self.pred_loss == "l1":
                    l_rollout = l_rollout + F.l1_loss(ro["z_pred"], ro["z_target"])
                else:
                    l_rollout = l_rollout + F.mse_loss(ro["z_pred"], ro["z_target"])
            l_rollout = self.gamma * (l_rollout / len(rollout_outputs))

        total = l_pred + l_reg + l_temporal + l_idm + l_rollout
        return {
            "loss": total,
            "loss_pred": l_pred.item(),
            "loss_reg": l_reg.item(),
            "loss_sim": l_sim.item(),
            "loss_std": l_std.item(),
            "loss_cov": l_cov.item(),
            "loss_temporal": l_temporal.item(),
            "loss_idm": l_idm.item(),
            "loss_rollout": l_rollout.item(),
        }

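# The block below is a minimal, self-contained smoke test (not part of the training
# pipeline): it builds a small FinJEPA on random data, runs one forward pass, computes
# FinJEPALoss, backpropagates, and applies the EMA update. All shapes, the long-only
# softmax weights, and the window lengths are illustrative assumptions.
if __name__ == "__main__":
    torch.manual_seed(0)
    B, T_ctx, T_tgt, F_in, A = 8, 64, 16, 7, 10  # batch, context/target lengths, features, assets

    model = FinJEPA(in_features=F_in, n_assets=A, patch_size=4, embed_dim=128)
    criterion = FinJEPALoss()

    ctx = torch.randn(B, T_ctx, F_in)                    # observed window
    tgt = torch.randn(B, T_tgt, F_in)                    # future window
    weights = torch.softmax(torch.randn(B, A), dim=-1)   # long-only weights summing to 1
    signals = torch.randint(0, 3, (B, A))                # per-asset signals in {0, 1, 2}
    hedge = torch.randint(0, 2, (B,))                    # optional binary hedge flag

    out = model(ctx, tgt, weights, signals, hedge)
    losses = criterion(out, actions_gt={"weights": weights, "signals": signals})
    losses["loss"].backward()
    model.update_target()

    print("z_pred:", tuple(out["z_pred"].shape), "z_target:", tuple(out["z_target"].shape))
    print({k: (v.item() if torch.is_tensor(v) else round(v, 4)) for k, v in losses.items()})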