v7: past-track masked JEPA pretraining script
pretrain_v7.py (ADDED, +612 -0)
@@ -0,0 +1,612 @@
# /// script
# requires-python = ">=3.10"
# dependencies = [
#     "torch>=2.1",
#     "numpy",
#     "pandas",
#     "scikit-learn",
#     "huggingface-hub",
#     "trackio",
# ]
# ///
"""
Flight-JEPA v7 — past-track masked JEPA pretraining.

Adapted from Forecast-MAE (arXiv:2308.09882) and I-JEPA (arXiv:2301.08243):
mask contiguous blocks of *past-track* patches and train an encoder +
EMA target + predictor to reconstruct masked-patch latents from the
visible context. Encoder weights then transfer to v6 fine-tuning.

Key differences from v6's JEPA aux:
- Pretraining-only objective (no forecasting head, no Δ conditioning).
- Masks past-track patches, not future segments.
- Trains on the same RKSIa data — a small-scale demo, not OpenSky-scale.
- Output: an encoder/tokenizer checkpoint (`<out-dir>/<tag>.pt`) loadable
  by the v6 fine-tune script.

Decision criterion at fine-tune time:
- Significant FDE improvement at ≥30% past-track dropout (test-time).
- No regression at 0% dropout.
"""
from __future__ import annotations
import argparse
import copy
import json
import math
import os
import shutil
import time

import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader

try:
    import trackio
    HAS_TRACKIO = True
except ImportError:
    HAS_TRACKIO = False


# ============================================================================
# DATA UTILITIES (inlined from train_v2_prod.py for a self-contained job)
# ============================================================================

def load_atfm(dset_name, mode, path):
    """Load one airport/split as an (N, T, 3) XYZ array plus integer labels."""
    variables = ["X", "Y", "Z"]
    data, labels = [], None
    for var in variables:
        df = pd.read_csv(os.path.join(path, f"{dset_name}_{mode}_{var}.tsv"),
                         sep="\t", header=None, na_values="NaN")
        if labels is None:
            labels = df.values[:, 0]
        data.append(df.values[:, 1:])
    return np.stack(data, axis=-1), labels.astype(int)


def compute_features(traj_xyz: np.ndarray) -> np.ndarray:
    """Per-timestep 9-dim features: xyz, unit heading, (r, sin θ, cos θ)."""
    if traj_xyz.shape[0] < 2:
        # Degenerate window: no heading can be estimated; pad with zeros.
        T = traj_xyz.shape[0]
        return np.concatenate([
            traj_xyz, np.zeros((T, 3), dtype=traj_xyz.dtype),
            np.zeros((T, 3), dtype=traj_xyz.dtype)
        ], axis=1).astype(np.float32)  # cast for dtype consistency with the main path
    x, y, z = traj_xyz[:, 0], traj_xyz[:, 1], traj_xyz[:, 2]
    diff = np.diff(traj_xyz, axis=0)
    norms = np.maximum(np.linalg.norm(diff, axis=1, keepdims=True), 1e-8)
    u = diff / norms
    u = np.vstack([u, u[-1:]])  # repeat the last heading so shapes match
    r = np.sqrt(x ** 2 + y ** 2)
    theta = np.arctan2(y, x)
    return np.column_stack([
        traj_xyz, u,
        r[:, None], np.sin(theta)[:, None], np.cos(theta)[:, None]
    ]).astype(np.float32)

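# Feature layout per timestep (sketch, matching compute_features above):
#   [x, y, z,                       raw position
#    u_x, u_y, u_z,                 unit step direction (last step repeated)
#    r, sin(theta), cos(theta)]     planar range and bearing of the position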
def ensure_data(airport: str, data_dir: str = "data"):
    target = os.path.join(data_dir, airport)
    if os.path.isdir(target) and any(f.endswith(".tsv") for f in os.listdir(target)):
        return target
    print(f"[data] downloading {airport} from HF ...")
    from huggingface_hub import snapshot_download
    snap = snapshot_download(
        "petchthwr/ATFMTraj",
        repo_type="dataset",
        allow_patterns=[f"{airport}/*"],
    )
    os.makedirs(data_dir, exist_ok=True)
    src = os.path.join(snap, airport)
    if not os.path.isdir(target):
        shutil.copytree(src, target)
    return target


# ============================================================================
# DATASET — full past-track windows (no Δ / target)
# ============================================================================

class PastTrackDataset(Dataset):
    """
    Yields fixed-length past-track windows for masked-prediction pretraining.

    Per __getitem__:
    - sample a random window of length past_len from a trajectory
    - return its 9-dim features padded to past_max
    """

    def __init__(self, airport, mode, data_dir,
                 past_max=256, past_min=128,
                 seed=0, epoch_multiplier=4):
        ensure_data(airport, data_dir)
        airport_dir = os.path.join(data_dir, airport)
        raw, _labels = load_atfm(airport, mode, airport_dir)  # labels unused here

        self.past_max = past_max
        self.past_min = past_min
        self.epoch_multiplier = epoch_multiplier
        self.rng_seed = seed

        lengths = np.array(
            [int(np.sum(~np.isnan(raw[i, :, 0]))) for i in range(raw.shape[0])],
            dtype=np.int64,
        )
        keep = lengths >= past_min + 1
        raw = raw[keep]
        lengths = lengths[keep]

        self.positions = []
        for i in range(raw.shape[0]):
            L = int(lengths[i])
            self.positions.append(np.nan_to_num(raw[i, :L], nan=0.0).astype(np.float32))
        del raw
        self.n_traj = len(self.positions)
        print(f"[data] {airport}/{mode}: {self.n_traj} trajectories")

    def __len__(self):
        return self.n_traj * self.epoch_multiplier

    def __getitem__(self, idx):
        traj_idx = idx % self.n_traj
        # Seeded per idx: a given idx always yields the same window
        # (reproducible across epochs); epoch_multiplier controls how many
        # windows each trajectory contributes per epoch.
        rng = np.random.default_rng(self.rng_seed + idx * 9173)
        positions = self.positions[traj_idx]
        L = positions.shape[0]
        past_len = min(self.past_max, L)
        start = int(rng.integers(0, max(1, L - past_len + 1)))
        window = positions[start:start + past_len]
        feats = compute_features(window)
        T = feats.shape[0]
        feat_pad = np.zeros((self.past_max, 9), dtype=np.float32)
        feat_pad[:T] = feats
        return {
            "features": torch.from_numpy(feat_pad),
            "length": torch.tensor(T, dtype=torch.long),
        }

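# Quick smoke test (illustrative; assumes the RKSIa TSVs are available locally):
#   ds = PastTrackDataset("RKSIa", "TRAIN", "data")
#   item = ds[0]
#   item["features"].shape  # torch.Size([256, 9]); item["length"] <= 256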
# ============================================================================
# MODEL — encoder + EMA target + predictor (no decoder, no Δ)
# ============================================================================

class LearnablePosEnc(nn.Module):
    def __init__(self, max_len, d_model):
        super().__init__()
        self.pe = nn.Parameter(torch.randn(1, max_len, d_model) * 0.02)

    def forward(self, x):
        return x + self.pe[:, :x.size(1)]


class PatchTokenizer(nn.Module):
    def __init__(self, in_channels=9, d_model=256, patch_size=8, max_patches=64):
        super().__init__()
        self.patch_size = patch_size
        self.d_model = d_model
        self.embed = nn.Sequential(
            nn.Conv1d(in_channels, d_model // 2, 5, padding=2),
            nn.GELU(),
            nn.Conv1d(d_model // 2, d_model, 3, padding=1),
            nn.GELU(),
        )
        self.pos_enc = LearnablePosEnc(max_patches, d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, features, lengths):
        B, T, C = features.shape
        h = self.embed(features.transpose(1, 2))  # (B, d_model, T)
        N = max(1, T // self.patch_size)
        h = h[:, :, :N * self.patch_size]
        h = h.reshape(B, self.d_model, N, self.patch_size).mean(-1)  # pool per patch
        h = h.transpose(1, 2)  # (B, N, d_model)
        h = self.norm(self.pos_enc(h))
        patch_lengths = (lengths.float() / self.patch_size).clamp(min=1).long()
        patch_lengths = patch_lengths.clamp(max=N)
        return h, patch_lengths

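# Shape walk-through (sketch, with the defaults past_max=256, patch_size=8,
# d_model=256): features (B, 256, 9) -> conv embed (B, 256, 256) -> 32 patches
# of 8 steps -> mean-pooled tokens (B, 32, 256); patch_lengths counts how many
# of those tokens are backed by real (non-padded) timesteps.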
class CausalEncoder(nn.Module):
    def __init__(self, d_model=256, n_heads=8, n_layers=4, d_ff=1024, dropout=0.1):
        super().__init__()
        layer = nn.TransformerEncoderLayer(
            d_model=d_model, nhead=n_heads, dim_feedforward=d_ff,
            dropout=dropout, activation="gelu", batch_first=True,
            norm_first=True,
        )
        self.tf = nn.TransformerEncoder(layer, num_layers=n_layers)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, x, key_padding_mask, attn_mask=None):
        N = x.size(1)
        if attn_mask is None:
            # Default: causal. For pretraining we may pass full bidirectional.
            attn_mask = torch.triu(
                torch.ones(N, N, dtype=torch.bool, device=x.device), diagonal=1
            )
        return self.norm(
            self.tf(x, mask=attn_mask, src_key_padding_mask=key_padding_mask)
        )

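# Attention modes (sketch): the layers are not architecturally causal; the
# mask alone selects the mode.
#   enc(x, kpm)                                              # causal (fine-tune default)
#   enc(x, kpm, attn_mask=torch.zeros(N, N, dtype=torch.bool,
#                                     device=x.device))      # bidirectional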
class JEPAPredictor(nn.Module):
    """Predict target patch latents from context patch latents.
    Adds a query token per masked position via positional embedding."""

    def __init__(self, d_model=256, pred_dim=128, max_patches=64, dropout=0.1):
        super().__init__()
        self.proj_in = nn.Linear(d_model, pred_dim)
        self.target_pe = nn.Parameter(torch.randn(1, max_patches, pred_dim) * 0.02)
        layer = nn.TransformerEncoderLayer(
            d_model=pred_dim, nhead=4, dim_feedforward=pred_dim * 2,
            dropout=dropout, activation="gelu", batch_first=True, norm_first=True,
        )
        self.tf = nn.TransformerEncoder(layer, num_layers=2)
        self.proj_out = nn.Linear(pred_dim, d_model)
        self.norm = nn.LayerNorm(d_model)

    def forward(self, ctx_latents, ctx_idx, tgt_idx):
        """
        ctx_latents: (B, N_ctx, d_model)
        ctx_idx: unused (context tokens already carry positional info via the
                 tokenizer's pos_enc); kept for API symmetry
        tgt_idx: (B, N_tgt) original positions of target patches
        returns: (B, N_tgt, d_model) predicted target latents
        """
        B = ctx_latents.size(0)
        d_pred = self.target_pe.size(-1)
        # Project context to pred_dim and add target positional embeddings
        ctx_p = self.proj_in(ctx_latents)  # (B, N_ctx, d_pred)
        # Gather target PEs at the masked positions
        tgt_pe = self.target_pe.expand(B, -1, -1)  # (B, max_patches, d_pred)
        tgt_idx_expanded = tgt_idx.unsqueeze(-1).expand(-1, -1, d_pred)
        tgt_q = torch.gather(tgt_pe, 1, tgt_idx_expanded)  # (B, N_tgt, d_pred)
        # Concatenate and run transformer
        h = torch.cat([ctx_p, tgt_q], dim=1)  # (B, N_ctx+N_tgt, d_pred)
        h = self.tf(h)
        # Take only the target-position outputs
        N_ctx = ctx_p.size(1)
        h_tgt = h[:, N_ctx:]
        return self.norm(self.proj_out(h_tgt))

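# Call sketch (shapes only, assuming d_model=256 and 32 total patches):
#   ctx_encoded: (B, 20, 256)  visible-patch latents from the online encoder
#   tgt_idx:     (B, 12)       positions of the masked block
#   pred = predictor(ctx_encoded, ctx_idx=None, tgt_idx=tgt_idx)  # (B, 12, 256)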
def make_block_mask(B: int, N: int, mask_ratio: float, rng: np.random.Generator,
                    device, min_visible: int = 4):
    """
    Sample a contiguous block mask per batch element.
    Returns:
        ctx_idx: list of LongTensors (variable length per sample)
        tgt_idx: list of LongTensors
    For batched processing we right-pad and provide separate masks.
    Standalone helper mirroring the per-sample logic inlined in
    FlightJEPAPretrain.forward (which additionally honors valid lengths).
    """
    ctx_idxs = []
    tgt_idxs = []
    for _ in range(B):
        n_mask = max(1, int(round(N * mask_ratio)))
        # Clamp so at least min_visible patches stay visible (never negative).
        n_mask = max(0, min(n_mask, N - min_visible))
        # Random contiguous block start
        start = int(rng.integers(0, N - n_mask + 1))
        all_idx = np.arange(N)
        tgt_mask = (all_idx >= start) & (all_idx < start + n_mask)
        ctx_idxs.append(torch.tensor(all_idx[~tgt_mask], dtype=torch.long, device=device))
        tgt_idxs.append(torch.tensor(all_idx[tgt_mask], dtype=torch.long, device=device))
    return ctx_idxs, tgt_idxs


def gather_by_indices(x: torch.Tensor, idx_list: list[torch.Tensor], pad_value=0.0):
    """x: (B, N, D). idx_list: per-batch index tensors. Returns (B, N_max, D) padded
    plus a (B, N_max) mask of which entries are real."""
    B = x.size(0)
    D = x.size(-1)
    N_max = max((idx.numel() for idx in idx_list), default=1)
    out = torch.full((B, N_max, D), pad_value, device=x.device, dtype=x.dtype)
    mask = torch.zeros((B, N_max), dtype=torch.bool, device=x.device)
    for b in range(B):
        idx = idx_list[b]
        n = idx.numel()
        if n > 0:
            out[b, :n] = x[b, idx]
            mask[b, :n] = True
    return out, mask


def gather_indices_only(idx_list: list[torch.Tensor], device):
    """Pack a list of LongTensors into (B, N_max) padded with zeros."""
    B = len(idx_list)
    N_max = max((idx.numel() for idx in idx_list), default=1)
    out = torch.zeros((B, N_max), dtype=torch.long, device=device)
    mask = torch.zeros((B, N_max), dtype=torch.bool, device=device)
    for b in range(B):
        idx = idx_list[b]
        n = idx.numel()
        if n > 0:
            out[b, :n] = idx
            mask[b, :n] = True
    return out, mask

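# Toy example (sketch): idx_list = [tensor([0, 2]), tensor([1])] with
# x of shape (2, 4, D) gives out of shape (2, 2, D) and
# mask = [[True, True], [True, False]]; row 1 is zero-padded.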
# ============================================================================
# PRETRAIN MODULE
# ============================================================================

class FlightJEPAPretrain(nn.Module):
    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        d = cfg.get("d_model", 256)
        h_ = cfg.get("n_heads", 8)
        n_l = cfg.get("n_layers", 4)
        d_ff = cfg.get("d_ff", 1024)
        dr = cfg.get("dropout", 0.1)
        ps = cfg.get("patch_size", 8)
        past_max = cfg.get("past_max", 256)
        max_patches = past_max // ps
        pred_dim = cfg.get("pred_dim", 128)
        self.ema_decay = cfg.get("ema_decay", 0.998)
        self.max_patches = max_patches

        self.tokenizer = PatchTokenizer(9, d, ps, max_patches)
        self.encoder = CausalEncoder(d, h_, n_l, d_ff, dr)
        self.predictor = JEPAPredictor(d, pred_dim, max_patches, dr)

        # EMA targets start as exact copies and are never trained by SGD.
        self.target_tokenizer = copy.deepcopy(self.tokenizer)
        self.target_encoder = copy.deepcopy(self.encoder)
        for p in self.target_tokenizer.parameters():
            p.requires_grad = False
        for p in self.target_encoder.parameters():
            p.requires_grad = False

    @torch.no_grad()
    def update_ema(self):
        m = self.ema_decay
        for online, target in [(self.tokenizer, self.target_tokenizer),
                               (self.encoder, self.target_encoder)]:
            for po, pt in zip(online.parameters(), target.parameters()):
                pt.data.mul_(m).add_(po.data, alpha=1.0 - m)

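    # EMA update per step (sketch): target <- m * target + (1 - m) * online.
    # With m = 0.998 the target averages roughly the last 1 / (1 - m) = 500
    # optimizer steps of the online weights.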
    def forward(self, features, lengths, mask_ratio: float, rng: np.random.Generator):
        """
        Mask a contiguous block of patches per sample. Encode the visible
        context. Predict masked-patch latents. Compare to the EMA target
        encoder run over the full sequence, at the masked positions.

        Pretraining uses *bidirectional* attention (no causal mask); at
        fine-tune time we restore the causal mask. This gives the encoder
        more signal during pretraining. The encoder's transformer layers
        are not architecturally causal, so only the mask passed in changes
        the mode.
        """
        B = features.size(0)
        device = features.device

        # Tokenize for online (will be partially masked) and target (full).
        patches_full, patch_lens = self.tokenizer(features, lengths)
        N = patches_full.size(1)

        # Padding mask (token absent because the past sequence is shorter
        # than N * patch_size).
        pad_mask = (torch.arange(N, device=device).unsqueeze(0)
                    >= patch_lens.unsqueeze(1))  # True where padded

        # Sample contiguous masks per sample (drawing only from valid patches).
        ctx_idx_list, tgt_idx_list = [], []
        for b in range(B):
            n_valid = int(patch_lens[b].item())
            if n_valid < 8:  # too short to mask meaningfully
                ctx_idx_list.append(torch.arange(n_valid, device=device))
                tgt_idx_list.append(torch.tensor([], dtype=torch.long, device=device))
                continue
            n_mask = max(2, int(round(n_valid * mask_ratio)))
            n_mask = min(n_mask, n_valid - 4)  # keep at least 4 visible
            start = int(rng.integers(0, n_valid - n_mask + 1))
            all_idx = torch.arange(n_valid, device=device)
            tgt_mask = (all_idx >= start) & (all_idx < start + n_mask)
            ctx_idx_list.append(all_idx[~tgt_mask])
            tgt_idx_list.append(all_idx[tgt_mask])

        # Skip the batch if no element produced targets (very short sequences).
        n_targets = sum(int(t.numel()) for t in tgt_idx_list)
        if n_targets == 0:
            return torch.tensor(0.0, device=device, requires_grad=True)

        # Gather the visible (context) tokens into a fresh right-padded tensor
        # and encode them with bidirectional attention. Masked positions never
        # enter this tensor; the key_padding_mask hides only the right-padding.
        N_ctx_max = max((idx.numel() for idx in ctx_idx_list), default=1)
        ctx_tokens = torch.zeros((B, N_ctx_max, patches_full.size(-1)),
                                 device=device, dtype=patches_full.dtype)
        ctx_kpm = torch.ones((B, N_ctx_max), dtype=torch.bool, device=device)  # True=pad
        for b in range(B):
            idx = ctx_idx_list[b]
            n = idx.numel()
            if n > 0:
                ctx_tokens[b, :n] = patches_full[b, idx]
                ctx_kpm[b, :n] = False

        # Bidirectional attention for pretraining (all-False mask).
        bi_mask = torch.zeros((N_ctx_max, N_ctx_max), dtype=torch.bool, device=device)
        ctx_encoded = self.encoder(ctx_tokens, key_padding_mask=ctx_kpm,
                                   attn_mask=bi_mask)

        # Pack target indices into a padded (B, N_tgt_max) tensor.
        tgt_idx_packed, tgt_idx_mask = gather_indices_only(tgt_idx_list, device)

        # No explicit context indices are needed: context tokens already carry
        # their original positions via the tokenizer's pos_enc, so the
        # predictor only takes the target positional queries.
        pred = self.predictor(ctx_encoded, ctx_idx=None, tgt_idx=tgt_idx_packed)
        # pred is already right-padded to N_tgt_max (it follows tgt_idx_packed).

        # Targets: run the EMA target encoder on the *full* sequence
        # (causal mask, like fine-tune time) and gather at target positions.
        with torch.no_grad():
            tgt_patches, _ = self.target_tokenizer(features, lengths)
            tgt_encoded = self.target_encoder(tgt_patches, key_padding_mask=pad_mask)
            tgt_latents, _ = gather_by_indices(tgt_encoded, tgt_idx_list)

        # L1 loss in latent space, averaged over valid target positions only.
        loss_per = F.l1_loss(pred, tgt_latents, reduction="none").mean(-1)  # (B, N_tgt_max)
        loss = (loss_per * tgt_idx_mask.float()).sum() / tgt_idx_mask.sum().clamp(min=1)
        return loss

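# End-to-end shapes (sketch, with defaults B=64, past_max=256, patch_size=8):
#   features (64, 256, 9) -> patches_full (64, 32, 256)
#   mask_ratio=0.5 -> ~16 masked patches; ctx_encoded (64, 16, 256)
#   pred and tgt_latents (64, 16, 256) -> scalar L1 loss over valid targets.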
# ============================================================================
# TRAIN LOOP
# ============================================================================

def device_pick(arg=None):
    if arg:
        return arg
    if torch.cuda.is_available():
        return "cuda"
    if torch.backends.mps.is_available():
        return "mps"
    return "cpu"


def train_one_epoch(model, loader, optimizer, device, mask_ratio_lo, mask_ratio_hi,
                    log_every=50, grad_clip=1.0, rng=None):
    model.train()
    sums = {"loss": 0.0, "n": 0}
    t0 = time.time()
    rng = rng or np.random.default_rng()
    n_batches = len(loader) if hasattr(loader, "__len__") else 0
    for bi, batch in enumerate(loader):
        feats = batch["features"].to(device)
        lens = batch["length"].to(device)
        # Draw a fresh mask ratio per batch, uniform in [lo, hi].
        mr = float(rng.uniform(mask_ratio_lo, mask_ratio_hi))
        loss = model(feats, lens, mask_ratio=mr, rng=rng)
        optimizer.zero_grad()
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
        optimizer.step()
        model.update_ema()  # EMA step after every optimizer step
        bs = feats.size(0)
        sums["loss"] += loss.item() * bs
        sums["n"] += bs
        if (bi + 1) % log_every == 0 or bi == 0:
            dt = time.time() - t0
            print(f"  [batch {bi+1}/{n_batches}] {dt:.1f}s elapsed, "
                  f"mr={mr:.2f}, loss={loss.item():.4f}", flush=True)
    n = max(sums["n"], 1)
    return {"loss": sums["loss"] / n}

def main():
    p = argparse.ArgumentParser()
    p.add_argument("--airport", default="RKSIa")
    p.add_argument("--data-dir", default="data")
    p.add_argument("--tag", default="v7-pretrain")
    p.add_argument("--out-dir", default="runs")
    p.add_argument("--epochs", type=int, default=60)
    p.add_argument("--batch-size", type=int, default=64)
    p.add_argument("--lr", type=float, default=1.5e-4)
    p.add_argument("--weight-decay", type=float, default=1e-4)
    p.add_argument("--past-max", type=int, default=256)
    p.add_argument("--past-min", type=int, default=128)
    p.add_argument("--epoch-multiplier", type=int, default=2)
    p.add_argument("--ema-decay", type=float, default=0.998)
    p.add_argument("--d-model", type=int, default=256)
    p.add_argument("--n-layers", type=int, default=4)
    p.add_argument("--n-heads", type=int, default=8)
    p.add_argument("--patch-size", type=int, default=8)
    p.add_argument("--pred-dim", type=int, default=128)
    p.add_argument("--mask-ratio-lo", type=float, default=0.3)
    p.add_argument("--mask-ratio-hi", type=float, default=0.7)
    p.add_argument("--seed", type=int, default=0)
    p.add_argument("--num-workers", type=int, default=2)
    p.add_argument("--push-to-hub", action="store_true")
    p.add_argument("--hub-model-id", default=None)
    p.add_argument("--trackio-name", default=None)
    args = p.parse_args()

    torch.manual_seed(args.seed)
    np.random.seed(args.seed)
    rng = np.random.default_rng(args.seed)

    device = device_pick()
    print(f"[v7-pretrain] device={device} tag={args.tag}", flush=True)
    if device == "cuda":
        print(f"[v7-pretrain] cuda: {torch.cuda.get_device_name(0)} "
              f"vram={torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB",
              flush=True)
    if HAS_TRACKIO and args.trackio_name:
        trackio.init(project="flight-jepa-v7-pretrain",
                     name=args.trackio_name, config=vars(args))

    train_ds = PastTrackDataset(
        airport=args.airport, mode="TRAIN", data_dir=args.data_dir,
        past_max=args.past_max, past_min=args.past_min,
        seed=args.seed, epoch_multiplier=args.epoch_multiplier,
    )
    train_dl = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True,
                          num_workers=args.num_workers, pin_memory=True,
                          drop_last=True)

    cfg = {
        "d_model": args.d_model, "n_heads": args.n_heads,
        "n_layers": args.n_layers, "d_ff": args.d_model * 4,
        "dropout": 0.1, "patch_size": args.patch_size,
        "past_max": args.past_max, "ema_decay": args.ema_decay,
        "pred_dim": args.pred_dim,
    }
    model = FlightJEPAPretrain(cfg).to(device)
    n_params = sum(p.numel() for p in model.parameters())
    print(f"[v7-pretrain] params={n_params/1e6:.2f}M")

    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr,
                                  weight_decay=args.weight_decay)
    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)

    os.makedirs(args.out_dir, exist_ok=True)
    history = []
    for epoch in range(args.epochs):
        t0 = time.time()
        stats = train_one_epoch(
            model, train_dl, optimizer, device,
            mask_ratio_lo=args.mask_ratio_lo,
            mask_ratio_hi=args.mask_ratio_hi,
            rng=rng,
        )
        scheduler.step()
        elapsed = time.time() - t0
        print(f"[v7-pretrain] ep {epoch+1:03d} loss={stats['loss']:.4f} | {elapsed:.0f}s",
              flush=True)
        history.append({"epoch": epoch + 1, "loss": stats["loss"], "elapsed_s": elapsed})
        if HAS_TRACKIO and args.trackio_name:
            trackio.log({"pretrain/loss": stats["loss"]}, step=epoch + 1)

    # Save only the online tokenizer/encoder; the predictor and EMA copies
    # are pretraining scaffolding and are not needed at fine-tune time.
    out_path = os.path.join(args.out_dir, f"{args.tag}.pt")
    torch.save({
        "encoder_state_dict": model.encoder.state_dict(),
        "tokenizer_state_dict": model.tokenizer.state_dict(),
        "config": cfg, "args": vars(args),
        "history": history,
    }, out_path)
    print(f"[v7-pretrain] saved {out_path}")

    if args.push_to_hub and args.hub_model_id:
        try:
            from huggingface_hub import HfApi
            api = HfApi()
            api.create_repo(args.hub_model_id, exist_ok=True)
            api.upload_file(path_or_fileobj=out_path,
                            path_in_repo=f"{args.tag}.pt",
                            repo_id=args.hub_model_id)
            print(f"[v7-pretrain] uploaded to {args.hub_model_id}")
        except Exception as e:
            print(f"[v7-pretrain] hub upload failed: {e}")

    if HAS_TRACKIO and args.trackio_name:
        trackio.finish()


if __name__ == "__main__":
    main()
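# Fine-tune loading sketch (illustrative; the v6 script is not part of this
# commit, so the receiving attribute names below are assumptions):
#   ckpt = torch.load("runs/v7-pretrain.pt", map_location="cpu")
#   v6_model.tokenizer.load_state_dict(ckpt["tokenizer_state_dict"])
#   v6_model.encoder.load_state_dict(ckpt["encoder_state_dict"])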