guychuk committed
Commit 842fb21 · verified · 1 Parent(s): 032a560

v5: add scheduled sampling for autoregressive rollout stability

Files changed (1)
  1. train_v5_prod.py +831 -0
train_v5_prod.py ADDED
@@ -0,0 +1,831 @@
+ # /// script
+ # requires-python = ">=3.10"
+ # dependencies = [
+ #     "torch>=2.1",
+ #     "numpy",
+ #     "pandas",
+ #     "scikit-learn",
+ #     "huggingface-hub",
+ #     "trackio",
+ # ]
+ # ///
+ """
+ Flight-JEPA v2 — bundled training script for HF Jobs (v5: adds scheduled
+ sampling for autoregressive rollout stability).
+
+ Self-contained: downloads the dataset from HF, trains either the supervised
+ baseline (`--lambda-jepa 0`) or the JEPA-augmented model, runs blindspot
+ scoring + extrapolation eval, and pushes the result to a hub repo.
+
+ Usage (HF Jobs):
+     python train_v5_prod.py --tag baseline --lambda-jepa 0.0 \
+         --hub-model-id guychuk/flight-jepa-v2 --push-to-hub
+
+     python train_v5_prod.py --tag jepa --lambda-jepa 0.5 \
+         --hub-model-id guychuk/flight-jepa-v2 --push-to-hub
+ """
+ from __future__ import annotations
+ import argparse
+ import copy
+ import json
+ import math
+ import os
+ import shutil
+ import sys
+ import time
+
+ import numpy as np
+ import pandas as pd
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from torch.utils.data import Dataset, DataLoader
+
+ try:
+     import trackio
+     HAS_TRACKIO = True
+ except ImportError:
+     HAS_TRACKIO = False
+
+
+ # ============================================================================
+ # DATA UTILITIES (inlined from flight_jepa.data)
+ # ============================================================================
+
+ def load_atfm(dset_name, mode, path):
+     variables = ["X", "Y", "Z"]
+     data, labels = [], None
+     for var in variables:
+         df = pd.read_csv(os.path.join(path, f"{dset_name}_{mode}_{var}.tsv"),
+                          sep="\t", header=None, na_values="NaN")
+         if labels is None:
+             labels = df.values[:, 0]
+         data.append(df.values[:, 1:])
+     return np.stack(data, axis=-1), labels.astype(int)
+
+
+ def compute_features(traj_xyz: np.ndarray) -> np.ndarray:
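+     # Feature layout (9 channels, read off the column_stack below): raw xyz,
+     # unit step direction u (forward-differenced, last step repeated), and
+     # cylindrical extras (r, sin θ, cos θ) of the horizontal position.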
+     if traj_xyz.shape[0] < 2:
+         T = traj_xyz.shape[0]
+         return np.concatenate([
+             traj_xyz, np.zeros((T, 3), dtype=traj_xyz.dtype),
+             np.zeros((T, 3), dtype=traj_xyz.dtype)
+         ], axis=1)
+     x, y, z = traj_xyz[:, 0], traj_xyz[:, 1], traj_xyz[:, 2]
+     diff = np.diff(traj_xyz, axis=0)
+     norms = np.maximum(np.linalg.norm(diff, axis=1, keepdims=True), 1e-8)
+     u = diff / norms
+     u = np.vstack([u, u[-1:]])
+     r = np.sqrt(x ** 2 + y ** 2)
+     theta = np.arctan2(y, x)
+     return np.column_stack([
+         traj_xyz, u,
+         r[:, None], np.sin(theta)[:, None], np.cos(theta)[:, None]
+     ]).astype(np.float32)
+
+
+ def ensure_data(airport: str, data_dir: str = "data"):
+     target = os.path.join(data_dir, airport)
+     if os.path.isdir(target) and any(f.endswith(".tsv") for f in os.listdir(target)):
+         return target
+     print(f"[data] downloading {airport} from HF ...")
+     from huggingface_hub import snapshot_download
+     snap = snapshot_download(
+         "petchthwr/ATFMTraj",
+         repo_type="dataset",
+         allow_patterns=[f"{airport}/*"],
+     )
+     os.makedirs(data_dir, exist_ok=True)
+     src = os.path.join(snap, airport)
+     if not os.path.isdir(target):
+         shutil.copytree(src, target)
+     return target
+
+
+ # ============================================================================
+ # DATASET — variable-length blindspot
+ # ============================================================================
+
+ PAD_VALUE = 0.0
+
+
+ class BlindspotDataset(Dataset):
+     def __init__(self, airport, mode, data_dir,
+                  past_max=256, past_min=60,
+                  delta_min=30, delta_max=120,
+                  seed=0, epoch_multiplier=4):
+         ensure_data(airport, data_dir)
+         airport_dir = os.path.join(data_dir, airport)
+         raw, labels = load_atfm(airport, mode, airport_dir)
+
+         self.past_max = past_max
+         self.past_min = past_min
+         self.delta_min = delta_min
+         self.delta_max = delta_max
+         self.epoch_multiplier = epoch_multiplier
+         self.rng_seed = seed
+
+         lengths = np.array(
+             [int(np.sum(~np.isnan(raw[i, :, 0]))) for i in range(raw.shape[0])],
+             dtype=np.int64,
+         )
+         min_required = past_min + delta_max + 1
+         keep = lengths >= min_required
+         if keep.sum() == 0:
+             raise RuntimeError(
+                 f"No trajectories of length >= {min_required} in {airport}/{mode}"
+             )
+         raw = raw[keep]
+         lengths = lengths[keep]
+         self.labels = labels[keep].astype(np.int64)
+
+         self.positions = []
+         for i in range(raw.shape[0]):
+             L = int(lengths[i])
+             self.positions.append(np.nan_to_num(raw[i, :L], nan=0.0).astype(np.float32))
+         del raw
+
+         self.n_traj = len(self.positions)
+         print(f"[data] {airport}/{mode}: {self.n_traj} trajectories "
+               f"(after filtering for L >= {min_required})")
+
+     def __len__(self):
+         return self.n_traj * self.epoch_multiplier
+
+     def __getitem__(self, idx):
+         traj_idx = idx % self.n_traj
+         rng = np.random.default_rng(self.rng_seed + idx * 9173)
+         positions = self.positions[traj_idx]
+         L = positions.shape[0]
+         delta = int(rng.integers(self.delta_min, self.delta_max + 1))
+         t_in_max = L - delta - 1
+         t_in_min = self.past_min
+         t_in = int(rng.integers(t_in_min, t_in_max + 1))
+
+         past_start = max(0, t_in - self.past_max)
+         past_pos = positions[past_start:t_in]
+         target_pos = positions[t_in:t_in + delta]
+
+         past_features = compute_features(past_pos)
+         T_past = past_features.shape[0]
+         feat_pad = np.full((self.past_max, 9), PAD_VALUE, dtype=np.float32)
+         feat_pad[:T_past] = past_features
+         tgt_pad = np.zeros((self.delta_max, 3), dtype=np.float32)
+         tgt_pad[:delta] = target_pos
+         return {
+             "past_features": torch.from_numpy(feat_pad),
+             "past_length": torch.tensor(T_past, dtype=torch.long),
+             "target_pos": torch.from_numpy(tgt_pad),
+             "delta": torch.tensor(delta, dtype=torch.long),
+             "label": torch.tensor(int(self.labels[traj_idx]), dtype=torch.long),
+         }
+
+
+ # ============================================================================
+ # MODEL
+ # ============================================================================
+
+ def sinusoidal_embedding(values, dim):
+     half = dim // 2
+     device = values.device
+     freqs = torch.exp(-math.log(10000.0)
+                       * torch.arange(half, device=device) / half)
+     angles = values.float().unsqueeze(-1) * freqs
+     emb = torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)
+     if dim % 2 == 1:
+         emb = F.pad(emb, (0, 1))
+     return emb
+
+
+ class LearnablePosEnc(nn.Module):
+     def __init__(self, max_len, d_model):
+         super().__init__()
+         self.pe = nn.Parameter(torch.randn(1, max_len, d_model) * 0.02)
+     def forward(self, x):
+         return x + self.pe[:, :x.size(1)]
+
+
+ class PatchTokenizer(nn.Module):
+     def __init__(self, in_channels=9, d_model=256, patch_size=8, max_patches=64):
+         super().__init__()
+         self.patch_size = patch_size
+         self.d_model = d_model
+         self.embed = nn.Sequential(
+             nn.Conv1d(in_channels, d_model // 2, 5, padding=2),
+             nn.GELU(),
+             nn.Conv1d(d_model // 2, d_model, 3, padding=1),
+             nn.GELU(),
+         )
+         self.pos_enc = LearnablePosEnc(max_patches, d_model)
+         self.norm = nn.LayerNorm(d_model)
+
+     def forward(self, features, lengths):
+         B, T, C = features.shape
+         h = self.embed(features.transpose(1, 2))
+         N = max(1, T // self.patch_size)
+         h = h[:, :, :N * self.patch_size]
+         h = h.reshape(B, self.d_model, N, self.patch_size).mean(-1)
+         h = h.transpose(1, 2)
+         h = self.norm(self.pos_enc(h))
+         patch_lengths = (lengths.float() / self.patch_size).clamp(min=1).long()
+         patch_lengths = patch_lengths.clamp(max=N)
+         return h, patch_lengths
+
+
+ class CausalEncoder(nn.Module):
+     def __init__(self, d_model=256, n_heads=8, n_layers=4, d_ff=1024, dropout=0.1):
+         super().__init__()
+         layer = nn.TransformerEncoderLayer(
+             d_model=d_model, nhead=n_heads, dim_feedforward=d_ff,
+             dropout=dropout, activation="gelu", batch_first=True,
+             norm_first=True,
+         )
+         self.tf = nn.TransformerEncoder(layer, num_layers=n_layers)
+         self.norm = nn.LayerNorm(d_model)
+
+     def forward(self, x, key_padding_mask):
+         N = x.size(1)
+         causal_mask = torch.triu(
+             torch.ones(N, N, dtype=torch.bool, device=x.device), diagonal=1
+         )
+         return self.norm(
+             self.tf(x, mask=causal_mask, src_key_padding_mask=key_padding_mask)
+         )
+
+
+ def last_valid_token(encoded, patch_lengths):
+     B, N, D = encoded.shape
+     idx = (patch_lengths - 1).clamp(min=0).view(B, 1, 1).expand(-1, 1, D)
+     return encoded.gather(1, idx).squeeze(1)
+
+
+ class DeltaEmbedding(nn.Module):
+     def __init__(self, d_model=256, d_freq=64):
+         super().__init__()
+         self.d_freq = d_freq
+         self.proj = nn.Sequential(
+             nn.Linear(d_freq * 2, d_model),
+             nn.GELU(),
+             nn.Linear(d_model, d_model),
+         )
+     def forward(self, delta, t_past):
+         d_emb = sinusoidal_embedding(delta.float(), self.d_freq)
+         rel = delta.float() / t_past.float().clamp(min=1.0)
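+         # rel = Δ / T_past is O(1); the ×100 below spreads it across the
+         # sinusoidal frequency band (an editorial reading of the scaling,
+         # not a documented constant).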
+         rel_emb = sinusoidal_embedding(rel * 100.0, self.d_freq)
+         return self.proj(torch.cat([d_emb, rel_emb], dim=-1))
+
+
+ class GaussianHead(nn.Module):
+     def __init__(self, d_model=256, d_hidden=256):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(d_model, d_hidden), nn.GELU(),
+             nn.Linear(d_hidden, d_hidden), nn.GELU(),
+         )
+         self.mu_head = nn.Linear(d_hidden, 3)
+         self.log_sigma_head = nn.Linear(d_hidden, 3)
+         self.rho_head = nn.Linear(d_hidden, 1)
+
+     def forward(self, h):
+         z = self.net(h)
+         delta_mu = self.mu_head(z)
+         log_sigma = self.log_sigma_head(z).clamp(min=-7.0, max=2.0)
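+         # tanh bounds ρ in (-1, 1); the extra ×0.99 keeps it strictly away
+         # from ±1 so the (1 - ρ²) term in the NLL below stays positive.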
+         rho = torch.tanh(self.rho_head(z)).squeeze(-1) * 0.99
+         return delta_mu, log_sigma, rho
+
+
+ def gaussian_nll_xyz(true_delta, mu, log_sigma, rho, beta: float = 0.5):
+     """
+     β-NLL Gaussian for (x, y, z) — bivariate on xy + independent z.
+
+     Standard NLL has a degenerate minimum where σ→0 ("σ-collapse",
+     Detlefsen 2019). β-NLL (Seitzer et al., arXiv:2203.09168) reweights
+     each sample's NLL by σ^{2β} (detached) so points with large σ get
+     proportionally more gradient on the mean term, preventing collapse.
+
+     β = 0   → standard NLL (collapse-prone, what v2 used)
+     β = 0.5 → recommended; preserves uncertainty learning
+     β = 1   → pure squared-error scaling (loses σ learning)
+     """
+     sx = log_sigma[:, 0].exp()
+     sy = log_sigma[:, 1].exp()
+     sz = log_sigma[:, 2].exp()
+     dx = true_delta[:, 0] - mu[:, 0]
+     dy = true_delta[:, 1] - mu[:, 1]
+     dz = true_delta[:, 2] - mu[:, 2]
+     omr2 = (1.0 - rho * rho).clamp(min=1e-6)
+     z2 = (((dx / sx) ** 2)
+           - 2.0 * rho * (dx / sx) * (dy / sy)
+           + ((dy / sy) ** 2)) / omr2
+     log_det = 2.0 * (log_sigma[:, 0] + log_sigma[:, 1]) + torch.log(omr2)
+     nll_xy = 0.5 * (z2 + log_det + 2.0 * math.log(2.0 * math.pi))
+     nll_z = 0.5 * ((dz / sz) ** 2 + 2.0 * log_sigma[:, 2]
+                    + math.log(2.0 * math.pi))
+
+     if beta > 0.0:
+         # Detached per-sample weights: σ^{2β}. Weight is treated as constant
+         # during backward, so it rescales the gradient without participating
+         # in optimization.
+         # For xy use geometric-mean σ; for z use σz directly.
+         sxy = (sx * sy).sqrt().detach()
+         wxy = sxy.pow(2.0 * beta)
+         wz = sz.detach().pow(2.0 * beta)
+         return wxy * nll_xy + wz * nll_z
+     return nll_xy + nll_z
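+ # Quick numeric illustration of the β-NLL weighting above (editorial, not part
+ # of the training path). Plain NLL's mean-gradient scales as 1/σ², so a sample
+ # with σ = 2 contributes 1/4 the mean-gradient of one with σ = 1; the detached
+ # σ^{2β} weight lifts that ratio to 1/2 at β = 0.5 and to 1 at β = 1:
+ #
+ #     t   = torch.zeros(2, 3)                                # true deltas
+ #     mu  = torch.full((2, 3), 0.1, requires_grad=True)      # constant error
+ #     ls  = torch.log(torch.tensor([[1.0] * 3, [2.0] * 3]))  # σ = 1 vs σ = 2
+ #     rho = torch.zeros(2)
+ #     gaussian_nll_xyz(t, mu, ls, rho, beta=0.5).sum().backward()
+ #     # mu.grad z-column ratio ≈ 1 : 0.5 (vs 1 : 0.25 with beta=0.0)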
+
+
+ class FuturePredictor(nn.Module):
+     def __init__(self, d_model=256, pred_dim=128, dropout=0.1):
+         super().__init__()
+         self.proj_in = nn.Linear(d_model * 2, pred_dim)
+         layer = nn.TransformerEncoderLayer(
+             d_model=pred_dim, nhead=4, dim_feedforward=pred_dim * 2,
+             dropout=dropout, activation="gelu", batch_first=True, norm_first=True,
+         )
+         self.tf = nn.TransformerEncoder(layer, num_layers=2)
+         self.proj_out = nn.Linear(pred_dim, d_model)
+         self.norm = nn.LayerNorm(d_model)
+
+     def forward(self, z_in, delta_emb):
+         h = self.proj_in(torch.cat([z_in, delta_emb], dim=-1)).unsqueeze(1)
+         h = self.tf(h)
+         return self.norm(self.proj_out(h.squeeze(1)))
+
+
+ class FlightJEPAv2(nn.Module):
+     def __init__(self, cfg):
+         super().__init__()
+         self.cfg = cfg
+         d = cfg.get("d_model", 256)
+         h_ = cfg.get("n_heads", 8)
+         n_l = cfg.get("n_layers", 4)
+         d_ff = cfg.get("d_ff", 1024)
+         dr = cfg.get("dropout", 0.1)
+         ps = cfg.get("patch_size", 8)
+         past_max = cfg.get("past_max", 256)
+         max_patches = past_max // ps
+         self.lambda_jepa = cfg.get("lambda_jepa", 0.0)
+         self.ema_decay = cfg.get("ema_decay", 0.998)
+         self.beta_nll = cfg.get("beta_nll", 0.5)
+
+         self.tokenizer = PatchTokenizer(9, d, ps, max_patches)
+         self.encoder = CausalEncoder(d, h_, n_l, d_ff, dr)
+         self.delta_emb = DeltaEmbedding(d, 64)
+         self.head = GaussianHead(d, d)
+         self.fuse_in = nn.Sequential(
+             nn.Linear(d * 2, d), nn.GELU(),
+             nn.Linear(d, d),
+         )
+         self.step_cell = nn.GRUCell(input_size=3, hidden_size=d)
+
+         self.target_tokenizer = copy.deepcopy(self.tokenizer)
+         self.target_encoder = copy.deepcopy(self.encoder)
+         for p in self.target_tokenizer.parameters():
+             p.requires_grad = False
+         for p in self.target_encoder.parameters():
+             p.requires_grad = False
+         self.predictor = FuturePredictor(d, d // 2, dr)
+
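+     # The target branches are EMA (momentum) copies of the online tokenizer
+     # and encoder — the stop-gradient/EMA-target recipe familiar from
+     # BYOL/I-JEPA-style objectives; only the online branch receives gradients.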
+     @torch.no_grad()
+     def update_ema(self):
+         m = self.ema_decay
+         for online, target in [(self.tokenizer, self.target_tokenizer),
+                                (self.encoder, self.target_encoder)]:
+             for po, pt in zip(online.parameters(), target.parameters()):
+                 pt.data.mul_(m).add_(po.data, alpha=1.0 - m)
+
+     def encode_past(self, past_features, past_length):
+         patches, patch_lens = self.tokenizer(past_features, past_length)
+         N = patches.size(1)
+         pad_mask = (torch.arange(N, device=patches.device).unsqueeze(0)
+                     >= patch_lens.unsqueeze(1))
+         encoded = self.encoder(patches, key_padding_mask=pad_mask)
+         z_in = last_valid_token(encoded, patch_lens)
+         return z_in, encoded, patch_lens
+
+     @torch.no_grad()
+     def encode_future_target(self, target_features, target_length):
+         patches, patch_lens = self.target_tokenizer(target_features, target_length)
+         N = patches.size(1)
+         pad_mask = (torch.arange(N, device=patches.device).unsqueeze(0)
+                     >= patch_lens.unsqueeze(1))
+         encoded = self.target_encoder(patches, key_padding_mask=pad_mask)
+         return last_valid_token(encoded, patch_lens)
+
+     def forward(self, past_features, past_length, target_pos, delta, last_pos,
+                 ss_prob: float = 0.0):
+         """
+         ss_prob: scheduled-sampling probability ∈ [0, 1]. With this probability
+             per (batch element, timestep), the *predicted* delta replaces the
+             *true* delta in the recurrence. The NLL loss is always computed
+             against the truth — only the GRU input and the prev_pos accumulator
+             are mixed.
+         """
+         B = past_features.size(0)
+         device = past_features.device
+         delta_max = target_pos.size(1)
+
+         z_in, _, _ = self.encode_past(past_features, past_length)
+         delta_e = self.delta_emb(delta, past_length)
+         h = self.fuse_in(torch.cat([z_in, delta_e], dim=-1))
+
+         prev_pos = last_pos
+         nll_total = torch.zeros(B, device=device)
+         valid_steps = torch.zeros(B, device=device)
+         ade_total = torch.zeros(B, device=device)
+
+         for t in range(delta_max):
+             delta_mu, log_sigma, rho = self.head(h)
+             true_pos_t = target_pos[:, t]
+             true_delta = true_pos_t - prev_pos
+
+             # NLL is always computed against the ground truth.
+             nll = gaussian_nll_xyz(true_delta, delta_mu, log_sigma, rho,
+                                    beta=self.beta_nll)
+             mask = (t < delta).float()
+             nll_total = nll_total + nll * mask
+             ade_total = (ade_total
+                          + (true_delta - delta_mu).pow(2).sum(-1).sqrt() * mask)
+             valid_steps = valid_steps + mask
+
+             # Scheduled sampling: with probability ss_prob, feed the predicted
+             # delta instead of the true delta into the recurrence. Sampled per
+             # (batch element, step).
+             if ss_prob > 0.0 and self.training:
+                 use_pred = (torch.rand(B, device=device) < ss_prob).float().unsqueeze(-1)
+                 # Use the predicted mean as "what we would do at inference time".
+                 # Detach so the prev_pos accumulator gradient doesn't recurse.
+                 fed_delta = use_pred * delta_mu.detach() + (1 - use_pred) * true_delta
+                 fed_pos = use_pred * (prev_pos + delta_mu.detach()) + (1 - use_pred) * true_pos_t
+             else:
+                 fed_delta = true_delta
+                 fed_pos = true_pos_t
+
+             h = self.step_cell(fed_delta, h)
+             prev_pos = fed_pos
+
+         nll_loss = (nll_total / valid_steps.clamp(min=1.0)).mean()
+         ade_train = (ade_total / valid_steps.clamp(min=1.0)).mean().detach()
+
+         losses = {"nll": nll_loss, "ade_train": ade_train, "total": nll_loss}
+
+         if self.lambda_jepa > 0.0:
+             tgt_feat = torch.zeros(B, delta_max, 9, device=device)
+             tgt_feat[..., :3] = target_pos
+             z_target = self.encode_future_target(tgt_feat, delta)
+             z_pred = self.predictor(z_in, delta_e)
+             jepa_loss = F.l1_loss(z_pred, z_target.detach())
+             losses["jepa"] = jepa_loss
+             losses["total"] = nll_loss + self.lambda_jepa * jepa_loss
+
+         return losses
+
+     @torch.no_grad()
+     def rollout(self, past_features, past_length, delta, last_pos, delta_max):
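+         # Free-running inference recurrence: each step feeds the model's own
+         # predicted mean delta back in — exactly the regime that scheduled
+         # sampling (ss_prob above) exposes during training to stabilize long
+         # rollouts.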
+         B = past_features.size(0)
+         device = past_features.device
+         z_in, _, _ = self.encode_past(past_features, past_length)
+         delta_e = self.delta_emb(delta, past_length)
+         h = self.fuse_in(torch.cat([z_in, delta_e], dim=-1))
+         prev_pos = last_pos
+         mu_pos = torch.zeros(B, delta_max, 3, device=device)
+         sigma = torch.zeros(B, delta_max, 3, device=device)
+         rho_out = torch.zeros(B, delta_max, device=device)
+         for t in range(delta_max):
+             delta_mu, log_sigma, rho = self.head(h)
+             cur_pos = prev_pos + delta_mu
+             mu_pos[:, t] = cur_pos
+             sigma[:, t] = log_sigma.exp()
+             rho_out[:, t] = rho
+             h = self.step_cell(delta_mu, h)
+             prev_pos = cur_pos
+         return mu_pos, sigma, rho_out
+
+
+ # ============================================================================
+ # TRAIN + SCORE
+ # ============================================================================
+
+ RMAX_KM = 120.0
+ DELTA_BUCKETS = [(30, 60), (60, 90), (90, 120)]
+ EXTRAP_DELTAS = [180, 300]
+ THRESH_M = [500.0, 1000.0, 2000.0]
+
+
+ def get_last_pos(past_features, past_length):
+     B = past_features.size(0)
+     idx = (past_length - 1).clamp(min=0)
+     return past_features[torch.arange(B, device=past_features.device), idx, :3]
+
+
+ def train_one_epoch(model, loader, optimizer, device, grad_clip=1.0,
+                     log_every: int = 50, ss_prob: float = 0.0):
+     model.train()
+     sums = {"nll": 0.0, "ade": 0.0, "jepa": 0.0, "total": 0.0, "n": 0}
+     t0 = time.time()
+     n_batches = len(loader) if hasattr(loader, "__len__") else 0
+     for bi, batch in enumerate(loader):
+         past_f = batch["past_features"].to(device)
+         past_l = batch["past_length"].to(device)
+         target = batch["target_pos"].to(device)
+         delta = batch["delta"].to(device)
+         last_pos = get_last_pos(past_f, past_l)
+         losses = model(past_f, past_l, target, delta, last_pos,
+                        ss_prob=ss_prob)
+         optimizer.zero_grad()
+         losses["total"].backward()
+         torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clip)
+         optimizer.step()
+         if model.lambda_jepa > 0.0:
+             model.update_ema()
+         bs = past_f.size(0)
+         sums["nll"] += losses["nll"].item() * bs
+         sums["ade"] += losses["ade_train"].item() * bs
+         if "jepa" in losses:
+             sums["jepa"] += losses["jepa"].item() * bs
+         sums["total"] += losses["total"].item() * bs
+         sums["n"] += bs
+
+         if (bi + 1) % log_every == 0 or bi == 0:
+             dt = time.time() - t0
+             rate = (bi + 1) / max(dt, 0.001)
+             print(f"  [batch {bi+1}/{n_batches}] {dt:.1f}s elapsed, "
+                   f"{rate:.1f} batch/s, loss={losses['total'].item():.4f}",
+                   flush=True)
+     n = max(sums["n"], 1)
+     return {k: v / n for k, v in sums.items() if k != "n"} | {
+         "ade_train": sums["ade"] / n
+     }
+
+
+ @torch.no_grad()
+ def score_loader(model, loader, device, extrap_delta=None):
+     model.train(False)
+     delta_max_dataset = loader.dataset.delta_max
+     per_sample = []
+     for batch in loader:
+         past_f = batch["past_features"].to(device)
+         past_l = batch["past_length"].to(device)
+         target = batch["target_pos"].to(device)
+         delta = batch["delta"].to(device)
+         last_pos = get_last_pos(past_f, past_l)
+         if extrap_delta is not None:
+             forced = torch.full_like(delta, extrap_delta)
+             roll_len = extrap_delta
+         else:
+             forced = delta
+             roll_len = int(delta.max().item())
+         if roll_len > delta_max_dataset:
+             continue
+         mu_pos, sigma, rho = model.rollout(past_f, past_l, forced, last_pos, roll_len)
+         active_len = torch.minimum(forced, delta).clamp(min=1)
+         for i in range(past_f.size(0)):
+             L = int(active_len[i].item())
+             per_sample.append({
+                 "mu": mu_pos[i, :L].cpu().numpy(),
+                 "sigma": sigma[i, :L].cpu().numpy(),
+                 "rho": rho[i, :L].cpu().numpy(),
+                 "target": target[i, :L].cpu().numpy(),
+                 "delta_orig": int(delta[i].item()),
+             })
+
+     if not per_sample:
+         return {}
+     ades, fdes = [], []
+     in_circle = {t: [] for t in THRESH_M}
+     nlls, coverage95, delta_orig = [], [], []
+     for s in per_sample:
+         diff = s["target"] - s["mu"]
+         per_step_l2 = np.linalg.norm(diff, axis=1) * RMAX_KM * 1000.0
+         ades.append(per_step_l2.mean())
+         fdes.append(per_step_l2[-1])
+         for t in THRESH_M:
+             in_circle[t].append(per_step_l2[-1] <= t)
+         sx = max(s["sigma"][-1, 0], 1e-9)
+         sy = max(s["sigma"][-1, 1], 1e-9)
+         sz = max(s["sigma"][-1, 2], 1e-9)
+         rho_xy = s["rho"][-1]
+         dx = diff[-1, 0]; dy = diff[-1, 1]; dz = diff[-1, 2]
+         omr2 = max(1.0 - rho_xy * rho_xy, 1e-6)
+         z2 = ((dx / sx) ** 2 - 2 * rho_xy * (dx / sx) * (dy / sy)
+               + (dy / sy) ** 2) / omr2
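+         # 5.991 is the 95% quantile of the χ² distribution with 2 dof, i.e.
+         # the squared Mahalanobis radius of the bivariate Gaussian's 95%
+         # ellipse — so this flags whether the final point falls inside it.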
+         coverage95.append(z2 <= 5.991)
+         log_det = 2 * (math.log(sx) + math.log(sy)) + math.log(omr2)
+         nll_xy = 0.5 * (z2 + log_det + 2 * math.log(2 * math.pi))
+         nll_z = 0.5 * ((dz / sz) ** 2 + 2 * math.log(sz) + math.log(2 * math.pi))
+         nlls.append(nll_xy + nll_z)
+         delta_orig.append(s["delta_orig"])
+     ades = np.array(ades); fdes = np.array(fdes)
+     nlls = np.array(nlls); coverage95 = np.array(coverage95, dtype=float)
+     delta_orig = np.array(delta_orig)
+     out = {
+         "ade_m": float(ades.mean()),
+         "fde_m": float(fdes.mean()),
+         "fde_median_m": float(np.median(fdes)),
+         "nll_xy_z": float(nlls.mean()),
+         "coverage_95": float(coverage95.mean()),
+         "n": len(ades),
+     }
+     for t in THRESH_M:
+         out[f"miss_rate_{int(t)}m"] = float(1.0 - np.mean(in_circle[t]))
+     if extrap_delta is None:
+         per_bucket = {}
+         for lo, hi in DELTA_BUCKETS:
+             mask = (delta_orig >= lo) & (delta_orig <= hi)
+             if mask.sum() == 0:
+                 continue
+             per_bucket[f"delta_{lo}_{hi}"] = {
+                 "ade_m": float(ades[mask].mean()),
+                 "fde_m": float(fdes[mask].mean()),
+                 "coverage_95": float(coverage95[mask].mean()),
+                 "n": int(mask.sum()),
+             }
+         out["per_bucket"] = per_bucket
+     return out
+
+
+ def main():
+     p = argparse.ArgumentParser()
+     p.add_argument("--airport", default="RKSIa")
+     p.add_argument("--data-dir", default="data")
+     p.add_argument("--tag", default="run")
+     p.add_argument("--out-dir", default="runs")
+     p.add_argument("--epochs", type=int, default=30)
+     p.add_argument("--batch-size", type=int, default=64)
+     p.add_argument("--lr", type=float, default=1e-4)
+     p.add_argument("--weight-decay", type=float, default=1e-4)
+     p.add_argument("--past-max", type=int, default=256)
+     p.add_argument("--past-min", type=int, default=60)
+     p.add_argument("--delta-min", type=int, default=30)
+     p.add_argument("--delta-max", type=int, default=120)
+     p.add_argument("--extrap-delta-max", type=int, default=300)
+     p.add_argument("--epoch-multiplier", type=int, default=4)
+     p.add_argument("--lambda-jepa", type=float, default=0.0)
+     p.add_argument("--ema-decay", type=float, default=0.998)
+     p.add_argument("--beta-nll", type=float, default=0.5,
+                    help="β-NLL exponent (Seitzer 2022). 0=plain NLL, 0.5=recommended.")
+     p.add_argument("--ss-max", type=float, default=0.0,
+                    help="Max scheduled-sampling probability (0=teacher forcing only, "
+                         "0.5=Bengio et al. recommended).")
+     p.add_argument("--ss-warmup-frac", type=float, default=0.5,
+                    help="Fraction of training over which ss_prob ramps linearly "
+                         "from 0 to ss-max.")
+     p.add_argument("--d-model", type=int, default=256)
+     p.add_argument("--n-layers", type=int, default=4)
+     p.add_argument("--n-heads", type=int, default=8)
+     p.add_argument("--patch-size", type=int, default=8)
+     p.add_argument("--seed", type=int, default=0)
+     p.add_argument("--num-workers", type=int, default=2)
+     p.add_argument("--push-to-hub", action="store_true")
+     p.add_argument("--hub-model-id", default=None)
+     p.add_argument("--trackio-name", default=None)
+     args = p.parse_args()
+
+     torch.manual_seed(args.seed)
+     np.random.seed(args.seed)
+
+     device = "cuda" if torch.cuda.is_available() else "cpu"
+     print(f"[v2] device={device} tag={args.tag} "
+           f"lambda_jepa={args.lambda_jepa} beta_nll={args.beta_nll} "
+           f"ss_max={args.ss_max} ss_warmup_frac={args.ss_warmup_frac}",
+           flush=True)
+     if device == "cuda":
+         print(f"[v2] cuda device: {torch.cuda.get_device_name(0)} "
+               f"vram={torch.cuda.get_device_properties(0).total_memory/1e9:.1f}GB",
+               flush=True)
+     else:
+         print("[v2] WARNING: CUDA not available, training on CPU. "
+               "This will be very slow.", flush=True)
+
+     if HAS_TRACKIO and args.trackio_name:
+         trackio.init(project="flight-jepa-v2", name=args.trackio_name,
+                      config=vars(args))
+
+     train_ds = BlindspotDataset(
+         airport=args.airport, mode="TRAIN", data_dir=args.data_dir,
+         past_max=args.past_max, past_min=args.past_min,
+         delta_min=args.delta_min, delta_max=args.delta_max,
+         seed=args.seed, epoch_multiplier=args.epoch_multiplier,
+     )
+     test_ds = BlindspotDataset(
+         airport=args.airport, mode="TEST", data_dir=args.data_dir,
+         past_max=args.past_max, past_min=args.past_min,
+         delta_min=args.delta_min, delta_max=args.delta_max,
+         seed=args.seed + 1, epoch_multiplier=1,
+     )
+     extrap_ds = BlindspotDataset(
+         airport=args.airport, mode="TEST", data_dir=args.data_dir,
+         past_max=args.past_max, past_min=args.past_min,
+         delta_min=args.delta_min, delta_max=args.extrap_delta_max,
+         seed=args.seed + 99, epoch_multiplier=1,
+     )
+
+     train_dl = DataLoader(train_ds, batch_size=args.batch_size, shuffle=True,
+                           num_workers=args.num_workers, pin_memory=True,
+                           drop_last=True)
+     test_dl = DataLoader(test_ds, batch_size=args.batch_size, shuffle=False,
+                          num_workers=args.num_workers, pin_memory=True)
+     extrap_dl = DataLoader(extrap_ds, batch_size=args.batch_size, shuffle=False,
+                            num_workers=args.num_workers, pin_memory=True)
+
+     cfg = {
+         "d_model": args.d_model, "n_heads": args.n_heads,
+         "n_layers": args.n_layers, "d_ff": args.d_model * 4,
+         "dropout": 0.1, "patch_size": args.patch_size,
+         "past_max": args.past_max, "lambda_jepa": args.lambda_jepa,
+         "ema_decay": args.ema_decay, "beta_nll": args.beta_nll,
+     }
+     model = FlightJEPAv2(cfg).to(device)
+     n_params = sum(p.numel() for p in model.parameters())
+     print(f"[v2] params={n_params/1e6:.2f}M")
+
+     optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr,
+                                   weight_decay=args.weight_decay)
+     scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)
+
+     os.makedirs(args.out_dir, exist_ok=True)
+     history = []
+     best_fde = float("inf")
+     best_state = None
+
+     for epoch in range(args.epochs):
+         t0 = time.time()
+         # Linear ramp: ss_prob goes 0 → ss_max over the first ss_warmup_frac
+         # of training, then holds at ss_max.
+         warmup_epochs = max(1, int(args.epochs * args.ss_warmup_frac))
+         ss_prob = min(args.ss_max,
+                       args.ss_max * (epoch + 1) / warmup_epochs)
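+         # Worked example: epochs=30, ss_warmup_frac=0.5 → warmup_epochs=15;
+         # with ss_max=0.25 (an illustrative value), ss_prob ≈ 0.017 at epoch 1,
+         # reaches 0.25 at epoch 15, and stays at 0.25 for epochs 16-30.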
+         train_stats = train_one_epoch(model, train_dl, optimizer, device,
+                                       ss_prob=ss_prob)
+         scheduler.step()
+
+         score_stats = None
+         if (epoch + 1) % 5 == 0 or epoch == args.epochs - 1:
+             score_stats = score_loader(model, test_dl, device)
+             if score_stats and score_stats["fde_m"] < best_fde:
+                 best_fde = score_stats["fde_m"]
+                 best_state = {k: v.detach().cpu().clone()
+                               for k, v in model.state_dict().items()}
+
+         elapsed = time.time() - t0
+         log = {
+             "epoch": epoch + 1, "elapsed_s": elapsed,
+             "lr": optimizer.param_groups[0]["lr"],
+             "train": train_stats, "score": score_stats,
+         }
+         history.append(log)
+         msg = (f"[v2] ep {epoch+1:03d} | loss={train_stats['total']:.4f} "
+                f"nll={train_stats['nll']:.4f} ade_t={train_stats['ade_train']:.4f} "
+                f"jepa={train_stats['jepa']:.4f} ss={ss_prob:.2f}")
+         if score_stats:
+             msg += f" | fde={score_stats['fde_m']:.0f}m ade={score_stats['ade_m']:.0f}m"
+         msg += f" | {elapsed:.0f}s"
+         print(msg, flush=True)
+
+         if HAS_TRACKIO and args.trackio_name:
+             tlog = {f"train/{k}": v for k, v in train_stats.items()}
+             if score_stats:
+                 tlog.update({f"test/{k}": v for k, v in score_stats.items()
+                              if isinstance(v, (int, float))})
+             trackio.log(tlog, step=epoch + 1)
+
+     final = {"in_distribution": score_loader(model, test_dl, device)}
+     for d in EXTRAP_DELTAS:
+         final[f"extrap_delta_{d}"] = score_loader(model, extrap_dl, device, extrap_delta=d)
+
+     if best_state is not None:
+         model.load_state_dict(best_state)
+
+     out_path = os.path.join(args.out_dir, f"{args.tag}.pt")
+     torch.save({
+         "state_dict": model.state_dict(),
+         "config": cfg, "args": vars(args),
+         "history": history, "final": final,
+         "best_fde_m": best_fde,
+     }, out_path)
+     print(f"[v2] saved {out_path}")
+
+     summary_path = os.path.join(args.out_dir, f"{args.tag}_summary.json")
+     with open(summary_path, "w") as f:
+         json.dump({
+             "tag": args.tag, "lambda_jepa": args.lambda_jepa,
+             "beta_nll": args.beta_nll,
+             "n_params": n_params, "best_fde_m": best_fde,
+             "final": final, "args": vars(args),
+         }, f, indent=2, default=float)
+     print(f"[v2] summary -> {summary_path}", flush=True)
+
+     if args.push_to_hub and args.hub_model_id:
+         try:
+             from huggingface_hub import HfApi
+             api = HfApi()
+             api.create_repo(args.hub_model_id, exist_ok=True)
+             for path, fname in [(out_path, f"{args.tag}.pt"),
+                                 (summary_path, f"{args.tag}_summary.json")]:
+                 api.upload_file(path_or_fileobj=path, path_in_repo=fname,
+                                 repo_id=args.hub_model_id)
+             print(f"[v2] uploaded to {args.hub_model_id}")
+         except Exception as e:
+             print(f"[v2] hub upload failed: {e}")
+
+     if HAS_TRACKIO and args.trackio_name:
+         trackio.finish()
+
+
+ if __name__ == "__main__":
+     main()