| """ |
| Qwen3 + Titans Deep Integration (v3) - BABILong QA1 (32k) |
| |
| Key improvements over v1/v2: |
| 1. Deep Attention Integration: Memory participates in attention K/V computation |
| 2. Cross-chunk gradient flow: Support detach_mem_state=False for better long-term learning |
| 3. Memory-Augmented Attention: Memory as additional context source |
| 4. Dual-path architecture: Both attention-level and layer-level memory integration |
| 5. Better memory state management with optional gradient flow |
| """ |
|
|
| import os |
| import sys |
|
|
| |
| |
| |
# Disable transformers' torchao integration before any transformers import,
# so the (mocked-out) torchao package is never actually exercised.
os.environ["TRANSFORMERS_NO_TORCHAO"] = "1"
|
|
| |
| class _MockTorchAO: |
| def __getattr__(self, name): |
| return _MockTorchAO() |
| def __call__(self, *args, **kwargs): |
| return _MockTorchAO() |
|
|
# Pre-register mock torchao modules so any later `import torchao` (e.g. from
# inside transformers) binds to the inert stub instead of the real package.
sys.modules['torchao'] = _MockTorchAO()
sys.modules['torchao.quantization'] = _MockTorchAO()
|
|
| import json |
| import math |
| import argparse |
| import logging |
| import weakref |
| from contextlib import nullcontext |
| from dataclasses import dataclass, asdict, field |
| from typing import Optional, Dict, Any, List, Tuple, Callable |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torch.distributed as dist |
| from torch.utils.data import Dataset, DataLoader |
| from torch.optim import AdamW |
| from torch.optim.lr_scheduler import CosineAnnealingLR |
| from torch.nn.parallel import DistributedDataParallel as DDP |
| from tqdm import tqdm |
|
|
| from einops import rearrange, repeat |
|
|
| |
# Make the repository root importable so `titans_pytorch` resolves below.
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
|
|
| |
| from titans_pytorch import NeuralMemory, MemoryMLP |
| from titans_pytorch.neural_memory import NeuralMemState |
|
|
# Module-wide logging: timestamped INFO-level messages on the root handler.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
|
|
|
|
| |
| |
| |
|
|
@dataclass
class TrainingConfig:
    """All hyperparameters and paths for the v3 Qwen3 + Titans BABILong run.

    Grouped by concern; defaults reflect the QA1/32k setup described in the
    module docstring.
    """

    # --- Paths (cluster-specific defaults) ---
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_titans_babilong_v3"

    # --- Optimization schedule ---
    num_epochs: int = 10
    batch_size: int = 1
    gradient_accumulation_steps: int = 16
    max_grad_norm: float = 1.0

    # --- Per-group learning rates (see QwenTitansForBABILongV3.get_param_groups) ---
    lr_memory: float = 1e-4             # Titans neural-memory core + gates
    lr_memory_attention: float = 5e-5   # memory-attention / context-proj modules
    lr_pretrained: float = 5e-6         # frozen-ish pretrained Qwen weights
    weight_decay: float = 0.01
    warmup_steps: int = 100

    # --- Sequence chunking & Titans memory configuration ---
    chunk_size: int = 4096              # transformer forward chunk length
    use_memory: bool = True
    memory_chunk_size: int = 128        # NeuralMemory retrieval chunk size
    memory_batch_size: int = 128        # NeuralMemory store batch size
    memory_heads: int = 8
    memory_dim_head: int = 64
    memory_depth: int = 1               # MemoryMLP depth
    memory_layer_stride: int = 8        # wrap every N-th decoder layer
    memory_fp32: bool = True            # run memory math in float32

    # --- v3 integration switches ---
    detach_mem_state: bool = True       # True: no gradient across chunk boundaries
    deep_memory_integration: bool = False   # enable MemoryAugmentedAttention path
    memory_as_context: bool = False         # enable memory_context_proj path
    num_memory_tokens: int = 16
    memory_gate_bias: float = -2.0      # negative bias -> memory starts nearly gated off
    use_momentum: bool = True
    momentum_order: int = 1

    # --- Cross-chunk gradient flow (only used when detach_mem_state=False) ---
    gradient_checkpoint_memory: bool = False
    cross_chunk_gradient_steps: int = 2

    # --- Evaluation / logging / debugging knobs ---
    eval_steps: int = 200
    eval_topk: int = 0                  # 0 disables top-k accuracy tracking
    logging_steps: int = 10
    log_every_batches: int = 80
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # --- Mixed precision / backward strategy ---
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = False
    chunkwise_backward: bool = True     # backward per chunk instead of per sequence

    # --- Tokenization / dataset limits ---
    max_length: int = 32768
    answer_reserve_tokens: int = 64     # tokens held back from the prompt for the answer
    label_prefix_tokens: int = 0        # optionally supervise N prompt tokens before the answer
    max_samples: Optional[int] = 500

    # --- Distributed setup ---
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # --- Checkpointing ---
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
|
|
|
|
| |
| |
| |
|
|
class BABILongDataset(Dataset):
    """BABILong QA dataset producing fixed-length causal-LM training samples.

    Each item is rendered as ``"<context>\n\nQuestion: <q>\nAnswer: <target>"``,
    tokenized, right-padded to ``max_length``, and labeled so that only the
    answer tokens (plus an optional prompt-tail prefix) contribute to the loss.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        """Load the JSON sample list and remember tokenization limits.

        Args:
            data_path: path to a JSON list of dicts with ``input``,
                ``question`` and ``target`` keys.
            tokenizer: HF-style tokenizer (callable, with ``pad_token_id``).
            max_length: total sequence length after padding/truncation.
            answer_reserve_tokens: prompt tokens withheld so the answer fits.
            label_prefix_tokens: if >0, also supervise this many prompt tokens
                immediately before the answer span.
            max_samples: optional cap on the number of samples used.
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return ``input_ids``/``labels``/``attention_mask``, each [max_length]."""
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]

        # Falls back to 0 when the tokenizer defines no pad token.
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Truncate the prompt so at least `reserve` positions remain for the answer.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Leading space so the answer tokenizes as a continuation of "Answer:".
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer to whatever room the prompt left.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Labels: -100 everywhere except the answer span (and optional prefix).
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                # Also supervise up to `label_prefix_tokens` prompt tokens
                # directly preceding the answer.
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix:start] = input_ids[start - prefix:start]

        # Right-pad to a fixed max_length so batches can be stacked.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
|
|
|
|
def collate_fn(batch):
    """Stack a list of per-sample tensor dicts into one batched dict.

    All samples are assumed to share the same keys and per-key tensor shapes
    (the dataset pads to a fixed ``max_length``).
    """
    stacked = {}
    for key in batch[0]:
        stacked[key] = torch.stack([sample[key] for sample in batch], dim=0)
    return stacked
|
|
|
|
| |
| |
| |
|
|
class MemoryAugmentedAttention(nn.Module):
    """
    Deep integration of memory into attention mechanism.
    Memory provides additional context that enhances hidden states.

    The retrieved memory is transformed, gated per attention head, and folded
    back into the hidden states through a zero-initialized projection, so the
    module is an exact identity on ``hidden_states`` at initialization.

    Note: ``num_memory_tokens``, ``memory_dim_head`` and the ``attention_mask``
    argument are accepted for interface compatibility but are currently unused.
    """
    def __init__(
        self,
        hidden_size: int,
        num_attention_heads: int,
        num_memory_tokens: int = 16,
        memory_dim_head: int = 64,
        memory_fp32: bool = True,
        gate_bias: float = -2.0,
    ):
        super().__init__()
        self.hidden_size = hidden_size
        self.num_heads = num_attention_heads
        self.head_dim = hidden_size // num_attention_heads
        self.memory_fp32 = memory_fp32

        # Two-layer MLP that adapts retrieved memory to the residual stream.
        self.memory_transform = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
        )

        # Per-head scalar gate; negative bias keeps the memory path nearly
        # closed at the start of training (sigmoid(-2) ~ 0.12).
        self.memory_gate = nn.Parameter(torch.full((num_attention_heads, 1, 1), gate_bias))

        # Zero-initialized output projection => identity mapping at init.
        self.memory_output_proj = nn.Linear(hidden_size, hidden_size, bias=False)
        nn.init.zeros_(self.memory_output_proj.weight)

    def forward(
        self,
        hidden_states: torch.Tensor,
        memory_context: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """
        Args:
            hidden_states: [batch, seq_len, hidden_size] - current hidden states
            memory_context: [batch, seq_len, hidden_size] - retrieved memory
            attention_mask: accepted but unused
        Returns:
            enhanced_hidden: [batch, seq_len, hidden_size]
        """
        batch_size, seq_len, _ = hidden_states.shape

        mem_transformed = self.memory_transform(memory_context)

        # Split into heads: (b, n, h*d) -> (b, h, n, d). Plain view/permute
        # replaces the previous einops.rearrange calls.
        mem_heads = (
            mem_transformed
            .view(batch_size, seq_len, self.num_heads, self.head_dim)
            .permute(0, 2, 1, 3)
        )

        # (h, 1, 1) gate broadcasts across batch, sequence and head dim.
        gate = torch.sigmoid(self.memory_gate)
        mem_contribution = mem_heads * gate

        # Merge heads back: (b, h, n, d) -> (b, n, h*d).
        mem_contribution = (
            mem_contribution
            .permute(0, 2, 1, 3)
            .reshape(batch_size, seq_len, self.hidden_size)
        )

        # Residual add through the zero-initialized projection.
        enhanced = hidden_states + self.memory_output_proj(mem_contribution)

        return enhanced
|
|
|
|
| |
| |
| |
|
|
class QwenDecoderLayerWithDeepMemory(nn.Module):
    """
    v3: Deep integration of Titans memory into Qwen decoder layer.

    Wraps a pretrained decoder layer; after the base layer runs, a Titans
    NeuralMemory retrieves context for every position (and stores masked
    positions), which is blended back into the hidden states through up to
    three paths: MemoryAugmentedAttention (deep_integration),
    memory_context_proj (memory_as_context), and a learned sigmoid gate.

    Key differences from v1/v2:
    1. Memory participates in attention computation (not just post-processing)
    2. Support for cross-chunk gradient flow
    3. Dual-path architecture: attention-level + layer-level memory
    4. Better memory state management
    """
    def __init__(
        self,
        base_layer: nn.Module,
        layer_idx: int,
        *,
        hidden_size: int,
        num_attention_heads: int,
        chunk_size: int,
        batch_size: int,
        dim_head: int,
        num_heads: int,
        memory_depth: int,
        memory_fp32: bool,
        detach_mem_state: bool,
        deep_integration: bool,
        memory_as_context: bool,
        num_memory_tokens: int,
        memory_gate_bias: float,
        use_momentum: bool,
        momentum_order: int,
        parent_model: Optional[nn.Module] = None,
    ):
        super().__init__()
        self.layer = base_layer
        self.layer_idx = layer_idx
        self.memory_fp32 = memory_fp32
        self.detach_mem_state = bool(detach_mem_state)
        self.deep_integration = deep_integration
        self.memory_as_context = memory_as_context
        # Recurrent Titans state carried across chunk-wise forward calls.
        self.memory_state: Optional[NeuralMemState] = None
        # weakref avoids a reference cycle between layer and parent model.
        self.parent_model_ref = weakref.ref(parent_model) if parent_model is not None else None

        # Counts forward calls since the last reset; used to bound how many
        # chunks back gradients may flow when detach_mem_state is False.
        self._chunk_counter = 0
        self._gradient_steps_back = 2

        # Inner memory MLP operates at per-head dimension.
        memory_model = MemoryMLP(
            dim=dim_head,
            depth=memory_depth,
            expansion_factor=2.0,
        )

        self.neural_memory = NeuralMemory(
            dim=hidden_size,
            chunk_size=chunk_size,
            batch_size=batch_size,
            dim_head=dim_head,
            heads=num_heads,
            model=memory_model,
            momentum=use_momentum,
            momentum_order=momentum_order,
            qk_rmsnorm=True,
            pre_rmsnorm=True,
            default_step_transform_max_lr=1e-2,
            init_adaptive_step_bias=-4.0,
            max_grad_norm=1.0,
            spectral_norm_surprises=True,
            use_accelerated_scan=False,
        )

        # Position-wise gate over the retrieved memory; final linear is
        # zero-weight with a negative bias so the memory path starts
        # almost closed (sigmoid(memory_gate_bias)).
        self.mem_gate = nn.Sequential(
            nn.Linear(hidden_size * 2, hidden_size),
            nn.SiLU(),
            nn.Linear(hidden_size, hidden_size),
            nn.Sigmoid(),
        )
        nn.init.zeros_(self.mem_gate[-2].weight)
        nn.init.constant_(self.mem_gate[-2].bias, memory_gate_bias)

        # Optional attention-level integration path.
        if deep_integration:
            self.memory_attention = MemoryAugmentedAttention(
                hidden_size=hidden_size,
                num_attention_heads=num_attention_heads,
                num_memory_tokens=num_memory_tokens,
                memory_dim_head=dim_head,
                memory_fp32=memory_fp32,
                gate_bias=memory_gate_bias,
            )
        else:
            self.memory_attention = None

        # Optional additive context path; zero-init => no-op at start.
        if memory_as_context:
            self.memory_context_proj = nn.Sequential(
                nn.Linear(hidden_size, hidden_size),
                nn.SiLU(),
                nn.Linear(hidden_size, hidden_size),
            )
            nn.init.zeros_(self.memory_context_proj[-1].weight)
            nn.init.zeros_(self.memory_context_proj[-1].bias)
        else:
            self.memory_context_proj = None

        # Move the new modules onto the wrapped layer's device/dtype
        # (memory optionally stays in fp32 for numerical stability).
        try:
            layer_device = next(base_layer.parameters()).device
            layer_dtype = next(base_layer.parameters()).dtype
        except StopIteration:
            layer_device = None
            layer_dtype = None

        if layer_device is not None:
            mem_dtype = torch.float32 if memory_fp32 else layer_dtype
            self.neural_memory = self.neural_memory.to(device=layer_device, dtype=mem_dtype)
        if layer_dtype is not None:
            self.mem_gate = self.mem_gate.to(device=layer_device, dtype=layer_dtype)
            if self.memory_attention is not None:
                self.memory_attention = self.memory_attention.to(device=layer_device, dtype=layer_dtype)
            if self.memory_context_proj is not None:
                self.memory_context_proj = self.memory_context_proj.to(device=layer_device, dtype=layer_dtype)

    def reset_memory_state(self):
        """Drop the recurrent memory state (call at each new sequence)."""
        self.memory_state = None
        self._chunk_counter = 0

    def set_gradient_steps_back(self, steps: int):
        """Control how many chunks back gradient can flow."""
        self._gradient_steps_back = steps

    def _get_store_mask(self, hidden_states: torch.Tensor) -> Optional[torch.Tensor]:
        """Fetch the per-token store mask the parent model stashed on itself.

        Returns None when no parent is registered, no mask is set, or the
        mask's (batch, seq) shape does not match ``hidden_states``.
        """
        parent_model = self.parent_model_ref() if self.parent_model_ref is not None else None
        if parent_model is None or not hasattr(parent_model, "_mem_store_mask"):
            return None
        store_mask = getattr(parent_model, "_mem_store_mask")
        if store_mask is None:
            return None
        store_mask = store_mask.to(device=hidden_states.device).bool()
        if store_mask.shape[:2] != hidden_states.shape[:2]:
            return None
        return store_mask

    def _should_detach_state(self) -> bool:
        """Determine if memory state should be detached based on chunk counter.

        Side effect: increments the per-sequence chunk counter when
        cross-chunk gradients are enabled (detach_mem_state False).
        """
        if self.detach_mem_state:
            return True

        self._chunk_counter += 1
        return self._chunk_counter > self._gradient_steps_back

    def forward(self, *args, **kwargs):
        """Run the wrapped decoder layer, then blend in Titans memory.

        Accepts/returns whatever the wrapped Qwen layer does: either a bare
        hidden-states tensor or a tuple whose first element is hidden states.
        """
        outputs = self.layer(*args, **kwargs)

        if isinstance(outputs, (tuple, list)):
            hidden_states = outputs[0]
            rest = outputs[1:]
        else:
            hidden_states = outputs
            rest = None

        full_store_mask = self._get_store_mask(hidden_states)

        # Memory math optionally runs in fp32 regardless of model dtype.
        mem_inp = hidden_states.float() if self.memory_fp32 else hidden_states

        store_seq = None
        store_mask = full_store_mask
        if store_mask is not None:
            store_seq = mem_inp
            # Drop a leading context-only token (the chunk overlap position
            # whose mask is False for every sample).
            if store_mask.shape[1] > 0 and not store_mask[:, 0].any():
                store_seq = store_seq[:, 1:]
                store_mask = store_mask[:, 1:]

            # NeuralMemory stores in fixed-size chunks; trim the remainder.
            store_chunk = self.neural_memory.store_chunk_size
            remainder = store_seq.shape[1] % store_chunk
            if remainder != 0:
                store_seq = store_seq[:, :-remainder]
                store_mask = store_mask[:, :-remainder]

        if store_mask is not None and store_seq is not None:
            # Defensive re-alignment in case the trims above diverged.
            if store_mask.shape[1] != store_seq.shape[1]:
                min_len = min(store_mask.shape[1], store_seq.shape[1])
                store_seq = store_seq[:, :min_len]
                store_mask = store_mask[:, :min_len]

            if store_seq.shape[1] == 0:
                store_seq = None
                store_mask = None

        # Disable autocast inside the memory when running it in fp32.
        mem_ctx = (
            torch.amp.autocast(device_type=hidden_states.device.type, enabled=False)
            if self.memory_fp32
            else nullcontext()
        )

        should_detach = self._should_detach_state()

        with mem_ctx:
            retrieved, next_state = self.neural_memory(
                mem_inp,
                store_seq=store_seq,
                state=self.memory_state,
                store_mask=store_mask,
                detach_mem_state=should_detach,
            )
            self.memory_state = next_state

        # NOTE(review): code below assumes `retrieved` is never None from
        # NeuralMemory — the gating path would raise otherwise; confirm
        # against titans_pytorch.
        if retrieved is not None:
            retrieved = retrieved.to(dtype=hidden_states.dtype)

            # Zero out memory output at padded / masked-off positions.
            if full_store_mask is not None and full_store_mask.shape[:2] == retrieved.shape[:2]:
                retrieved = retrieved * full_store_mask.unsqueeze(-1).to(dtype=retrieved.dtype)

        # Path 1 (optional): attention-level memory integration.
        if self.memory_attention is not None:
            hidden_states = self.memory_attention(
                hidden_states=hidden_states,
                memory_context=retrieved,
                attention_mask=None,
            )

        # Path 2 (optional): additive projected memory context.
        if self.memory_context_proj is not None:
            context_enhancement = self.memory_context_proj(retrieved)
            hidden_states = hidden_states + context_enhancement

        # Path 3 (always): gated residual blend of retrieved memory.
        gate = self.mem_gate(torch.cat([hidden_states, retrieved], dim=-1))
        hidden_states = hidden_states + gate * retrieved

        if rest is None:
            return hidden_states
        return (hidden_states, *rest)
|
|
|
|
| |
| |
| |
|
|
class QwenTitansForBABILongV3(nn.Module):
    """
    v3: Qwen3 with deep Titans memory integration.

    Wraps every ``memory_layer_stride``-th decoder layer of the given Qwen
    model in QwenDecoderLayerWithDeepMemory, and runs long sequences through
    the backbone chunk by chunk while threading the recurrent memory state.
    """
    def __init__(self, qwen_model, config: TrainingConfig):
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.num_attention_heads = qwen_model.config.num_attention_heads
        self.use_memory = bool(getattr(config, "use_memory", True))

        if self.use_memory:
            self.memory_layer_stride = int(getattr(config, "memory_layer_stride", 6))
            # Layers 0, stride, 2*stride, ... get a memory wrapper.
            self.memory_layer_indices = [
                idx for idx in range(len(self.qwen.model.layers))
                if idx % self.memory_layer_stride == 0
            ]

            # Replace selected decoder layers in place with memory wrappers.
            for layer_idx in self.memory_layer_indices:
                base_layer = self.qwen.model.layers[layer_idx]
                wrapped = QwenDecoderLayerWithDeepMemory(
                    base_layer,
                    layer_idx=layer_idx,
                    hidden_size=self.hidden_size,
                    num_attention_heads=self.num_attention_heads,
                    chunk_size=config.memory_chunk_size,
                    batch_size=config.memory_batch_size,
                    dim_head=config.memory_dim_head,
                    num_heads=config.memory_heads,
                    memory_depth=config.memory_depth,
                    memory_fp32=config.memory_fp32,
                    detach_mem_state=config.detach_mem_state,
                    deep_integration=config.deep_memory_integration,
                    memory_as_context=config.memory_as_context,
                    num_memory_tokens=config.num_memory_tokens,
                    memory_gate_bias=config.memory_gate_bias,
                    use_momentum=config.use_momentum,
                    momentum_order=config.momentum_order,
                    parent_model=self.qwen.model,
                )
                self.qwen.model.layers[layer_idx] = wrapped
        else:
            self.memory_layer_stride = 0
            self.memory_layer_indices = []

        if self.use_memory:
            logger.info("[QwenTitansForBABILongV3] Initialized with DEEP memory integration")
            logger.info(f"  - hidden_size: {self.hidden_size}")
            logger.info(f"  - num_attention_heads: {self.num_attention_heads}")
            logger.info(f"  - chunk_size: {config.chunk_size}")
            logger.info(f"  - memory_layer_stride: {self.memory_layer_stride}")
            logger.info(f"  - memory_layers: {self.memory_layer_indices}")
            logger.info(f"  - deep_memory_integration: {config.deep_memory_integration}")
            logger.info(f"  - memory_as_context: {config.memory_as_context}")
            logger.info(f"  - detach_mem_state: {config.detach_mem_state}")
            logger.info(f"  - cross_chunk_gradient_steps: {config.cross_chunk_gradient_steps}")
        else:
            logger.info("[QwenTitansForBABILongV3] Initialized (memory disabled)")

        # Handles to the wrapped layers for state resets and param grouping.
        self._memory_layers = [
            layer for layer in self.qwen.model.layers
            if isinstance(layer, QwenDecoderLayerWithDeepMemory)
        ]
        # Side-channel attribute the wrapped layers read via weakref parent.
        self.qwen.model._mem_store_mask = None

        for layer in self._memory_layers:
            layer.set_gradient_steps_back(config.cross_chunk_gradient_steps)

    def _split_into_chunks(self, tensor, chunk_size):
        """Split along dim 1 into (start, end, slice) triples of <= chunk_size."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def reset_memory_states(self):
        """Clear the recurrent memory state of every wrapped layer."""
        for layer in self._memory_layers:
            layer.reset_memory_state()

    def _set_mem_store_mask(
        self,
        chunk_ids: torch.Tensor,
        chunk_mask: Optional[torch.Tensor],
        chunk_start: int,
    ) -> None:
        """Publish the store mask for the upcoming chunk on the backbone.

        The first position of every non-initial chunk is a one-token overlap
        from the previous chunk and must not be stored into memory again, so
        its mask entry is forced to False.
        """
        if not self.use_memory:
            self.qwen.model._mem_store_mask = None
            return
        if chunk_mask is None:
            if chunk_start > 0:
                store_mask = torch.ones_like(chunk_ids, dtype=torch.bool)
                store_mask[:, 0] = False
            else:
                store_mask = None
        else:
            store_mask = chunk_mask.to(device=chunk_ids.device).bool()
            if chunk_start > 0:
                store_mask[:, 0] = False
        self.qwen.model._mem_store_mask = store_mask

    def get_memory_modules(self) -> List[nn.Module]:
        """Return every newly-added (non-pretrained) memory module."""
        if not self._memory_layers:
            return []
        modules = []
        for layer in self._memory_layers:
            modules.append(layer.neural_memory)
            modules.append(layer.mem_gate)
            if layer.memory_attention is not None:
                modules.append(layer.memory_attention)
            if layer.memory_context_proj is not None:
                modules.append(layer.memory_context_proj)
        return modules

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Chunked forward pass over a (possibly 32k-token) sequence.

        Two modes:
        - chunk_start/chunk_end given: process exactly one chunk and return
          {"loss_sum", "loss_tokens", "has_grad"} (for chunk-wise backward).
        - otherwise: iterate all chunks with a fresh memory state and return
          {"loss"} plus optional prediction/top-k bookkeeping tensors.
        """
        # Single-chunk mode delegates to the dedicated helper.
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
                reset_mem_state=reset_mem_state,
            )

        batch_size, seq_len = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        self.reset_memory_states()
        # Sum-reduction so the loss can be normalized over all chunks at once.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0
        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]

        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # One-token overlap with the previous chunk so the first label of
            # this chunk has its preceding context token available.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            self._set_mem_store_mask(chunk_ids, chunk_mask, start)
            hidden_full = self._process_chunk(chunk_ids, chunk_mask)
            if self.use_memory:
                self.qwen.model._mem_store_mask = None

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard causal shift: position t predicts token t+1.
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    # Only run the LM head on supervised positions.
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    if total_loss_sum is None:
                        total_loss_sum = chunk_loss_sum
                    else:
                        total_loss_sum = total_loss_sum + chunk_loss_sum
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Scatter flat predictions back to per-sample lists.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            # No supervised tokens at all: return a zero loss on model device.
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}
        if return_pred_tokens:
            # Pack ragged per-sample token lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths
        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total
        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
        reset_mem_state: bool,
    ) -> Dict[str, torch.Tensor]:
        """Process one chunk, returning an unnormalized loss for chunk-wise
        backward; the caller divides by the total supervised-token count."""
        if reset_mem_state:
            self.reset_memory_states()

        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # Same one-token overlap convention as the multi-chunk path.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        self._set_mem_store_mask(chunk_ids, chunk_mask, start)
        hidden_full = self._process_chunk(chunk_ids, chunk_mask)
        if self.use_memory:
            self.qwen.model._mem_store_mask = None

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Zero loss that still carries a graph, so backward() is valid
            # even for chunks without any supervised tokens.
            total_loss_sum = (hidden_full.float().sum() * 0.0)

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Run the backbone (no LM head) on one chunk; returns last hidden state."""
        if hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        # use_cache=False: memory replaces the KV cache across chunks.
        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def get_param_groups(self, config: TrainingConfig):
        """
        v3: Three parameter groups with different learning rates.

        - memory_core: NeuralMemory + gating MLPs (lr_memory)
        - memory_attention: attention/context integration modules
          (lr_memory_attention)
        - pretrained: everything else, i.e. the Qwen backbone (lr_pretrained)
        """
        memory_core_params = []
        memory_attention_params = []
        pretrained_params = []

        for name, param in self.named_parameters():
            if not param.requires_grad:
                continue
            if "neural_memory" in name or "mem_gate" in name:
                memory_core_params.append(param)
            elif "memory_attention" in name or "memory_context_proj" in name:
                memory_attention_params.append(param)
            else:
                pretrained_params.append(param)

        param_groups = []
        if len(memory_core_params) > 0:
            param_groups.append({
                "params": memory_core_params,
                "lr": config.lr_memory,
                "weight_decay": config.weight_decay,
                "name": "memory_core"
            })
        if len(memory_attention_params) > 0:
            param_groups.append({
                "params": memory_attention_params,
                "lr": config.lr_memory_attention,
                "weight_decay": config.weight_decay,
                "name": "memory_attention"
            })
        if len(pretrained_params) > 0:
            param_groups.append({
                "params": pretrained_params,
                "lr": config.lr_pretrained,
                "weight_decay": config.weight_decay,
                "name": "pretrained"
            })

        logger.info(f"Param groups: memory_core={len(memory_core_params)}, "
                    f"memory_attention={len(memory_attention_params)}, "
                    f"pretrained={len(pretrained_params)}")
        return param_groups
|
|
|
|
| |
| |
| |
|
|
def init_distributed() -> tuple:
    """Initialise torch.distributed from torchrun-style environment variables.

    Returns a ``(is_distributed, rank, local_rank, world_size)`` tuple,
    falling back to a single-process configuration when RANK/WORLD_SIZE
    are not set.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        # Not launched under torchrun: single-process mode.
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")

    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
|
|
|
|
def cleanup_distributed():
    """Synchronise all ranks and tear down the process group, if one exists."""
    if not (dist.is_available() and dist.is_initialized()):
        # Nothing to clean up in single-process runs.
        return
    dist.barrier()
    dist.destroy_process_group()
|
|
|
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Return the innermost module behind DDP/FSDP wrappers (identity otherwise)."""
    if hasattr(model, "module"):
        # DDP-style wrapper: the real model lives on `.module`.
        return model.module
    wrapped = getattr(model, "_fsdp_wrapped_module", None)
    if wrapped is not None and hasattr(wrapped, "module"):
        # FSDP wrapping an inner DDP-style module.
        return wrapped.module
    return model
|
|
|
|
def is_fsdp_model(model: nn.Module) -> bool:
    """Return True iff ``model`` is an FSDP instance (False when FSDP is unavailable)."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel
    except Exception:
        # FSDP not importable in this torch build.
        return False
    try:
        return isinstance(model, FullyShardedDataParallel)
    except Exception:
        return False
|
|
|
|
| |
| |
| |
|
|
class Trainer:
    """Training / evaluation driver for the Qwen+Titans v3 model.

    Handles single-process, DDP, and FSDP execution; AMP (fp16 with a
    GradScaler, or bf16); gradient accumulation; and an optional
    "chunkwise backward" mode where the sequence is processed in chunks
    and backward() is called once per chunk to bound activation memory.
    """

    def __init__(
        self,
        model: QwenTitansForBABILongV3,
        train_dataloader: DataLoader,
        eval_dataloader: DataLoader,
        config: TrainingConfig,
        rank: int = 0,
        world_size: int = 1,
        is_distributed: bool = False,
        tokenizer=None,
    ):
        self.model = model
        self.train_dataloader = train_dataloader
        self.eval_dataloader = eval_dataloader
        self.config = config
        # Infer the compute device from the (already-placed) model parameters.
        self.device = next(model.parameters()).device
        self.rank = rank
        self.world_size = world_size
        self.is_distributed = is_distributed
        self.is_main_process = (rank == 0)
        self.tokenizer = tokenizer

        # Parameter groups come from the unwrapped model so that memory
        # modules and pretrained weights can use different learning rates.
        base_model = unwrap_model(self.model)
        param_groups = base_model.get_param_groups(config)
        self.optimizer = AdamW(param_groups)

        # One scheduler step per *optimizer* step, i.e. per gradient
        # accumulation window, hence the division by accumulation steps.
        total_steps = math.ceil(
            (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1)
        )
        self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7)

        # NOTE(review): torch.cuda.amp.GradScaler is the legacy spelling;
        # newer torch prefers torch.amp.GradScaler("cuda", enabled=...).
        # Scaler is a no-op when fp16 is disabled.
        self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16)
        self.global_step = 0

    def _get_group_lr(self, group_name: str) -> Optional[float]:
        """Return the current LR of the optimizer param group named
        *group_name*, or None if no such group exists."""
        for group in self.optimizer.param_groups:
            if group.get("name") == group_name:
                return group.get("lr")
        return None

    def train(self):
        """Run the full training loop (all epochs), with periodic
        evaluation, then a final evaluation and checkpoint save."""
        self.model.train()
        if self.is_main_process:
            logger.info("=" * 60)
            logger.info("Starting v3 training with deep memory integration")
            logger.info("=" * 60)

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # Reshuffle the DistributedSampler deterministically per epoch.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            # Only rank 0 wraps the loader in a tqdm progress bar.
            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )

            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}

                # Gradient accumulation: only sync/step every `ga` batches.
                ga = max(self.config.gradient_accumulation_steps, 1)
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16

                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        # Count supervised target tokens (shifted by one for
                        # next-token prediction; -100 marks ignored positions)
                        # so per-chunk loss sums can be normalized globally.
                        labels = batch.get("labels")
                        if labels is not None:
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        # Split the sequence into fixed-size chunks; the last
                        # chunk may be shorter.
                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len))
                            for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            # Under DDP, suppress gradient all-reduce for
                            # every chunk except the final chunk of a
                            # sync step (no_sync avoids redundant comms).
                            is_last_chunk = (idx == len(chunk_ranges) - 1)
                            sync_chunk = sync_gradients and is_last_chunk
                            chunk_ctx = (
                                self.model.no_sync
                                if (self.is_distributed and not sync_chunk)
                                else nullcontext
                            )
                            with chunk_ctx():
                                # Memory state is reset only on the first
                                # chunk and carried across the rest.
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                # Detached running sum — used only for logging,
                                # never for gradients.
                                if raw_loss_sum is None:
                                    raw_loss_sum = chunk_loss_sum.detach()
                                else:
                                    raw_loss_sum = raw_loss_sum + chunk_loss_sum.detach()

                                # Backward per chunk; gradients from all
                                # chunks accumulate in .grad.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        # Mean loss over target tokens for reporting; zero
                        # when the batch had no supervised tokens.
                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Non-chunkwise path: single forward/backward over the
                        # whole sequence, with no_sync on non-boundary steps.
                        ctx = self.model.no_sync if (self.is_distributed and not sync_gradients) else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    # Optimizer step at accumulation boundaries: unscale
                    # (fp16 only), clip, step, then advance the scheduler.
                    grad_norm = None
                    if self.config.fp16:
                        self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.config.max_grad_norm)
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        # Running average over the epoch so far.
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix({"gstep": self.global_step, "loss": f"{avg_loss:.4f}"})

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_mem = self._get_group_lr("memory_core") or 0.0
                        lr_mem_attn = self._get_group_lr("memory_attention") or 0.0
                        lr_pre = self._get_group_lr("pretrained") or 0.0
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        logger.info(
                            f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                            f"lr_mem={lr_mem:.2e} | lr_mem_attn={lr_mem_attn:.2e} | lr_pre={lr_pre:.2e}{grad_note}"
                        )

                    if self.global_step % self.config.eval_steps == 0:
                        # Periodic mid-epoch evaluation (all ranks participate;
                        # only rank 0 logs). Restore train mode afterwards.
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            logger.info(
                                f"Step {self.global_step}: "
                                f"eval_loss={eval_metrics['loss']:.4f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            # Average epoch loss, then average across ranks when distributed.
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            # End-of-epoch evaluation + metrics record.
            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        # Final evaluation (optionally printing decoded examples) and save.
        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            # Cap the exponent at 20 to avoid overflow on divergent losses.
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")
            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        self.save_final_checkpoint()

    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Evaluate on the eval dataloader.

        Returns a dict with:
          - "loss":    mean loss over batches with a finite loss
          - "tok_acc": token-level accuracy over target positions
          - "em_acc":  exact-match accuracy on decoded answers
                       (only computed when a tokenizer is available)

        *print_examples* controls how many decoded (pred, label) pairs
        rank 0 logs. Accumulators are tensors so they can be all-reduced
        across ranks in distributed runs.
        """
        self.model.eval()
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip NaN/inf losses so one bad batch doesn't poison the mean.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)

            # Defensive shape validation before scoring: (B, T) predictions
            # and targets with a per-sample length vector of matching batch.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    # Only the first L positions of each row are real targets.
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(float((p == t).sum().item()), device=self.device, dtype=torch.float32)
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    if self.tokenizer is not None:
                        # Exact-match on decoded, whitespace-stripped text.
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}")
                            printed += 1

        # Aggregate counters across ranks before computing ratios.
        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)

        # clamp(min=1) guards against division by zero on empty denominators.
        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()

        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc}

    def _append_eval_metrics(
        self,
        metrics: Dict[str, float],
        *,
        phase: str,
        epoch: Optional[int],
        train_avg_loss: Optional[float],
    ) -> None:
        """Append one JSONL record of eval metrics to
        <output_dir>/eval_metrics.jsonl (rank 0 only)."""
        if not self.is_main_process:
            return
        os.makedirs(self.config.output_dir, exist_ok=True)
        record = {
            "phase": phase,
            "epoch": epoch,
            "global_step": int(self.global_step),
            "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss),
            "eval_loss": float(metrics.get("loss", 0.0)),
            "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0),
            "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0),
        }
        metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl")
        with open(metrics_path, "a") as f:
            f.write(json.dumps(record) + "\n")

    def save_final_checkpoint(self):
        """Save a memory-modules-only checkpoint, and optionally a full
        model checkpoint, handling FSDP full-state-dict gathering."""
        ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
        base_model = unwrap_model(self.model)

        # Collect only memory-related parameters by name substring.
        memory_sd = {
            name: p.detach().cpu()
            for name, p in base_model.named_parameters()
            if ("neural_memory" in name) or ("mem_gate" in name) or
               ("memory_attention" in name) or ("memory_context_proj" in name)
        }

        # Under FSDP the unwrapped model's named_parameters may be sharded
        # away (empty result) — fall back to gathering the full state dict
        # on rank 0 and filtering by the same name substrings.
        if is_fsdp_model(self.model) and len(memory_sd) == 0:
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
            memory_sd = {
                k: v for k, v in full_sd.items()
                if ("neural_memory" in k) or ("mem_gate" in k) or
                   ("memory_attention" in k) or ("memory_context_proj" in k)
            }

        if self.is_main_process:
            torch.save(
                {"memory_state_dict": memory_sd, "global_step": self.global_step, "config": asdict(self.config)},
                ckpt_path,
            )
            logger.info(f"Saved memory checkpoint: {ckpt_path}")
        if self.is_distributed:
            # Keep ranks aligned so no rank races ahead of the save.
            dist.barrier()

        if self.config.save_full_checkpoint:
            full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
            if is_fsdp_model(self.model):
                from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                    full_sd = self.model.state_dict()
            else:
                full_sd = unwrap_model(self.model).state_dict()

            if self.is_main_process:
                torch.save(
                    {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                    full_ckpt_path,
                )
                logger.info(f"Saved full checkpoint: {full_ckpt_path}")
            if self.is_distributed:
                dist.barrier()
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: parse args, build config/data/model, wrap for
    DDP or FSDP, then run training (or eval-only from a checkpoint)."""
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser(description="Qwen3 + Titans v3 Deep Integration Training")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--memory_layer_stride", type=int, default=None)
    parser.add_argument("--no_memory", action="store_true")
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")

    # v3-specific switches for memory integration behavior.
    parser.add_argument("--detach_mem_state", action="store_true",
                        help="Detach memory state (disable cross-chunk gradients)")
    parser.add_argument("--no_deep_integration", action="store_true",
                        help="Disable deep attention integration")
    parser.add_argument("--no_memory_as_context", action="store_true",
                        help="Disable memory-as-context projection")
    parser.add_argument("--cross_chunk_gradient_steps", type=int, default=None,
                        help="Number of chunks to allow gradient flow through")
    parser.add_argument("--memory_depth", type=int, default=None)
    parser.add_argument("--num_memory_tokens", type=int, default=None)

    parser.add_argument("--debug_grad_norm", action="store_true")
    args = parser.parse_args()

    config = TrainingConfig()

    # CLI overrides: only flags that were explicitly passed change the
    # defaults declared on TrainingConfig.
    if args.fsdp:
        config.use_fsdp = True
    if args.no_memory:
        config.use_memory = False
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    elif not config.use_memory:
        # Keep no-memory runs in a distinct default output directory.
        config.output_dir = "./outputs/qwen_babilong_no_memory_v3"
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.memory_layer_stride is not None:
        config.memory_layer_stride = int(args.memory_layer_stride)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False

    # v3 memory-integration overrides.
    if args.detach_mem_state:
        config.detach_mem_state = True
    if args.no_deep_integration:
        config.deep_memory_integration = False
    if args.no_memory_as_context:
        config.memory_as_context = False
    if args.cross_chunk_gradient_steps is not None:
        config.cross_chunk_gradient_steps = int(args.cross_chunk_gradient_steps)
    if args.memory_depth is not None:
        config.memory_depth = int(args.memory_depth)
    if args.num_memory_tokens is not None:
        config.num_memory_tokens = int(args.num_memory_tokens)
    if args.debug_grad_norm:
        config.debug_grad_norm = True

    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = (rank == 0)

    # Mutually-exclusive feature combinations are resolved here, with a
    # rank-0 warning so the downgrade is visible in logs.
    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp) and config.gradient_checkpointing:
        config.gradient_checkpointing = False
        if is_main:
            logger.warning("gradient_checkpointing is unstable with DDP here; disabling it.")

    if is_distributed and (not config.use_fsdp):
        # Memory modules may be skipped on some steps, leaving unused params.
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters.")

    # Per-rank seed offset so data augmentation/shuffling differs by rank.
    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # Fall back from bf16 to fp16 on GPUs without bf16 support.
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported; falling back to fp16.")
            config.bf16 = False
            if not config.fp16:
                config.fp16 = True

    # Enable TF32 matmuls when the config opts in (Ampere+ speedup).
    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    if is_main:
        logger.info("=" * 70)
        logger.info("Qwen3-4B + Titans v3 DEEP INTEGRATION Training")
        logger.info("=" * 70)
        logger.info(f"distributed={is_distributed}, world_size={world_size}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"num_epochs={config.num_epochs}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info(f"use_memory={config.use_memory}")
        if config.use_memory:
            logger.info(f"memory_layer_stride={config.memory_layer_stride}")
            logger.info(f"memory_depth={config.memory_depth}")
            logger.info(f"deep_memory_integration={config.deep_memory_integration}")
            logger.info(f"memory_as_context={config.memory_as_context}")
            logger.info(f"detach_mem_state={config.detach_mem_state}")
            logger.info(f"cross_chunk_gradient_steps={config.cross_chunk_gradient_steps}")
            logger.info(f"num_memory_tokens={config.num_memory_tokens}")

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Best-effort monkeypatch: force transformers to report flash-attn /
    # torchao / torchvision as unavailable so sdpa attention is used and
    # optional deps are never imported. Failures are non-fatal.
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled
        if hasattr(_import_utils, "is_torchao_available"):
            _import_utils.is_torchao_available = _disabled
        if hasattr(_import_utils, "is_torchvision_available"):
            _import_utils.is_torchvision_available = _disabled
    except Exception as e:
        if is_main:
            logger.warning(f"Disable checks failed (ignored): {e}")

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    # use_cache conflicts with gradient checkpointing / training.
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    # 90/10 train/eval split with a fixed generator seed so every rank
    # produces the identical split.
    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        generator=torch.Generator().manual_seed(config.seed),
    )

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler
        train_sampler = DistributedSampler(train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed)
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    model = QwenTitansForBABILongV3(qwen_model, config)
    model.to(device)

    if is_distributed:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Wrap both plain Qwen3 layers and the memory-augmented layers.
            auto_wrap = partial(
                transformer_auto_wrap_policy,
                transformer_layer_cls={Qwen3DecoderLayer, QwenDecoderLayerWithDeepMemory}
            )

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
                # Memory modules are excluded from sharding.
                ignored_modules=model.get_memory_modules(),
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
    )

    if args.eval_only:
        # Evaluation-only mode: load memory-module weights (strict=False so
        # missing pretrained keys are tolerated), evaluate, and exit.
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")

        memory_sd = ckpt.get("memory_state_dict", {})
        if len(memory_sd) > 0:
            unwrap_model(model).load_state_dict(memory_sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()
|
|
|
|
if __name__ == "__main__":
    # Script entry point.
    main()
|
|