| """ |
| Qwen3 baseline (NO Titans) - BABILong QA1 (32k) with Cross-Chunk Gradients |
| |
| Control-group purpose: |
| - Remove Titans memory modules entirely |
| - Train ONLY Qwen's `embed_tokens` and `lm_head` |
| - Keep ALL other training settings the same as `train_qwen_titans_babilong_v4.py` |
| |
| Key design (mirrors v4 training script behavior): |
| 1. Freeze Qwen backbone EXCEPT embed_tokens (trainable for input adaptation) |
| 2. Untie lm_head from embed_tokens if they share weights |
| 3. Train: embed_tokens + lm_head |
| 4. Keep chunkwise_backward=False + full-sequence backward (cross-chunk graph) |
| 5. Keep gradient_checkpointing & manual gradient all-reduce strategy for multi-GPU |
| """ |
|
|
import os
import sys

# Disable transformers' optional torchao integration. This must be set BEFORE
# transformers is imported anywhere below, which is why it sits at the very
# top of the file, ahead of the main import block.
os.environ["TRANSFORMERS_NO_TORCHAO"] = "1"
|
|
|
|
| |
| class _MockTorchAO: |
| def __getattr__(self, name): |
| return _MockTorchAO() |
|
|
| def __call__(self, *args, **kwargs): |
| return _MockTorchAO() |
|
|
|
|
# Register the mock so any later `import torchao` (e.g. from inside
# transformers) resolves to the inert stub instead of the real package.
sys.modules["torchao"] = _MockTorchAO()
sys.modules["torchao.quantization"] = _MockTorchAO()
|
|
| import json |
| import math |
| import argparse |
| import logging |
| from contextlib import nullcontext |
| from dataclasses import dataclass, asdict |
| from typing import Optional, Dict, List |
|
|
| import torch |
| import torch.nn as nn |
| import torch.nn.functional as F |
| import torch.distributed as dist |
| from torch.utils.data import Dataset, DataLoader |
| from torch.optim import AdamW |
| from torch.optim.lr_scheduler import CosineAnnealingLR |
| from torch.nn.parallel import DistributedDataParallel as DDP |
| from tqdm import tqdm |
|
|
|
|
# Module-level logging: timestamped INFO lines; all training progress below
# goes through this logger.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s - %(levelname)s - %(message)s",
)
logger = logging.getLogger(__name__)
|
|
|
|
| |
| |
| |
|
|
|
|
@dataclass
class TrainingConfig:
    """All paths and hyperparameters for the baseline (no-Titans) BABILong run."""

    # --- Paths ---
    model_path: str = "/data/huangyifei/huggingface_cache/hub/models--Qwen--Qwen3-4B-Instruct-2507/snapshots/cdbee75f17c01a7cc42f958dc650907174af0554"
    data_path: str = "/data/yty/BABILong/babilong-train-5k-samples/data/qa1/32k.json"
    output_dir: str = "./outputs/qwen_baseline_babilong_v4"

    # --- Optimization schedule ---
    num_epochs: int = 10
    batch_size: int = 1
    gradient_accumulation_steps: int = 16  # effective batch = batch_size * this
    max_grad_norm: float = 1.0

    # --- Learning rates (separate groups for embeddings vs. output head) ---
    lr_embed: float = 1e-5
    lr_lm_head: float = 1e-4
    weight_decay: float = 0.01
    warmup_steps: int = 100  # NOTE(review): not referenced by visible code — TODO confirm use

    # --- Chunked streaming over the 32k sequence ---
    chunk_size: int = 4096

    # --- Logging / evaluation cadence and debug switches ---
    eval_steps: int = 200
    eval_topk: int = 0  # 0 disables top-k accuracy accounting in forward()
    logging_steps: int = 10
    log_every_batches: int = 80  # NOTE(review): not referenced by visible code
    final_eval_print_examples: int = 10
    debug_data_samples: int = 0
    debug_label_batches: int = 0
    debug_eval_stats: bool = False
    debug_grad_norm: bool = False

    # --- Precision / memory ---
    bf16: bool = True
    fp16: bool = False
    use_tf32: bool = True
    gradient_checkpointing: bool = True
    chunkwise_backward: bool = False  # False => full-sequence backward (cross-chunk graph)

    # --- Sequence construction ---
    max_length: int = 32768
    answer_reserve_tokens: int = 64  # room kept at the tail for answer tokens
    label_prefix_tokens: int = 0  # optionally supervise N prompt tokens before the answer
    max_samples: Optional[int] = 500

    # --- Multi-GPU strategy ---
    use_fsdp: bool = False
    fsdp_use_orig_params: bool = True
    ddp_find_unused_parameters: bool = False

    # --- Checkpointing ---
    save_full_checkpoint: bool = True
    final_ckpt_name: str = "final_memory_checkpoint.pt"
    final_full_ckpt_name: str = "final_full_checkpoint.pt"

    seed: int = 42
|
|
|
|
| |
| |
| |
|
|
|
|
class BABILongDataset(Dataset):
    """Map-style dataset over a BABILong QA JSON list.

    Each sample is rendered as "<input>\n\nQuestion: <q>\nAnswer: <target>",
    tokenized so the prompt leaves room for the answer, supervised only on the
    answer tokens (labels are -100 elsewhere), and right-padded to a fixed
    `max_length`.
    """

    def __init__(
        self,
        data_path: str,
        tokenizer,
        max_length: int = 32768,
        answer_reserve_tokens: int = 64,
        label_prefix_tokens: int = 0,
        max_samples: Optional[int] = None,
    ):
        """Load the JSON file of samples.

        Args:
            data_path: Path to a JSON list of dicts with keys
                'input', 'question', 'target' (as used in __getitem__).
            tokenizer: HF-style tokenizer (needs __call__ and pad_token_id).
            max_length: Fixed output sequence length (pad/truncate target).
            answer_reserve_tokens: Tokens held back from the prompt budget so
                the answer is not truncated away.
            label_prefix_tokens: If > 0, also supervise this many prompt
                tokens immediately before the answer.
            max_samples: Optional cap on dataset size (truthy check: 0/None
                both mean "no cap").
        """
        self.tokenizer = tokenizer
        self.max_length = max_length
        self.answer_reserve_tokens = answer_reserve_tokens
        self.label_prefix_tokens = int(label_prefix_tokens)

        logger.info(f"Loading dataset: {data_path}")
        with open(data_path, "r") as f:
            self.data = json.load(f)

        if max_samples:
            self.data = self.data[:max_samples]

        logger.info(f"Dataset size: {len(self.data)}")

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        """Return dict of fixed-length 'input_ids', 'labels', 'attention_mask'."""
        item = self.data[idx]
        text = f"{item['input']}\n\nQuestion: {item['question']}\nAnswer:"
        target = item["target"]

        # Falls back to 0 when pad_token_id is None (or 0) — truthiness check.
        pad_id = self.tokenizer.pad_token_id or 0
        reserve = int(self.answer_reserve_tokens)

        # Prompt is truncated to leave `reserve` tokens of room for the answer.
        prompt_ids = self.tokenizer(
            text,
            max_length=max(self.max_length - reserve, 1),
            truncation=True,
            add_special_tokens=True,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Leading space so the answer tokenizes as a continuation of "Answer:".
        answer_ids = self.tokenizer(
            f" {target}",
            add_special_tokens=False,
            return_tensors="pt",
        ).input_ids.squeeze(0)

        # Clip the answer to whatever room the prompt actually left.
        available = max(self.max_length - prompt_ids.numel(), 0)
        answer_ids = answer_ids[:available]

        input_ids = torch.cat([prompt_ids, answer_ids], dim=0)[: self.max_length]

        # Supervise only the answer span (plus an optional prompt-prefix span);
        # the causal shift itself happens later in the model's forward.
        labels = torch.full_like(input_ids, fill_value=-100)
        if answer_ids.numel() > 0:
            start = prompt_ids.numel()
            end = min(start + answer_ids.numel(), labels.numel())
            labels[start:end] = input_ids[start:end]
            if self.label_prefix_tokens > 0:
                prefix = min(start, self.label_prefix_tokens)
                if prefix > 0:
                    labels[start - prefix : start] = input_ids[start - prefix : start]

        # Right-pad everything to exactly max_length; mask out the padding.
        seq_len = input_ids.numel()
        if seq_len < self.max_length:
            pad_len = self.max_length - seq_len
            input_ids = F.pad(input_ids, (0, pad_len), value=int(pad_id))
            labels = F.pad(labels, (0, pad_len), value=-100)
            attention_mask = torch.cat(
                [torch.ones(seq_len, dtype=torch.long), torch.zeros(pad_len, dtype=torch.long)],
                dim=0,
            )
        else:
            attention_mask = torch.ones(self.max_length, dtype=torch.long)

        return {
            "input_ids": input_ids.to(dtype=torch.long),
            "labels": labels.to(dtype=torch.long),
            "attention_mask": attention_mask,
        }
|
|
|
|
def collate_fn(batch):
    """Stack per-sample tensor dicts into a single batched dict (dim 0 = batch)."""
    return {
        key: torch.stack([sample[key] for sample in batch], dim=0)
        for key in batch[0].keys()
    }
|
|
|
|
| |
| |
| |
|
|
|
|
class QwenBaselineForBABILongV4(nn.Module):
    """
    Baseline wrapper that mirrors v4's chunk streaming + loss computation,
    but WITHOUT any Titans memory integration.

    Trainable: embed_tokens + lm_head
    Frozen: all transformer layers

    The sequence is processed in `config.chunk_size` chunks; each chunk runs a
    full forward through the (frozen) backbone with use_cache=False, so chunks
    are attention-independent. Frozen layers still propagate gradients back to
    the trainable embeddings.
    """

    def __init__(self, qwen_model, config: TrainingConfig):
        """Wrap a loaded Qwen causal LM and freeze everything but embeddings/head.

        Args:
            qwen_model: HF-style causal LM; expected to expose
                `.model.embed_tokens`, `.model`, and `.lm_head`.
            config: Training configuration (chunk_size, chunkwise_backward, ...).
        """
        super().__init__()
        self.qwen = qwen_model
        self.config = config
        self.hidden_size = qwen_model.config.hidden_size
        self.num_attention_heads = qwen_model.config.num_attention_heads

        self._freeze_backbone()

        logger.info("[QwenBaselineForBABILongV4] Initialized (NO TITANS)")
        logger.info("Trainable: embed_tokens + lm_head | Frozen: transformer layers")
        logger.info(f" - hidden_size: {self.hidden_size}")
        logger.info(f" - num_attention_heads: {self.num_attention_heads}")
        logger.info(f" - chunk_size: {config.chunk_size}")
        logger.info(f" - chunkwise_backward: {config.chunkwise_backward}")

    def _freeze_backbone(self):
        """
        Freeze Qwen transformer layers, keep embed_tokens + lm_head trainable.
        Also untie lm_head from embed_tokens if they share weights (same as v4).
        """
        # Step 1: if lm_head and embed_tokens share storage, clone lm_head into
        # an independent nn.Linear so the two can be trained with different lrs.
        if hasattr(self.qwen, "lm_head") and hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            lm_head_weight = self.qwen.lm_head.weight
            embed_weight = self.qwen.model.embed_tokens.weight
            try:
                # Same data_ptr => tied (shared) storage.
                has_tied_weights = lm_head_weight.data_ptr() == embed_weight.data_ptr()
            except Exception:
                has_tied_weights = False

            if has_tied_weights:
                logger.info("[baseline v4] Detected tied weights - untying lm_head from embed_tokens")
                new_lm_head = nn.Linear(
                    self.qwen.lm_head.in_features,
                    self.qwen.lm_head.out_features,
                    bias=self.qwen.lm_head.bias is not None,
                    device=lm_head_weight.device,
                    dtype=lm_head_weight.dtype,
                )
                with torch.no_grad():
                    new_lm_head.weight.copy_(lm_head_weight)
                    if self.qwen.lm_head.bias is not None and new_lm_head.bias is not None:
                        new_lm_head.bias.copy_(self.qwen.lm_head.bias)
                self.qwen.lm_head = new_lm_head
                logger.info(f"[baseline v4] Created independent lm_head: {new_lm_head.weight.shape}")

        # Step 2: freeze everything...
        for _, p in self.named_parameters():
            p.requires_grad = False

        # ...then re-enable the input embeddings.
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            for p in self.qwen.model.embed_tokens.parameters():
                p.requires_grad = True
        else:
            emb = self.qwen.get_input_embeddings()
            if emb is not None:
                for p in emb.parameters():
                    p.requires_grad = True

        # ...and the output head.
        if hasattr(self.qwen, "lm_head"):
            for p in self.qwen.lm_head.parameters():
                p.requires_grad = True
        else:
            out_emb = self.qwen.get_output_embeddings()
            if out_emb is not None:
                for p in out_emb.parameters():
                    p.requires_grad = True

        # Step 3: log a summary so a mis-freeze is visible in the run log.
        frozen_count = 0
        trainable_count = 0
        embed_count = 0
        lm_head_count = 0
        for name, param in self.named_parameters():
            if param.requires_grad:
                trainable_count += 1
                if "embed_tokens" in name:
                    embed_count += 1
                    logger.info(f"[baseline v4] embed_tokens trainable: {name}")
                elif "lm_head" in name:
                    lm_head_count += 1
                    logger.info(f"[baseline v4] lm_head trainable: {name}")
            else:
                frozen_count += 1

        logger.info(f"[baseline v4] Frozen {frozen_count} parameters")
        logger.info(f"[baseline v4] Trainable {trainable_count} parameters (embed: {embed_count} + lm_head: {lm_head_count})")

    def _split_into_chunks(self, tensor: torch.Tensor, chunk_size: int):
        """Split along dim 1 into (start, end, slice) triples of <= chunk_size."""
        seq_len = tensor.shape[1]
        chunks = []
        for start in range(0, seq_len, chunk_size):
            end = min(start + chunk_size, seq_len)
            chunks.append((start, end, tensor[:, start:end]))
        return chunks

    def _process_chunk(
        self,
        chunk_ids: torch.Tensor,
        chunk_attention_mask: Optional[torch.Tensor] = None,
    ) -> torch.Tensor:
        """Embed one chunk and run it through the frozen backbone.

        use_cache=False: no KV cache is carried between chunks, so each chunk
        attends only within itself.
        """
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            token_embeds = self.qwen.model.embed_tokens(chunk_ids)
        else:
            token_embeds = self.qwen.get_input_embeddings()(chunk_ids)

        outputs = self.qwen.model(
            inputs_embeds=token_embeds,
            attention_mask=chunk_attention_mask,
            use_cache=False,
            output_hidden_states=False,
            return_dict=True,
        )
        return outputs.last_hidden_state

    def forward(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        return_pred_tokens: bool = False,
        topk: int = 0,
        chunk_start: Optional[int] = None,
        chunk_end: Optional[int] = None,
        reset_mem_state: bool = False,
    ) -> Dict[str, torch.Tensor]:
        """Full-sequence (chunk-streamed) loss, or a single-chunk partial loss.

        Args:
            input_ids / attention_mask / labels: (batch, seq) tensors; labels
                use -100 for unsupervised positions.
            return_pred_tokens: also return argmax predictions per sample.
            topk: if > 0, additionally count top-k hit statistics.
            chunk_start / chunk_end: when given, compute only that chunk's
                loss-sum (chunkwise-backward training path).
            reset_mem_state: accepted for interface parity with the Titans
                variant; unused here (no memory state exists).

        Returns:
            Dict with 'loss' (token-mean CE) plus optional 'pred_ids',
            'target_ids', 'target_lengths', 'topk_correct', 'topk_total'.
        """
        # Single-chunk dispatch (used when chunkwise_backward=True in training).
        if chunk_start is not None or chunk_end is not None:
            start = 0 if chunk_start is None else int(chunk_start)
            end = int(chunk_end) if chunk_end is not None else None
            return self._forward_single_chunk(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                chunk_start=start,
                chunk_end=end,
            )

        batch_size, _ = input_ids.shape
        chunk_size = self.config.chunk_size
        chunks = self._split_into_chunks(input_ids, chunk_size)

        # Sum-reduction + explicit token count => exact token-mean over the
        # whole sequence regardless of how tokens are spread across chunks.
        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        topk_correct = None
        topk_total = None

        pred_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]
        target_tokens_by_sample: List[List[int]] = [[] for _ in range(batch_size)]

        if topk and topk > 0:
            device = input_ids.device
            topk_correct = torch.tensor(0.0, device=device, dtype=torch.float32)
            topk_total = torch.tensor(0.0, device=device, dtype=torch.float32)

        for start, end, _ in chunks:
            # One token of left context so the first token of every chunk
            # (except the very first) still gets a prediction after the shift.
            proc_start = max(0, start - 1)
            chunk_ids = input_ids[:, proc_start:end]
            chunk_labels = labels[:, proc_start:end] if labels is not None else None
            chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

            hidden_full = self._process_chunk(chunk_ids, chunk_mask)

            if chunk_labels is not None and (chunk_labels != -100).any():
                chunk_labels_local = chunk_labels.to(device=hidden_full.device)
                # Standard causal shift: position t predicts token t+1.
                shift_hidden = hidden_full[:, :-1, :].contiguous()
                shift_labels = chunk_labels_local[:, 1:].contiguous()

                valid = shift_labels != -100
                if valid.any():
                    # Gather only supervised positions before the lm_head to
                    # keep the (huge-vocab) logits tensor small.
                    hs = shift_hidden[valid]
                    targets = shift_labels[valid]

                    # nan_to_num guards against non-finite activations/logits
                    # poisoning the loss under mixed precision.
                    hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                    logits = self.qwen.lm_head(hs)
                    logits = logits.float()
                    logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                    targets = targets.to(device=logits.device)

                    chunk_loss_sum = loss_fct_sum(logits, targets)
                    total_loss_sum = chunk_loss_sum if total_loss_sum is None else (total_loss_sum + chunk_loss_sum)
                    total_loss_tokens += targets.numel()

                    if topk and topk > 0:
                        k = min(int(topk), logits.shape[-1])
                        topk_ids = torch.topk(logits, k=k, dim=-1).indices
                        correct = (topk_ids == targets.unsqueeze(-1)).any(dim=-1)
                        topk_correct = topk_correct + correct.float().sum()
                        topk_total = topk_total + torch.tensor(float(targets.numel()), device=topk_total.device)

                    if return_pred_tokens:
                        # Re-associate flattened predictions with their batch row
                        # via the nonzero indices of the validity mask.
                        idx = valid.nonzero(as_tuple=False)
                        pred_flat = torch.argmax(logits, dim=-1).detach().to("cpu", dtype=torch.long).tolist()
                        tgt_flat = targets.detach().to("cpu", dtype=torch.long).tolist()
                        b_idx_flat = idx[:, 0].detach().to("cpu", dtype=torch.long).tolist()

                        for i, b_idx in enumerate(b_idx_flat):
                            pred_tokens_by_sample[b_idx].append(int(pred_flat[i]))
                            target_tokens_by_sample[b_idx].append(int(tgt_flat[i]))

        if total_loss_sum is None or total_loss_tokens == 0:
            # No supervised tokens at all: return a zero loss on the model device.
            device = next(self.qwen.parameters()).device
            loss = torch.zeros((), device=device, dtype=torch.float32)
        else:
            loss = total_loss_sum / total_loss_tokens

        out: Dict[str, torch.Tensor] = {"loss": loss}

        if return_pred_tokens:
            # Pack the ragged per-sample token lists into -1-padded matrices.
            lengths = torch.tensor([len(x) for x in target_tokens_by_sample], dtype=torch.long)
            max_len = int(lengths.max().item()) if lengths.numel() > 0 else 0
            if max_len > 0:
                pred_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                tgt_mat = torch.full((batch_size, max_len), -1, dtype=torch.long)
                for b in range(batch_size):
                    L = int(lengths[b].item())
                    if L > 0:
                        pred_mat[b, :L] = torch.tensor(pred_tokens_by_sample[b], dtype=torch.long)
                        tgt_mat[b, :L] = torch.tensor(target_tokens_by_sample[b], dtype=torch.long)
            else:
                pred_mat = torch.empty((batch_size, 0), dtype=torch.long)
                tgt_mat = torch.empty((batch_size, 0), dtype=torch.long)
            out["pred_ids"] = pred_mat
            out["target_ids"] = tgt_mat
            out["target_lengths"] = lengths

        if topk and topk > 0 and topk_correct is not None and topk_total is not None:
            out["topk_correct"] = topk_correct
            out["topk_total"] = topk_total

        return out

    def _forward_single_chunk(
        self,
        input_ids: torch.Tensor,
        attention_mask: Optional[torch.Tensor],
        labels: Optional[torch.Tensor],
        chunk_start: int,
        chunk_end: Optional[int],
    ) -> Dict[str, torch.Tensor]:
        """Loss-SUM (not mean) for one chunk; caller normalizes and scales.

        Returns {'loss_sum', 'loss_tokens', 'has_grad'} so the chunkwise
        training loop can back-propagate per chunk.
        """
        seq_len = input_ids.shape[1]
        end = chunk_end if chunk_end is not None else min(chunk_start + self.config.chunk_size, seq_len)
        end = min(int(end), seq_len)
        start = max(0, int(chunk_start))

        # Same one-token left-context trick as the full forward.
        proc_start = max(0, start - 1)
        chunk_ids = input_ids[:, proc_start:end]
        chunk_labels = labels[:, proc_start:end] if labels is not None else None
        chunk_mask = attention_mask[:, proc_start:end] if attention_mask is not None else None

        hidden_full = self._process_chunk(chunk_ids, chunk_mask)

        loss_fct_sum = nn.CrossEntropyLoss(reduction="sum")
        total_loss_sum = None
        total_loss_tokens = 0

        if chunk_labels is not None and (chunk_labels != -100).any():
            chunk_labels_local = chunk_labels.to(device=hidden_full.device)
            shift_hidden = hidden_full[:, :-1, :].contiguous()
            shift_labels = chunk_labels_local[:, 1:].contiguous()

            valid = shift_labels != -100
            if valid.any():
                hs = shift_hidden[valid]
                targets = shift_labels[valid]

                hs = torch.nan_to_num(hs.float(), nan=0.0, posinf=0.0, neginf=0.0)
                logits = self.qwen.lm_head(hs)
                logits = logits.float()
                logits = torch.nan_to_num(logits, nan=0.0, posinf=0.0, neginf=0.0)
                targets = targets.to(device=logits.device)

                total_loss_sum = loss_fct_sum(logits, targets)
                total_loss_tokens = targets.numel()

        if total_loss_sum is None:
            # Zero loss that still carries a graph, so backward() stays valid
            # even for chunks with no supervised tokens.
            total_loss_sum = hidden_full.float().sum() * 0.0

        return {
            "loss_sum": total_loss_sum,
            "loss_tokens": total_loss_tokens,
            "has_grad": True,
        }

    def get_param_groups(self, config: TrainingConfig):
        """Build optimizer param groups: embed_tokens @ lr_embed, lm_head @ lr_lm_head.

        Groups are de-duplicated by parameter identity so a (still-)tied
        embedding/head pair is never optimized twice.
        """
        embed_mod = None
        if hasattr(self.qwen, "model") and hasattr(self.qwen.model, "embed_tokens"):
            embed_mod = self.qwen.model.embed_tokens
        else:
            embed_mod = self.qwen.get_input_embeddings()

        head_mod = self.qwen.lm_head if hasattr(self.qwen, "lm_head") else self.qwen.get_output_embeddings()

        embed_params = [p for p in (embed_mod.parameters() if embed_mod is not None else []) if p.requires_grad]
        lm_head_params = [p for p in (head_mod.parameters() if head_mod is not None else []) if p.requires_grad]

        # Track ids already assigned to a group (embed group wins ties).
        seen = set()

        def _uniq(params):
            out = []
            for p in params:
                pid = id(p)
                if pid in seen:
                    continue
                out.append(p)
                seen.add(pid)
            return out

        embed_params = _uniq(embed_params)
        lm_head_params = _uniq(lm_head_params)

        param_groups = []
        if len(embed_params) > 0:
            param_groups.append(
                {
                    "params": embed_params,
                    "lr": config.lr_embed,
                    "weight_decay": config.weight_decay,
                    "name": "embed_tokens",
                }
            )
        if len(lm_head_params) > 0:
            param_groups.append(
                {
                    "params": lm_head_params,
                    "lr": config.lr_lm_head,
                    "weight_decay": config.weight_decay,
                    "name": "lm_head",
                }
            )

        logger.info(f"[baseline v4 Param groups] embed_tokens={len(embed_params)}, lm_head={len(lm_head_params)}")
        return param_groups
|
|
|
|
| |
| |
| |
|
|
|
|
def init_distributed() -> tuple:
    """Initialize torch.distributed from torchrun-style environment variables.

    Returns (is_distributed, rank, local_rank, world_size). When RANK or
    WORLD_SIZE is absent, falls back to single-process values without
    touching the process group.
    """
    env = os.environ
    if "RANK" not in env or "WORLD_SIZE" not in env:
        return False, 0, 0, 1

    rank = int(env["RANK"])
    world_size = int(env["WORLD_SIZE"])
    local_rank = int(env.get("LOCAL_RANK", 0))

    if not dist.is_available():
        raise RuntimeError("torch.distributed not available")

    # Idempotent: only initialize the NCCL group once per process.
    if not dist.is_initialized():
        dist.init_process_group(backend="nccl", init_method="env://")

    torch.cuda.set_device(local_rank)
    return True, rank, local_rank, world_size
|
|
|
|
def cleanup_distributed():
    """Synchronize all ranks, then tear down the process group (no-op if unused)."""
    active = dist.is_available() and dist.is_initialized()
    if active:
        dist.barrier()
        dist.destroy_process_group()
|
|
|
|
def unwrap_model(model: nn.Module) -> nn.Module:
    """Strip a DDP/FSDP wrapper and return the underlying module.

    Checks `.module` first (DDP and most wrappers), then the FSDP-internal
    `_fsdp_wrapped_module` indirection; returns `model` unchanged otherwise.
    """
    if hasattr(model, "module"):
        return model.module
    inner = getattr(model, "_fsdp_wrapped_module", None)
    if inner is not None and hasattr(inner, "module"):
        return inner.module
    return model
|
|
|
|
def is_fsdp_model(model: nn.Module) -> bool:
    """Return True iff `model` is an FSDP wrapper (False when FSDP can't import)."""
    try:
        from torch.distributed.fsdp import FullyShardedDataParallel
    except Exception:
        return False
    return isinstance(model, FullyShardedDataParallel)
|
|
|
|
def manual_all_reduce_gradients(model: nn.Module, world_size: int) -> None:
    """
    Manually synchronize gradients across GPUs without DDP.
    Kept identical to v4 to match multi-GPU strategy under cross-chunk graphs.
    """
    if world_size <= 1:
        return

    grads = [p.grad for p in model.parameters() if p.grad is not None]
    if not grads:
        return

    # Pack every gradient into one flat buffer so a single all-reduce suffices.
    flat = torch.zeros(
        sum(g.numel() for g in grads),
        dtype=grads[0].dtype,
        device=grads[0].device,
    )
    cursor = 0
    for g in grads:
        count = g.numel()
        flat[cursor : cursor + count] = g.view(-1)
        cursor += count

    # Sum across ranks, then average.
    dist.all_reduce(flat, op=dist.ReduceOp.SUM)
    flat.div_(world_size)

    # Scatter the averaged values back into each gradient tensor in place.
    cursor = 0
    for g in grads:
        count = g.numel()
        g.copy_(flat[cursor : cursor + count].view_as(g))
        cursor += count
|
|
|
|
| |
| |
| |
|
|
|
|
| class Trainer: |
| def __init__( |
| self, |
| model: nn.Module, |
| train_dataloader: DataLoader, |
| eval_dataloader: DataLoader, |
| config: TrainingConfig, |
| rank: int = 0, |
| world_size: int = 1, |
| is_distributed: bool = False, |
| tokenizer=None, |
| use_manual_grad_sync: bool = False, |
| ): |
| self.model = model |
| self.train_dataloader = train_dataloader |
| self.eval_dataloader = eval_dataloader |
| self.config = config |
| self.device = next(model.parameters()).device |
| self.rank = rank |
| self.world_size = world_size |
| self.is_distributed = is_distributed |
| self.is_main_process = rank == 0 |
| self.tokenizer = tokenizer |
| self.use_manual_grad_sync = use_manual_grad_sync |
|
|
| base_model = unwrap_model(self.model) |
| param_groups = base_model.get_param_groups(config) |
| self.optimizer = AdamW(param_groups) |
|
|
| total_steps = math.ceil( |
| (len(train_dataloader) * config.num_epochs) / max(config.gradient_accumulation_steps, 1) |
| ) |
| self.scheduler = CosineAnnealingLR(self.optimizer, T_max=total_steps, eta_min=1e-7) |
|
|
| self.scaler = torch.cuda.amp.GradScaler(enabled=config.fp16) |
| self.global_step = 0 |
|
|
| def _get_group_lr(self, group_name: str) -> Optional[float]: |
| for group in self.optimizer.param_groups: |
| if group.get("name") == group_name: |
| return group.get("lr") |
| return None |
|
|
    def train(self):
        """Run the full training loop: epochs -> batches -> accumulation cycles.

        Two backward strategies, selected by config.chunkwise_backward:
          - False (default here): one full-sequence forward per batch, single
            backward (cross-chunk gradient graph).
          - True: per-chunk forward/backward using the model's single-chunk
            path; chunk loss-sums are scaled by 1/(total_tokens * ga) so the
            accumulated gradient equals the token-mean loss / ga.
        Ends with a final evaluation and checkpoint save.
        """
        self.model.train()
        if self.is_main_process:
            logger.info("=" * 60)
            logger.info("Starting baseline v4 training (NO TITANS, FROZEN backbone)")
            logger.info("=" * 60)

        last_epoch_loss = None
        for epoch in range(self.config.num_epochs):
            # Reshuffle per epoch when using a DistributedSampler.
            sampler = getattr(self.train_dataloader, "sampler", None)
            if sampler is not None and hasattr(sampler, "set_epoch"):
                sampler.set_epoch(epoch)
            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1}/{self.config.num_epochs}")

            epoch_loss = 0.0
            num_batches = 0

            # Progress bar only on rank 0; other ranks iterate the raw loader.
            pbar = self.train_dataloader
            if self.is_main_process:
                pbar = tqdm(
                    self.train_dataloader,
                    desc=f"Epoch {epoch + 1}/{self.config.num_epochs}",
                    leave=False,
                    dynamic_ncols=True,
                )

            for step, batch in enumerate(pbar):
                batch = {k: v.to(self.device) for k, v in batch.items()}

                # Optimizer steps happen every `ga` micro-batches.
                ga = max(self.config.gradient_accumulation_steps, 1)
                sync_gradients = ((step + 1) % ga == 0)
                amp_enabled = self.config.fp16 or self.config.bf16
                amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16

                with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                    if self.config.chunkwise_backward:
                        # Count supervised tokens over the shifted labels so
                        # per-chunk loss-sums can be normalized to a mean.
                        labels = batch.get("labels")
                        if labels is not None:
                            total_tokens = int((labels[:, 1:] != -100).sum().item())
                        else:
                            total_tokens = 0
                        loss_scale = 0.0 if total_tokens == 0 else (1.0 / total_tokens / ga)

                        seq_len = batch["input_ids"].shape[1]
                        chunk_size = int(self.config.chunk_size)
                        chunk_ranges = [
                            (start, min(start + chunk_size, seq_len)) for start in range(0, seq_len, chunk_size)
                        ]
                        raw_loss_sum = None

                        for idx, (start, end) in enumerate(chunk_ranges):
                            # DDP gradient sync is only allowed on the last
                            # chunk of the batch that completes an accumulation
                            # cycle; all other backwards run under no_sync().
                            is_last_chunk = idx == (len(chunk_ranges) - 1)
                            sync_chunk = sync_gradients and is_last_chunk

                            use_no_sync = (
                                self.is_distributed
                                and not sync_chunk
                                and not self.use_manual_grad_sync
                                and hasattr(self.model, "no_sync")
                            )
                            chunk_ctx = self.model.no_sync if use_no_sync else nullcontext
                            with chunk_ctx():
                                outputs = self.model(
                                    input_ids=batch["input_ids"],
                                    attention_mask=batch["attention_mask"],
                                    labels=labels,
                                    chunk_start=start,
                                    chunk_end=end,
                                    reset_mem_state=(idx == 0),
                                )
                                chunk_loss_sum = outputs["loss_sum"]
                                # Detached running sum: only used for logging.
                                raw_loss_sum = chunk_loss_sum.detach() if raw_loss_sum is None else (raw_loss_sum + chunk_loss_sum.detach())

                                # Backward per chunk; scaling makes the sum of
                                # chunk gradients equal the mean-loss gradient.
                                scaled_loss = chunk_loss_sum * float(loss_scale)
                                if self.config.fp16:
                                    self.scaler.scale(scaled_loss).backward()
                                else:
                                    scaled_loss.backward()

                        if raw_loss_sum is None or total_tokens == 0:
                            raw_loss = torch.zeros((), device=self.device, dtype=torch.float32)
                        else:
                            raw_loss = raw_loss_sum / total_tokens
                        loss = raw_loss / ga
                    else:
                        # Full-sequence path: one forward+backward per batch.
                        use_no_sync = (
                            self.is_distributed
                            and not sync_gradients
                            and not self.use_manual_grad_sync
                            and hasattr(self.model, "no_sync")
                        )
                        ctx = self.model.no_sync if use_no_sync else nullcontext
                        with ctx():
                            outputs = self.model(
                                input_ids=batch["input_ids"],
                                attention_mask=batch["attention_mask"],
                                labels=batch["labels"],
                            )
                            raw_loss = outputs["loss"]
                            loss = raw_loss / ga

                            if self.config.fp16:
                                self.scaler.scale(loss).backward()
                            else:
                                loss.backward()

                epoch_loss += raw_loss.detach().float().item()
                num_batches += 1

                if sync_gradients:
                    grad_norm = None

                    # Manual sync path: unscale BEFORE the all-reduce so fp16
                    # gradients are averaged in true scale.
                    if self.use_manual_grad_sync and self.world_size > 1:
                        if self.config.fp16:
                            self.scaler.unscale_(self.optimizer)
                        manual_all_reduce_gradients(self.model, self.world_size)

                    if self.config.fp16:
                        # Avoid double-unscaling when manual sync already did it.
                        if not self.use_manual_grad_sync:
                            self.scaler.unscale_(self.optimizer)
                        grad_norm = torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config.max_grad_norm
                        )
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                    else:
                        grad_norm = torch.nn.utils.clip_grad_norm_(
                            self.model.parameters(), self.config.max_grad_norm
                        )
                        self.optimizer.step()

                    self.scheduler.step()
                    self.optimizer.zero_grad(set_to_none=True)
                    self.global_step += 1

                    if self.is_main_process:
                        avg_loss = epoch_loss / max(num_batches, 1)
                        pbar.set_postfix({"gstep": self.global_step, "loss": f"{avg_loss:.4f}"})

                    if self.global_step % self.config.logging_steps == 0 and self.is_main_process:
                        lr_embed = self._get_group_lr("embed_tokens") or 0.0
                        lr_lm_head = self._get_group_lr("lm_head") or 0.0
                        grad_note = ""
                        if self.config.debug_grad_norm and grad_norm is not None:
                            grad_note = f" | grad_norm={float(grad_norm):.4f}"
                        logger.info(
                            f"Step {self.global_step} | loss={epoch_loss / max(num_batches, 1):.4f} | "
                            f"lr_embed={lr_embed:.2e} | lr_lm_head={lr_lm_head:.2e}{grad_note}"
                        )

                    # Mid-epoch eval; evaluate() runs on all ranks (collective
                    # all_reduce inside), only rank 0 logs.
                    if self.global_step % self.config.eval_steps == 0:
                        eval_metrics = self.evaluate()
                        if self.is_main_process:
                            # Clamp at exp(20) to keep ppl finite in logs.
                            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
                            logger.info(
                                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                            )
                        self.model.train()

            # Epoch summary loss averaged across ranks.
            avg_epoch_loss = epoch_loss / max(num_batches, 1)
            if self.is_distributed:
                t = torch.tensor(avg_epoch_loss, device=self.device, dtype=torch.float32)
                dist.all_reduce(t, op=dist.ReduceOp.SUM)
                avg_epoch_loss = (t / self.world_size).item()

            if self.is_main_process:
                logger.info(f"Epoch {epoch + 1} done, avg loss={avg_epoch_loss:.4f}")
            last_epoch_loss = avg_epoch_loss

            eval_metrics = self.evaluate()
            if self.is_main_process:
                logger.info(
                    f"[EPOCH {epoch + 1} EVAL] "
                    f"eval_loss={eval_metrics['loss']:.4f}, "
                    f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                    f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
                )
                self._append_eval_metrics(
                    eval_metrics,
                    phase="epoch",
                    epoch=int(epoch + 1),
                    train_avg_loss=avg_epoch_loss,
                )
            self.model.train()

        if self.is_main_process:
            logger.info("Training done, final evaluation")

        final_eval = self.evaluate(print_examples=int(self.config.final_eval_print_examples))
        if self.is_main_process:
            ppl = float(math.exp(min(20.0, final_eval["loss"])))
            logger.info(
                f"[FINAL EVAL] loss={final_eval['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={final_eval['em_acc'] * 100:.2f}%, "
                f"tok_acc={final_eval['tok_acc'] * 100:.2f}%"
            )
            logger.info("Saving final checkpoint")

            self._append_eval_metrics(
                final_eval,
                phase="final",
                epoch=int(self.config.num_epochs),
                train_avg_loss=last_epoch_loss,
            )
        # Called on all ranks: FSDP full-state-dict gathering is collective.
        self.save_final_checkpoint()
|
|
    @torch.no_grad()
    def evaluate(self, print_examples: int = 0) -> Dict[str, float]:
        """Evaluate on eval_dataloader; returns {'loss', 'tok_acc', 'em_acc'}.

        tok_acc = per-token argmax accuracy over supervised positions;
        em_acc = exact string match of decoded prediction vs decoded label
        (requires a tokenizer). All counters are all-reduced across ranks, so
        every rank must call this (collective op); only rank 0 prints samples.

        Args:
            print_examples: number of decoded pred/label pairs to log (rank 0).
        """
        self.model.eval()
        total_loss = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_batches = torch.tensor(0.0, device=self.device, dtype=torch.float32)

        total_tok_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_tok_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_correct = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        total_em_total = torch.tensor(0.0, device=self.device, dtype=torch.float32)
        printed = 0

        for batch in self.eval_dataloader:
            batch = {k: v.to(self.device) for k, v in batch.items()}
            # Same autocast setup as training.
            amp_enabled = self.config.fp16 or self.config.bf16
            amp_dtype = torch.float16 if self.config.fp16 else torch.bfloat16
            with torch.amp.autocast(device_type=self.device.type, enabled=amp_enabled, dtype=amp_dtype):
                outputs = self.model(
                    input_ids=batch["input_ids"],
                    attention_mask=batch["attention_mask"],
                    labels=batch["labels"],
                    return_pred_tokens=True,
                    topk=int(self.config.eval_topk) if self.config.eval_topk else 0,
                )

            # Skip non-finite losses so one bad batch can't poison the average.
            if torch.isfinite(outputs["loss"]):
                total_loss += outputs["loss"].detach().float()
                total_batches += 1.0

            pred_ids = outputs.get("pred_ids", None)
            target_ids = outputs.get("target_ids", None)
            lengths = outputs.get("target_lengths", None)

            # Defensive shape validation before consuming the packed matrices.
            if (
                pred_ids is not None
                and target_ids is not None
                and lengths is not None
                and pred_ids.ndim == 2
                and target_ids.ndim == 2
                and lengths.ndim == 1
                and pred_ids.shape == target_ids.shape
                and pred_ids.shape[0] == lengths.shape[0]
            ):
                pred_cpu = pred_ids.to("cpu", dtype=torch.long)
                tgt_cpu = target_ids.to("cpu", dtype=torch.long)
                len_cpu = lengths.to("cpu", dtype=torch.long)

                for i in range(int(len_cpu.shape[0])):
                    # Only the first L positions per row are real tokens
                    # (the rest is -1 padding).
                    L = int(len_cpu[i].item())
                    if L <= 0:
                        continue
                    p = pred_cpu[i, :L]
                    t = tgt_cpu[i, :L]

                    total_tok_correct += torch.tensor(
                        float((p == t).sum().item()), device=self.device, dtype=torch.float32
                    )
                    total_tok_total += torch.tensor(float(L), device=self.device, dtype=torch.float32)

                    # Exact match needs text decoding, hence a tokenizer.
                    if self.tokenizer is not None:
                        pred_text = self.tokenizer.decode(p.tolist(), skip_special_tokens=True).strip()
                        tgt_text = self.tokenizer.decode(t.tolist(), skip_special_tokens=True).strip()
                        em = float(pred_text == tgt_text)
                        total_em_correct += torch.tensor(em, device=self.device, dtype=torch.float32)
                        total_em_total += torch.tensor(1.0, device=self.device, dtype=torch.float32)

                        if self.is_main_process and printed < print_examples:
                            logger.info(
                                f"[EVAL SAMPLE] pred={repr(pred_text)} | label={repr(tgt_text)} | match={bool(em)}"
                            )
                            printed += 1

        # Aggregate counters across ranks (collective: all ranks participate).
        if self.is_distributed:
            dist.all_reduce(total_loss, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_batches, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_tok_total, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_correct, op=dist.ReduceOp.SUM)
            dist.all_reduce(total_em_total, op=dist.ReduceOp.SUM)

        # clamp(min=1) guards against division by zero on empty eval sets.
        avg_loss = (total_loss / total_batches.clamp(min=1.0)).item()
        tok_acc = (total_tok_correct / total_tok_total.clamp(min=1.0)).item()
        em_acc = (total_em_correct / total_em_total.clamp(min=1.0)).item()

        return {"loss": avg_loss, "tok_acc": tok_acc, "em_acc": em_acc}
|
|
| def _append_eval_metrics( |
| self, |
| metrics: Dict[str, float], |
| *, |
| phase: str, |
| epoch: Optional[int], |
| train_avg_loss: Optional[float], |
| ) -> None: |
| if not self.is_main_process: |
| return |
| os.makedirs(self.config.output_dir, exist_ok=True) |
| record = { |
| "phase": phase, |
| "epoch": epoch, |
| "global_step": int(self.global_step), |
| "train_avg_loss": None if train_avg_loss is None else float(train_avg_loss), |
| "eval_loss": float(metrics.get("loss", 0.0)), |
| "em_acc_pct": float(metrics.get("em_acc", 0.0) * 100.0), |
| "tok_acc_pct": float(metrics.get("tok_acc", 0.0) * 100.0), |
| } |
| metrics_path = os.path.join(self.config.output_dir, "eval_metrics.jsonl") |
| with open(metrics_path, "a") as f: |
| f.write(json.dumps(record) + "\n") |
|
|
    def save_final_checkpoint(self):
        """Save the trainable parameters (and optionally the full model) to disk.

        Two artifacts may be written to ``config.output_dir``:
          1. A "trainable" checkpoint containing only embed_tokens / lm_head
             tensors (the only trainable parts of this baseline).
          2. Optionally (``config.save_full_checkpoint``) a full model
             state dict.

        Only rank 0 writes files; every rank participates in the FSDP
        state-dict gathering and in the barriers, so the statement order here
        matters — do not reorder the collective calls.
        """
        ckpt_path = os.path.join(self.config.output_dir, self.config.final_ckpt_name)
        base_model = unwrap_model(self.model)

        # Collect only the trainable tensors (embed_tokens + lm_head), moved to CPU.
        trainable_sd = {
            name: p.detach().cpu()
            for name, p in base_model.named_parameters()
            if p.requires_grad and (("embed_tokens" in name) or ("lm_head" in name))
        }

        # Under FSDP, parameters may be flattened/sharded so the name filter
        # above can come up empty; fall back to gathering a full state dict
        # (rank0-only, CPU-offloaded) and filtering by key name instead.
        if is_fsdp_model(self.model) and len(trainable_sd) == 0:
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, StateDictType, FullStateDictConfig

            full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
            with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                full_sd = self.model.state_dict()
            trainable_sd = {k: v for k, v in full_sd.items() if ("embed_tokens" in k) or ("lm_head" in k)}

        if self.is_main_process:
            # Key is named "memory_state_dict" (despite there being no Titans
            # memory here) so the eval_only loader in main() can read it back
            # with the same key as the Titans variant.
            torch.save(
                {"memory_state_dict": trainable_sd, "global_step": self.global_step, "config": asdict(self.config)},
                ckpt_path,
            )
            logger.info(f"Saved trainable checkpoint: {ckpt_path}")
        if self.is_distributed:
            # Keep non-zero ranks from racing ahead (e.g. into teardown) while
            # rank 0 is still writing.
            dist.barrier()

        if self.config.save_full_checkpoint:
            full_ckpt_path = os.path.join(self.config.output_dir, self.config.final_full_ckpt_name)
            if is_fsdp_model(self.model):
                from torch.distributed.fsdp import (
                    FullyShardedDataParallel as FSDP,
                    StateDictType,
                    FullStateDictConfig,
                )

                # Gather the full (unsharded) state dict; offloaded to CPU and
                # materialized only on rank 0 to bound memory use.
                full_cfg = FullStateDictConfig(offload_to_cpu=True, rank0_only=True)
                with FSDP.state_dict_type(self.model, StateDictType.FULL_STATE_DICT, full_cfg):
                    full_sd = self.model.state_dict()
            else:
                full_sd = unwrap_model(self.model).state_dict()

            if self.is_main_process:
                torch.save(
                    {"model_state_dict": full_sd, "global_step": self.global_step, "config": asdict(self.config)},
                    full_ckpt_path,
                )
                logger.info(f"Saved full checkpoint: {full_ckpt_path}")
            if self.is_distributed:
                dist.barrier()
|
|
|
|
| |
| |
| |
|
|
|
|
def main():
    """Entry point: parse CLI args, build data/model, then train or eval-only.

    Control-group run (see module docstring): the Qwen backbone is frozen and
    only embed_tokens + lm_head are trained — no Titans memory modules.
    Supports single-GPU, DDP, manual gradient all-reduce (for the cross-chunk
    backward graph), and FSDP.
    """
    # Imported lazily so the module-level torchao/sys.modules mocks are in
    # place before transformers is loaded.
    from transformers import AutoModelForCausalLM, AutoTokenizer

    parser = argparse.ArgumentParser(description="Qwen3 baseline v4 (NO TITANS) - Frozen Backbone Training")
    parser.add_argument("--fsdp", action="store_true")
    parser.add_argument("--eval_only", action="store_true")
    parser.add_argument("--ckpt_path", type=str, default=None)
    parser.add_argument("--max_samples", type=int, default=None)
    parser.add_argument("--max_length", type=int, default=None)
    parser.add_argument("--output_dir", type=str, default=None)
    parser.add_argument("--num_epochs", type=int, default=None)
    parser.add_argument("--eval_steps", type=int, default=None)
    parser.add_argument("--batch_size", type=int, default=None)
    parser.add_argument("--gradient_accumulation_steps", type=int, default=None)
    parser.add_argument("--chunk_size", type=int, default=None)
    parser.add_argument("--gradient_checkpointing", action="store_true")
    parser.add_argument("--no_chunkwise_backward", action="store_true")
    parser.add_argument("--lr_embed", type=float, default=None)
    parser.add_argument("--lr_lm_head", type=float, default=None)
    parser.add_argument("--debug_grad_norm", action="store_true")
    args = parser.parse_args()

    config = TrainingConfig()

    # Apply CLI overrides on top of the dataclass defaults (None = keep default).
    if args.fsdp:
        config.use_fsdp = True
    if args.max_samples is not None:
        config.max_samples = args.max_samples
    if args.max_length is not None:
        config.max_length = int(args.max_length)
    if args.output_dir is not None:
        config.output_dir = args.output_dir
    if args.num_epochs is not None:
        config.num_epochs = args.num_epochs
    if args.eval_steps is not None:
        config.eval_steps = args.eval_steps
    if args.batch_size is not None:
        config.batch_size = int(args.batch_size)
    if args.gradient_accumulation_steps is not None:
        config.gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    if args.chunk_size is not None:
        config.chunk_size = int(args.chunk_size)
    if args.gradient_checkpointing:
        config.gradient_checkpointing = True
    if args.no_chunkwise_backward:
        config.chunkwise_backward = False
    if args.lr_embed is not None:
        config.lr_embed = float(args.lr_embed)
    if args.lr_lm_head is not None:
        config.lr_lm_head = float(args.lr_lm_head)
    if args.debug_grad_norm:
        config.debug_grad_norm = True

    is_distributed, rank, local_rank, world_size = init_distributed()
    is_main = rank == 0

    # Per-chunk backward frees the graph chunk-by-chunk, which FSDP's sharded
    # all-gather/reshard cycle does not support — force it off under FSDP.
    if config.use_fsdp and config.chunkwise_backward:
        if is_main:
            logger.warning("chunkwise_backward is incompatible with FSDP; disabling it.")
        config.chunkwise_backward = False

    if is_distributed and (not config.use_fsdp):
        if not config.ddp_find_unused_parameters:
            config.ddp_find_unused_parameters = True
            if is_main:
                logger.warning("Enabling DDP find_unused_parameters.")

    # Rank-dependent seed so data-order randomness differs per process.
    torch.manual_seed(config.seed + rank)

    if torch.cuda.is_available():
        device = torch.device(f"cuda:{local_rank}" if is_distributed else "cuda")
    else:
        device = torch.device("cpu")

    # Fall back to fp16 on GPUs without bf16 support.
    if torch.cuda.is_available() and config.bf16:
        bf16_supported = False
        try:
            bf16_supported = torch.cuda.is_bf16_supported()
        except Exception:
            bf16_supported = False
        if not bf16_supported:
            if is_main:
                logger.warning("bf16 not supported; falling back to fp16.")
            config.bf16 = False
            config.fp16 = True

    if torch.cuda.is_available() and getattr(config, "use_tf32", False):
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True
        try:
            torch.set_float32_matmul_precision("high")
        except Exception:
            pass

    if is_main:
        logger.info("=" * 70)
        logger.info("Qwen3-4B baseline v4 Training (NO TITANS, FROZEN BACKBONE)")
        logger.info("=" * 70)
        logger.info(f"distributed={is_distributed}, world_size={world_size}")
        logger.info(f"model_path={config.model_path}")
        logger.info(f"data_path={config.data_path}")
        logger.info(f"output_dir={config.output_dir}")
        logger.info(f"max_samples={config.max_samples}")
        logger.info(f"max_length={config.max_length}")
        logger.info(f"num_epochs={config.num_epochs}")
        logger.info(f"chunk_size={config.chunk_size}")
        logger.info("Trainable: embed_tokens + lm_head")
        logger.info("=" * 70)

    tokenizer = AutoTokenizer.from_pretrained(config.model_path, trust_remote_code=True)
    if tokenizer.pad_token is None:
        tokenizer.pad_token = tokenizer.eos_token

    # Best-effort: monkey-patch transformers availability checks so it never
    # tries flash-attn-2 / torchao / torchvision code paths in this env.
    try:
        import transformers
        from transformers.utils import import_utils as _import_utils

        def _disabled(*args, **kwargs):
            return False

        _import_utils.is_flash_attn_2_available = _disabled
        if hasattr(transformers, "utils") and hasattr(transformers.utils, "is_flash_attn_2_available"):
            transformers.utils.is_flash_attn_2_available = _disabled
        if hasattr(_import_utils, "is_torchao_available"):
            _import_utils.is_torchao_available = _disabled
        if hasattr(_import_utils, "is_torchvision_available"):
            _import_utils.is_torchvision_available = _disabled
    except Exception as e:
        if is_main:
            logger.warning(f"Disable checks failed (ignored): {e}")

    torch_dtype = torch.bfloat16 if config.bf16 else (torch.float16 if config.fp16 else torch.float32)

    qwen_model = AutoModelForCausalLM.from_pretrained(
        config.model_path,
        torch_dtype=torch_dtype,
        device_map=None,
        trust_remote_code=True,
        attn_implementation="sdpa",
        low_cpu_mem_usage=True,
    )
    qwen_model.to(device)
    # No generation during training: disable the KV cache so checkpointing works.
    qwen_model.config.use_cache = False
    if config.gradient_checkpointing and hasattr(qwen_model, "gradient_checkpointing_enable"):
        qwen_model.gradient_checkpointing_enable()

    train_dataset = BABILongDataset(
        config.data_path,
        tokenizer,
        max_length=config.max_length,
        answer_reserve_tokens=config.answer_reserve_tokens,
        label_prefix_tokens=config.label_prefix_tokens,
        max_samples=config.max_samples,
    )

    # Fixed-seed 90/10 train/eval split so every rank sees the same partition.
    train_size = int(0.9 * len(train_dataset))
    eval_size = len(train_dataset) - train_size
    train_dataset, eval_dataset = torch.utils.data.random_split(
        train_dataset,
        [train_size, eval_size],
        generator=torch.Generator().manual_seed(config.seed),
    )

    train_sampler = None
    eval_sampler = None
    if is_distributed:
        from torch.utils.data.distributed import DistributedSampler

        train_sampler = DistributedSampler(
            train_dataset, num_replicas=world_size, rank=rank, shuffle=True, seed=config.seed
        )
        eval_sampler = DistributedSampler(eval_dataset, num_replicas=world_size, rank=rank, shuffle=False)

    train_dataloader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=(train_sampler is None),
        sampler=train_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        sampler=eval_sampler,
        collate_fn=collate_fn,
        num_workers=0,
    )

    model = QwenBaselineForBABILongV4(qwen_model, config)
    model.to(device)

    use_ddp = is_distributed and world_size > 1
    use_manual_grad_sync = False

    # Cross-chunk backward (chunkwise_backward=False) keeps one autograd graph
    # across chunks, which DDP's bucketed reducer cannot handle — use manual
    # gradient all-reduce instead of wrapping in DDP.
    # BUG FIX: this shortcut previously also fired when --fsdp was requested
    # (chunkwise_backward is force-disabled under FSDP above), which made the
    # FSDP wrapping branch below unreachable. FSDP now takes precedence.
    if use_ddp and (not config.use_fsdp) and (not config.chunkwise_backward):
        if is_main:
            logger.info("=" * 70)
            logger.info("Cross-chunk graph with multi-GPU: using MANUAL gradient sync (NO DDP wrap)")
            logger.info("=" * 70)
        use_ddp = False
        use_manual_grad_sync = True

    if use_ddp:
        if config.use_fsdp:
            from functools import partial
            from torch.distributed.fsdp import FullyShardedDataParallel as FSDP, MixedPrecision
            from torch.distributed.fsdp.wrap import transformer_auto_wrap_policy
            from transformers.models.qwen3.modeling_qwen3 import Qwen3DecoderLayer

            mp_policy = MixedPrecision(param_dtype=torch_dtype, reduce_dtype=torch_dtype, buffer_dtype=torch_dtype)
            # Shard at decoder-layer granularity.
            auto_wrap = partial(transformer_auto_wrap_policy, transformer_layer_cls={Qwen3DecoderLayer})

            model = FSDP(
                model,
                auto_wrap_policy=auto_wrap,
                mixed_precision=mp_policy,
                device_id=torch.cuda.current_device(),
                use_orig_params=config.fsdp_use_orig_params,
            )
        else:
            model = DDP(
                model,
                device_ids=[local_rank],
                output_device=local_rank,
                find_unused_parameters=config.ddp_find_unused_parameters,
            )

    trainer = Trainer(
        model=model,
        train_dataloader=train_dataloader,
        eval_dataloader=eval_dataloader,
        config=config,
        rank=rank,
        world_size=world_size,
        is_distributed=is_distributed,
        tokenizer=tokenizer,
        use_manual_grad_sync=use_manual_grad_sync,
    )

    if args.eval_only:
        ckpt_path = args.ckpt_path or os.path.join(config.output_dir, config.final_ckpt_name)
        if is_main:
            logger.info(f"eval_only: loading checkpoint: {ckpt_path}")
        ckpt = torch.load(ckpt_path, map_location="cpu")

        # "memory_state_dict" is the key written by save_final_checkpoint.
        # NOTE(review): loading into the unwrapped module may not propagate
        # under FSDP sharding — confirm if eval_only is combined with --fsdp.
        sd = ckpt.get("memory_state_dict", {})
        if len(sd) > 0:
            unwrap_model(model).load_state_dict(sd, strict=False)

        eval_metrics = trainer.evaluate()
        if is_main:
            # Clamp loss at 20 before exp() to avoid overflow in the ppl log.
            ppl = float(math.exp(min(20.0, eval_metrics["loss"])))
            logger.info(
                f"[EVAL] loss={eval_metrics['loss']:.4f}, ppl={ppl:.3f}, "
                f"em_acc={eval_metrics['em_acc'] * 100:.2f}%, "
                f"tok_acc={eval_metrics['tok_acc'] * 100:.2f}%"
            )
        cleanup_distributed()
        return

    trainer.train()
    cleanup_distributed()
|
|
|
|
# Script entry point: run training (or eval-only) when executed directly.
if __name__ == "__main__":
    main()
|
|
|
|