Pratyush-01 committed on
Commit
4979698
·
verified ·
1 Parent(s): 3e7e87c

loop.py: prominent W&B checkpoint logging (summary keys + table)

Browse files
Files changed (1) hide show
  1. physix/training/loop.py +136 -26
physix/training/loop.py CHANGED
@@ -134,6 +134,28 @@ def train(config: TrainingConfig) -> None:
134
  resume="allow",
135
  )
136
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
137
  _log.info("Loading model %s with Unsloth (4-bit, LoRA-%d)", config.model_name, config.lora_r)
138
  model, tokenizer = _load_model_and_tokenizer(config)
139
  train_dataset = _build_and_format_dataset(config, tokenizer)
@@ -365,14 +387,64 @@ def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
365
 
366
 
367
  class _WandbCheckpointCallback(TrainerCallback):
368
- """Logs a lightweight W&B artifact reference after each Trainer checkpoint.
369
-
370
- The artifact does not re-upload model weights — it simply records the
371
- HuggingFace Hub URL of the checkpoint so the W&B run links back to it.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
372
  """
373
 
374
  def __init__(self, hub_checkpoint_repo_id: str) -> None:
375
  self._repo = hub_checkpoint_repo_id
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
376
 
377
  def on_save(
378
  self,
@@ -381,43 +453,81 @@ class _WandbCheckpointCallback(TrainerCallback):
381
  control: TrainerControl,
382
  **kwargs,
383
  ) -> None:
384
- # The W&B artifact stores ONLY a URL reference back to the HF Hub
385
- # checkpoint — no model bytes are uploaded to wandb. This is purely
386
- # a convenience link in the W&B run page.
387
- #
388
- # Failure of this callback must NEVER crash training. The trainer's
389
- # own PushToHubCallback runs the actual git push asynchronously, so
390
- # at the instant `on_save` fires the Hub URL almost always 404s
391
- # (push hasn't finished yet). We try once, swallow any error, and
392
- # carry on. Loss of a wandb-side hyperlink is not a training bug.
393
  try:
394
  import wandb
395
 
396
  if wandb.run is None:
397
  return
398
  step = state.global_step
399
- artifact = wandb.Artifact(
400
- name="grpo-checkpoint",
401
- type="model",
402
- metadata={"step": step, "hub_repo": self._repo},
 
 
403
  )
404
- artifact.add_reference(
405
- f"https://huggingface.co/{self._repo}/tree/main/checkpoint-{step}"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
406
  )
407
- wandb.log_artifact(artifact, aliases=[f"step-{step}", "latest"])
408
  _log.info(
409
- "W&B artifact link logged for checkpoint-%d %s", step, self._repo
 
 
410
  )
411
- except Exception as exc: # noqa: BLE001 — see comment above
412
  _log.warning(
413
- "W&B checkpoint-link callback skipped at step %d: %s. "
414
- "Training continues; this only affects the wandb hyperlink, "
415
- "the actual checkpoint is still pushed to the HF Hub by "
416
- "the trainer's PushToHubCallback.",
417
  state.global_step,
418
  exc,
419
  )
420
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
421
 
422
  def _build_grpo_config(config: TrainingConfig) -> GRPOConfig:
423
  # NOTE on "train/loss → 0" — this is expected GRPO behaviour, not a bug.
 
134
  resume="allow",
135
  )
136
 
137
+ # Pin a few high-signal pointers into the run summary right away so the
138
+ # W&B "Overview" tab shows them prominently (no scrolling, no hunting).
139
+ if config.hub_checkpoint_repo_id:
140
+ ckpt_url = f"https://huggingface.co/{config.hub_checkpoint_repo_id}"
141
+ wandb.run.summary["checkpoint/repo"] = config.hub_checkpoint_repo_id
142
+ wandb.run.summary["checkpoint/repo_url"] = ckpt_url
143
+ if config.hub_repo_id:
144
+ wandb.run.summary["model/final_repo"] = config.hub_repo_id
145
+ wandb.run.summary["model/final_url"] = (
146
+ f"https://huggingface.co/{config.hub_repo_id}"
147
+ )
148
+ if config.lora_adapter_repo:
149
+ wandb.run.summary["resume/from_adapter"] = config.lora_adapter_repo
150
+ wandb.run.summary["resume/from_url"] = (
151
+ f"https://huggingface.co/{config.lora_adapter_repo}"
152
+ )
153
+ print(
154
+ f"\n[wandb] RESUMED run — warm-started from "
155
+ f"https://huggingface.co/{config.lora_adapter_repo}\n",
156
+ flush=True,
157
+ )
158
+
159
  _log.info("Loading model %s with Unsloth (4-bit, LoRA-%d)", config.model_name, config.lora_r)
160
  model, tokenizer = _load_model_and_tokenizer(config)
161
  train_dataset = _build_and_format_dataset(config, tokenizer)
 
387
 
388
 
389
  class _WandbCheckpointCallback(TrainerCallback):
390
+ """Make checkpoints first-class in W&B.
391
+
392
+ After every Trainer save, this callback:
393
+
394
+ 1. Resolves the latest commit hash on the Hub repo (best-effort — the
395
+ trainer's own ``PushToHubCallback`` runs ``git push`` asynchronously
396
+ so we may briefly see an older commit; that is fine, it self-corrects
397
+ on the next save).
398
+ 2. Updates the W&B run summary with persistent, prominent keys
399
+ (visible in the "Overview" tab of the run):
400
+ - ``checkpoint/last_step``
401
+ - ``checkpoint/last_commit``
402
+ - ``checkpoint/repo_url``
403
+ - ``checkpoint/last_url``
404
+ 3. Logs a step-indexed scalar ``checkpoint/step`` so a chart appears
405
+ on the W&B run page (one tick per save).
406
+ 4. Maintains a running ``checkpoint_history`` ``wandb.Table`` so every
407
+ saved checkpoint is browsable as a sortable table directly on the
408
+ run page (Tables tab).
409
+ 5. Prints a banner to stdout (visible in ``hf jobs logs``) with the
410
+ direct URL — so the checkpoint is also impossible to miss in the
411
+ job logs.
412
+
413
+ No model bytes are uploaded to W&B; the actual weights live on the HF
414
+ Hub checkpoint repo. We never crash training if any of this fails.
415
  """
416
 
417
  def __init__(self, hub_checkpoint_repo_id: str) -> None:
418
  self._repo = hub_checkpoint_repo_id
419
+ self._repo_url = f"https://huggingface.co/{hub_checkpoint_repo_id}"
420
+ self._table = None # lazy: wandb may not be initialised at __init__
421
+
422
+ def on_train_begin(
423
+ self,
424
+ args: HFTrainingArguments,
425
+ state: TrainerState,
426
+ control: TrainerControl,
427
+ **kwargs,
428
+ ) -> None:
429
+ # Pin the repo URL into the run config + summary at the very start
430
+ # so the link is visible on the W&B "Overview" panel from step 0.
431
+ try:
432
+ import wandb
433
+
434
+ if wandb.run is None:
435
+ return
436
+ wandb.run.summary["checkpoint/repo_url"] = self._repo_url
437
+ wandb.run.summary["checkpoint/repo"] = self._repo
438
+ wandb.config.update(
439
+ {"checkpoint_repo_url": self._repo_url, "checkpoint_repo": self._repo},
440
+ allow_val_change=True,
441
+ )
442
+ print(
443
+ f"\n[wandb] Checkpoint repo pinned in run summary: {self._repo_url}\n",
444
+ flush=True,
445
+ )
446
+ except Exception as exc: # noqa: BLE001
447
+ _log.warning("Could not pin checkpoint repo to W&B summary: %s", exc)
448
 
449
  def on_save(
450
  self,
 
453
  control: TrainerControl,
454
  **kwargs,
455
  ) -> None:
 
 
 
 
 
 
 
 
 
456
  try:
457
  import wandb
458
 
459
  if wandb.run is None:
460
  return
461
  step = state.global_step
462
+ commit_sha = self._latest_commit_sha()
463
+ short = (commit_sha or "pending")[:8]
464
+ tree_url = (
465
+ f"{self._repo_url}/tree/{commit_sha}"
466
+ if commit_sha
467
+ else f"{self._repo_url}/tree/main"
468
  )
469
+
470
+ # 1. Persistent summary keys (top-of-run, always visible).
471
+ wandb.run.summary["checkpoint/last_step"] = step
472
+ wandb.run.summary["checkpoint/last_commit"] = commit_sha or "pending"
473
+ wandb.run.summary["checkpoint/last_url"] = tree_url
474
+
475
+ # 2. Step-indexed scalar so a small chart appears on the run page.
476
+ wandb.log({"checkpoint/step": step}, step=step)
477
+
478
+ # 3. Running history table.
479
+ if self._table is None:
480
+ self._table = wandb.Table(
481
+ columns=["step", "commit", "url", "repo"]
482
+ )
483
+ self._table.add_data(step, commit_sha or "pending", tree_url, self._repo)
484
+ # Re-log the entire table each time so the latest version shows.
485
+ wandb.log({"checkpoint_history": self._table}, step=step)
486
+
487
+ # 4. Stdout banner — also visible in `hf jobs logs`.
488
+ print(
489
+ "\n"
490
+ "================ CHECKPOINT SAVED ================\n"
491
+ f" step : {step}\n"
492
+ f" commit: {short}\n"
493
+ f" url : {tree_url}\n"
494
+ f" repo : {self._repo_url}\n"
495
+ "==================================================\n",
496
+ flush=True,
497
  )
 
498
  _log.info(
499
+ "W&B checkpoint metadata logged: step=%d commit=%s",
500
+ step,
501
+ short,
502
  )
503
+ except Exception as exc: # noqa: BLE001
504
  _log.warning(
505
+ "W&B checkpoint callback skipped at step %d: %s. "
506
+ "Training continues; the actual checkpoint is still pushed "
507
+ "to the HF Hub by the trainer's PushToHubCallback.",
 
508
  state.global_step,
509
  exc,
510
  )
511
 
512
+ def _latest_commit_sha(self) -> Optional[str]:
513
+ """Best-effort fetch of the most recent commit on the checkpoint repo.
514
+
515
+ Uses ``HfApi.list_repo_commits`` if available; returns ``None`` on
516
+ any failure. The async ``git push`` may not be done at the instant
517
+ ``on_save`` fires, so we may see the *previous* checkpoint's commit;
518
+ that's acceptable — it self-corrects on the next save.
519
+ """
520
+ try:
521
+ from huggingface_hub import HfApi
522
+
523
+ api = HfApi(token=os.environ.get("HUGGINGFACE_HUB_TOKEN"))
524
+ commits = api.list_repo_commits(repo_id=self._repo, repo_type="model")
525
+ if commits:
526
+ return commits[0].commit_id
527
+ except Exception as exc: # noqa: BLE001
528
+ _log.debug("Could not fetch latest commit sha: %s", exc)
529
+ return None
530
+
531
 
532
  def _build_grpo_config(config: TrainingConfig) -> GRPOConfig:
533
  # NOTE on "train/loss → 0" — this is expected GRPO behaviour, not a bug.