Pratyush-01 committed
Commit 6a3d97c · verified · 1 Parent(s): 16e9350

training: add early-stop callback + lr=1e-5 for next run

Files changed (1): physix/training/loop.py (+79 −0)
physix/training/loop.py CHANGED
@@ -97,6 +97,9 @@ class TrainingConfig(BaseModel):
     per_device_train_batch_size: int = 1
     gradient_accumulation_steps: int = 8
     num_steps: int = 300
+    #: Stop early if ``reward_std`` stays below 0.05 for this many consecutive
+    #: logged steps. Set to 0 to disable early stopping.
+    early_stop_patience: int = 50
     seed: int = 0
     instances_per_system: int = 32
     #: Subset of system IDs to train on. Defaults to all SUPPORTED_SYSTEMS.
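For context, a quick sketch of the new field from the caller's side (hypothetical snippet, assuming the remaining TrainingConfig fields all carry defaults, as the visible lines suggest):

from physix.training.loop import TrainingConfig

# Default: require 50 consecutive low-variance logged steps before stopping.
config = TrainingConfig()
assert config.early_stop_patience == 50

# Setting the field to 0 disables the early-stop callback entirely.
config = TrainingConfig(early_stop_patience=0)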
 
@@ -176,6 +179,12 @@ def train(config: TrainingConfig) -> None:
     grpo_config = _build_grpo_config(config)

     callbacks = []
+    if config.early_stop_patience > 0:
+        callbacks.append(_RewardConvergenceCallback(patience=config.early_stop_patience))
+        _log.info(
+            "Early stopping enabled: will stop if reward_std < 0.05 for %d consecutive steps",
+            config.early_stop_patience,
+        )
     if config.hub_checkpoint_repo_id:
         callbacks.append(_WandbCheckpointCallback(config.hub_checkpoint_repo_id))
         _log.info(
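Further down in train() the callbacks list is presumably handed to the trainer. A rough sketch of that wiring, assuming TRL's GRPOTrainer; the actual construction is not part of this diff, and model, reward_funcs, dataset, and grpo_config stand in for names defined elsewhere in the function:

from trl import GRPOTrainer

# Sketch only: model, reward_funcs, dataset, and grpo_config are defined
# elsewhere in train(); this diff does not show that code.
trainer = GRPOTrainer(
    model=model,
    reward_funcs=reward_funcs,
    args=grpo_config,
    train_dataset=dataset,
    callbacks=callbacks,  # includes _RewardConvergenceCallback when enabled
)
trainer.train()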
 
@@ -610,6 +619,65 @@ def _select_reward_funcs(ablation: Optional[Ablation]) -> list[object]:
     )


+class _RewardConvergenceCallback(TrainerCallback):
+    """Stop training early when the GRPO reward has converged.
+
+    Convergence criterion: ``reward_std`` (std of total reward across the
+    rollout batch) stays below ``min_std`` for ``patience`` consecutive
+    logged steps. When ``reward_std ≈ 0`` every generation scores the
+    same, so the GRPO advantage estimates are all zero and the policy
+    gradient vanishes; continuing burns compute without learning.
+
+    The callback also logs the early-stop event to W&B so the decision
+    is visible on the run page.
+    """
+
+    def __init__(self, patience: int = 50, min_std: float = 0.05) -> None:
+        self._patience = patience
+        self._min_std = min_std
+        self._flat_steps: int = 0
+
+    def on_log(
+        self,
+        args: HFTrainingArguments,
+        state: TrainerState,
+        control: TrainerControl,
+        logs: dict | None = None,
+        **kwargs,
+    ) -> None:
+        if not logs:
+            return
+        reward_std = logs.get("reward_std")
+        if reward_std is None:
+            return
+
+        if reward_std < self._min_std:
+            self._flat_steps += 1
+        else:
+            self._flat_steps = 0
+
+        if self._flat_steps >= self._patience:
+            step = state.global_step
+            msg = (
+                f"[early-stop] reward_std < {self._min_std} for "
+                f"{self._flat_steps} consecutive steps at step {step}. "
+                "Stopping training; policy has converged."
+            )
+            print(f"\n{msg}\n", flush=True)
+            _log.info(msg)
+            try:
+                import wandb
+
+                if wandb.run is not None:
+                    wandb.run.summary["early_stop/step"] = step
+                    wandb.run.summary["early_stop/reason"] = (
+                        f"reward_std < {self._min_std} for {self._flat_steps} steps"
+                    )
+                    wandb.log({"early_stop/triggered": 1}, step=step)
+            except Exception as exc:  # noqa: BLE001
+                _log.debug("Could not log early-stop event to W&B: %s", exc)
+            control.should_training_stop = True
+
+
 class _WandbCheckpointCallback(TrainerCallback):
     """Make checkpoints first-class in W&B.
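The docstring's rationale follows from the GRPO advantage A_i = (r_i − mean(r)) / std(r): when every rollout in the batch gets the same reward, r_i − mean(r) = 0 for all i, so every advantage, and hence the policy gradient, is zero. A small smoke test of the counter logic (hypothetical, not part of the commit; the callback never reads args, so None suffices here):

from transformers import TrainerControl, TrainerState

from physix.training.loop import _RewardConvergenceCallback

cb = _RewardConvergenceCallback(patience=3, min_std=0.05)
state, control = TrainerState(), TrainerControl()

for step, std in enumerate([0.4, 0.01, 0.02, 0.3, 0.01, 0.0, 0.01], start=1):
    state.global_step = step
    cb.on_log(None, state, control, logs={"reward_std": std})

# The 0.3 at step 4 resets the counter, so the third consecutive
# low-variance step is step 7, where the callback requests a stop.
assert control.should_training_stop is True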
 
 
@@ -962,6 +1030,16 @@ def _parse_args() -> TrainingConfig:
         default=None,
         help="Path to a Trainer checkpoint directory to resume GRPO from.",
     )
+    parser.add_argument(
+        "--early-stop-patience",
+        type=int,
+        default=50,
+        help=(
+            "Stop training early if reward_std stays below 0.05 for this many "
+            "consecutive steps (policy converged, GRPO advantage ≈ 0). "
+            "Set to 0 to disable."
+        ),
+    )
     parser.add_argument("--seed", type=int, default=0)

     args = parser.parse_args()
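A quick round-trip of the new flag (hypothetical snippet mirroring the parser definition above; argparse maps --early-stop-patience to args.early_stop_patience):

import argparse

# Stand-in parser with just the new flag; the real one lives in _parse_args().
parser = argparse.ArgumentParser()
parser.add_argument("--early-stop-patience", type=int, default=50)

assert parser.parse_args([]).early_stop_patience == 50
assert parser.parse_args(["--early-stop-patience", "0"]).early_stop_patience == 0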
 
@@ -999,6 +1077,7 @@ def _parse_args() -> TrainingConfig:
         hub_repo_id=args.hub_repo_id,
         hub_checkpoint_repo_id=args.hub_checkpoint_repo_id,
         resume_from_checkpoint=args.resume_from_checkpoint,
+        early_stop_patience=args.early_stop_patience,
         seed=args.seed,
     )