Commit 906b21d (verified) · Parent: 430e433 · committed by Pratyush-01

add --system-ids flag; add job_train_single.py for damped_spring
docs/plots/README.md ADDED
@@ -0,0 +1,16 @@
+# Training Curves
+
+PNGs in this directory are auto-generated by
+`physix.training.loop._render_training_curves` at the end of every GRPO run,
+then mirrored from the HF model repo via `physix-train/sync-plots.sh`.
+
+Files:
+
+- `loss.png` — GRPO surrogate loss over training steps.
+- `reward.png` — Mean reward (with ±1σ band) over training steps.
+- `reward_components.png` — Per-component reward (`match`, `match_dense`,
+  `correctness`, `simplicity`, `format`).
+
+To regenerate locally after a job:
+
+    ./physix-train/sync-plots.sh Pratyush-01/physix-3b-rl
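
For orientation, here is a minimal sketch of what a curve renderer like this might do, assuming matplotlib and per-step metric lists. The real `_render_training_curves` lives in `physix/training/loop.py` and is not part of this commit, so every name and signature below is illustrative, not the actual implementation:

    import matplotlib.pyplot as plt

    def render_training_curves(steps, losses, rewards, reward_stds, out_dir="docs/plots"):
        """Illustrative stand-in for _render_training_curves (hypothetical names)."""
        # loss.png: surrogate loss over training steps
        fig, ax = plt.subplots()
        ax.plot(steps, losses)
        ax.set_xlabel("step")
        ax.set_ylabel("GRPO surrogate loss")
        fig.savefig(f"{out_dir}/loss.png", dpi=150)
        plt.close(fig)

        # reward.png: mean reward with a +/-1 sigma band
        fig, ax = plt.subplots()
        ax.plot(steps, rewards)
        lo = [m - s for m, s in zip(rewards, reward_stds)]
        hi = [m + s for m, s in zip(rewards, reward_stds)]
        ax.fill_between(steps, lo, hi, alpha=0.2)
        ax.set_xlabel("step")
        ax.set_ylabel("mean reward")
        fig.savefig(f"{out_dir}/reward.png", dpi=150)
        plt.close(fig)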
physix/training/loop.py CHANGED
@@ -99,6 +99,9 @@ class TrainingConfig(BaseModel):
     num_steps: int = 300
     seed: int = 0
     instances_per_system: int = 32
+    #: Subset of system IDs to train on. Defaults to all SUPPORTED_SYSTEMS.
+    #: Pass a single ID (e.g. ``("damped_spring",)``) for focused single-task runs.
+    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS
     ablation: Optional[Ablation] = None
     wandb_project: str = "physix-live"
     wandb_run_name: Optional[str] = None
@@ -535,7 +538,7 @@ def _build_and_format_dataset(
     tokenizer: AutoTokenizer,
 ) -> Dataset:
     spec = DatasetSpec(
-        system_ids=SUPPORTED_SYSTEMS,
+        system_ids=config.system_ids,
         instances_per_system=config.instances_per_system,
         seed=config.seed,
     )
@@ -543,8 +546,8 @@
     _log.info(
         "Built training dataset: %d rows across %d systems (%s)",
         len(dataset),
-        len(SUPPORTED_SYSTEMS),
-        ", ".join(SUPPORTED_SYSTEMS),
+        len(config.system_ids),
+        ", ".join(config.system_ids),
     )

     def _apply_chat_template(example: dict[str, object]) -> dict[str, object]:
@@ -914,6 +917,15 @@ def _parse_args() -> TrainingConfig:
         help="Max tokens per rollout completion. Shorter = faster generation.")
     parser.add_argument("--lora-r", type=int, default=16)
     parser.add_argument("--instances-per-system", type=int, default=32)
+    parser.add_argument(
+        "--system-ids",
+        default=None,
+        help=(
+            "Comma-separated list of system IDs to train on "
+            "(e.g. 'damped_spring' or 'free_fall,simple_pendulum'). "
+            "Defaults to all SUPPORTED_SYSTEMS when omitted."
+        ),
+    )
     parser.add_argument(
         "--ablation",
         choices=("no_progress", "no_simplicity", "no_format"),
@@ -961,6 +973,12 @@
         "--sft-checkpoint for a fresh GRPO from a merged SFT model."
     )

+    system_ids = (
+        tuple(s.strip() for s in args.system_ids.split(",") if s.strip())
+        if args.system_ids
+        else SUPPORTED_SYSTEMS
+    )
+
     return TrainingConfig(
         model_name=args.model,
         sft_checkpoint=args.sft_checkpoint,
@@ -972,6 +990,7 @@
         max_completion_length=args.max_completion_length,
         lora_r=args.lora_r,
         instances_per_system=args.instances_per_system,
+        system_ids=system_ids,
         ablation=args.ablation,
         save_method=args.save_method,
         wandb_project=args.wandb_project,
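
Both `_parse_args` in loop.py and `main` in sft.py parse the new flag with the same inline expression. Pulled out as a standalone helper for illustration (the helper name is hypothetical, and `SUPPORTED_SYSTEMS` is stubbed with IDs taken from the help text above), its edge-case behavior is:

    # Stand-in for the real tuple defined in the physix package.
    SUPPORTED_SYSTEMS = ("damped_spring", "free_fall", "simple_pendulum")

    def parse_system_ids(raw: str | None) -> tuple[str, ...]:
        # Mirrors the inline expression added in both files of this commit.
        return (
            tuple(s.strip() for s in raw.split(",") if s.strip())
            if raw
            else SUPPORTED_SYSTEMS
        )

    assert parse_system_ids(None) == SUPPORTED_SYSTEMS             # flag omitted
    assert parse_system_ids("damped_spring") == ("damped_spring",)
    assert parse_system_ids(" free_fall , simple_pendulum ") == ("free_fall", "simple_pendulum")
    assert parse_system_ids("damped_spring,,") == ("damped_spring",)  # empty entries dropped
    assert parse_system_ids(",") == ()  # all-separator input yields (), not the default

The last case is a quirk of the `if args.system_ids` truthiness check: a non-empty string containing only separators bypasses the default and produces an empty tuple.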
physix/training/sft.py CHANGED
@@ -143,6 +143,7 @@ def train_sft(
     gradient_accumulation_steps: int = 4,
     learning_rate: float = 2e-5,
     instances_per_system: int = 32,
+    system_ids: tuple[str, ...] = SUPPORTED_SYSTEMS,
     seed: int = 0,
     wandb_run_name: str | None = None,
     hub_checkpoint_repo_id: str | None = None,
@@ -198,7 +199,7 @@
         random_state=seed,
     )

-    dataset = build_sft_dataset(instances_per_system=instances_per_system, seed=seed)
+    dataset = build_sft_dataset(system_ids=system_ids, instances_per_system=instances_per_system, seed=seed)

     def _format_row(row: dict) -> dict:
         """Combine prompt + completion into a single training string."""
@@ -308,6 +309,14 @@ def main() -> None:
     parser.add_argument("--output-dir", default="runs/physix-1.5b-sft")
     parser.add_argument("--epochs", type=int, default=2)
     parser.add_argument("--instances-per-system", type=int, default=32)
+    parser.add_argument(
+        "--system-ids",
+        default=None,
+        help=(
+            "Comma-separated list of system IDs to include in the SFT dataset "
+            "(e.g. 'damped_spring'). Defaults to all SUPPORTED_SYSTEMS."
+        ),
+    )
     parser.add_argument("--lora-r", type=int, default=32)
     parser.add_argument("--learning-rate", type=float, default=2e-5)
     parser.add_argument("--seed", type=int, default=0)
@@ -323,6 +332,12 @@
     )
     args = parser.parse_args()

+    system_ids = (
+        tuple(s.strip() for s in args.system_ids.split(",") if s.strip())
+        if args.system_ids
+        else SUPPORTED_SYSTEMS
+    )
+
     os.environ.setdefault("WANDB_PROJECT", "physix-live")
     train_sft(
         model_name=args.model,
@@ -331,6 +346,7 @@
         lora_r=args.lora_r,
         learning_rate=args.learning_rate,
         instances_per_system=args.instances_per_system,
+        system_ids=system_ids,
         seed=args.seed,
         wandb_run_name=args.wandb_run_name,
         hub_checkpoint_repo_id=args.hub_checkpoint_repo_id,
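
The commit message also mentions `job_train_single.py`, which does not appear in this diff, so its contents are not reproduced here. Going only by the flags added above, focused single-system runs would be invoked along these lines (the `python -m` entry points are assumptions; only the flag itself comes from the diff):

    # GRPO run restricted to one system (entry point assumed)
    python -m physix.training.loop --system-ids damped_spring

    # SFT run restricted the same way (entry point assumed)
    python -m physix.training.sft --system-ids damped_spring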