GRPO: load model manually (avoid model_init_kwargs API drift); auto-set per_device_batch_size = num_generations to satisfy GRPO group-divisibility constraint
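As a rough illustration of the group-divisibility constraint named in the title (example numbers only, not part of the change; the real values come from args.num_generations and the batch/accumulation flags this commit adds):

num_generations = 4                   # example: completions sampled per prompt
per_device_bs = num_generations       # the auto default: one full group per device step
grad_accum = 4                        # example value

# Each prompt is repeated num_generations times within one forward pass, so the
# per-device batch has to hold whole groups of completions.
assert per_device_bs % num_generations == 0

prompts_per_device_step = per_device_bs // num_generations            # 1
prompts_per_optimizer_step = prompts_per_device_step * grad_accum     # 4 (per device)
completions_per_optimizer_step = prompts_per_optimizer_step * num_generations  # 16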
train.py CHANGED
@@ -20,7 +20,7 @@ import time
 
 import torch
 from peft import LoraConfig
-from transformers import AutoTokenizer, BitsAndBytesConfig
+from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 from trl import GRPOConfig, GRPOTrainer
 
 from opensleuth_train import (
@@ -52,8 +52,11 @@ def parse_args() -> argparse.Namespace:
     p.add_argument("--max-prompt-length", type=int, default=int(os.environ.get("MAX_PROMPT_LENGTH", "768")))
     p.add_argument("--learning-rate", type=float, default=float(os.environ.get("LEARNING_RATE", "1e-5")))
     p.add_argument("--num-train-epochs", type=float, default=float(os.environ.get("NUM_TRAIN_EPOCHS", "1")))
-
-
+    # GRPO requires per_device_train_batch_size to be a multiple of num_generations
+    # (one prompt is repeated num_generations times, all in the same forward pass).
+    # Default to 1 prompt × num_generations completions per device step.
+    p.add_argument("--per-device-batch-size", type=int, default=int(os.environ.get("PER_DEVICE_BATCH_SIZE", "0")))
+    p.add_argument("--gradient-accumulation-steps", type=int, default=int(os.environ.get("GRAD_ACCUM", "4")))
     p.add_argument("--no-4bit", action="store_true", default=os.environ.get("NO_4BIT", "0") == "1")
     p.add_argument("--seed", type=int, default=int(os.environ.get("SEED", "42")))
     return p.parse_args()
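A small sketch of how the env-backed defaults above behave (hypothetical values; only PER_DEVICE_BATCH_SIZE and GRAD_ACCUM are introduced by this change):

import os

# No flag and no env var: the default stays 0, which main() later treats as
# "derive the batch size from num_generations" (see the GRPOConfig hunk below).
int(os.environ.get("PER_DEVICE_BATCH_SIZE", "0"))   # -> 0

# Exporting PER_DEVICE_BATCH_SIZE=8 (hypothetical) pins it explicitly; it must
# still be a multiple of num_generations or main() raises ValueError.
os.environ["PER_DEVICE_BATCH_SIZE"] = "8"
int(os.environ.get("PER_DEVICE_BATCH_SIZE", "0"))   # -> 8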
@@ -128,9 +131,23 @@ def main() -> int:
         bias="none",
     )
 
+    # GRPO requires per_device_train_batch_size to be a multiple of num_generations.
+    # If the caller didn't pin one explicitly, default to one prompt per device.
+    per_device_bs = args.per_device_batch_size or args.num_generations
+    if per_device_bs % args.num_generations != 0:
+        raise ValueError(
+            f"per_device_batch_size ({per_device_bs}) must be a multiple of "
+            f"num_generations ({args.num_generations})."
+        )
+    log.info(
+        "GRPO batching: per_device_batch_size=%d (= %d prompt(s) × %d generations), grad_accum=%d",
+        per_device_bs, per_device_bs // args.num_generations, args.num_generations,
+        args.gradient_accumulation_steps,
+    )
+
     grpo_config = GRPOConfig(
         output_dir=args.output_dir,
-        per_device_train_batch_size=
+        per_device_train_batch_size=per_device_bs,
         gradient_accumulation_steps=args.gradient_accumulation_steps,
         learning_rate=args.learning_rate,
         num_train_epochs=args.num_train_epochs,
@@ -155,42 +172,24 @@
     env_reward_fn.__name__ = "env_verifier_reward"
     format_reward.__name__ = "format_reward"
 
+    # Load the model ourselves so we control quantization + dtype precisely.
+    # GRPOTrainer in 0.16 takes model objects and passes them through to its
+    # internal ref-model copy + LoRA wrapping.
+    log.info("loading base model with quantization=%s", bnb_config is not None)
+    model_kwargs = {"trust_remote_code": True, "torch_dtype": torch.bfloat16}
+    if bnb_config is not None:
+        model_kwargs["quantization_config"] = bnb_config
+    model = AutoModelForCausalLM.from_pretrained(args.model_name, **model_kwargs)
+
     log.info("instantiating GRPOTrainer")
-
-
-    trainer_kwargs = dict(
-        model=args.model_name,
+    trainer = GRPOTrainer(
+        model=model,
         reward_funcs=[env_reward_fn, format_reward],
         args=grpo_config,
         train_dataset=dataset,
         peft_config=peft_config,
+        processing_class=tokenizer,
     )
-    if bnb_config is not None:
-        # Some TRL versions accept model_init_kwargs to pass through to from_pretrained.
-        trainer_kwargs.setdefault("model_init_kwargs", {})
-        trainer_kwargs["model_init_kwargs"].update(
-            {"quantization_config": bnb_config, "torch_dtype": torch.bfloat16}
-        )
-
-    try:
-        trainer = GRPOTrainer(**trainer_kwargs)
-    except TypeError as e:
-        # Older TRL (<0.16) doesn't accept model_init_kwargs at GRPOTrainer level;
-        # fall back to loading model first.
-        log.warning("GRPOTrainer rejected kwargs (%s); falling back to manual model load", e)
-        from transformers import AutoModelForCausalLM
-        model_kwargs = {"trust_remote_code": True, "torch_dtype": torch.bfloat16}
-        if bnb_config is not None:
-            model_kwargs["quantization_config"] = bnb_config
-        model = AutoModelForCausalLM.from_pretrained(args.model_name, **model_kwargs)
-        trainer = GRPOTrainer(
-            model=model,
-            reward_funcs=[env_reward_fn, format_reward],
-            args=grpo_config,
-            train_dataset=dataset,
-            peft_config=peft_config,
-            processing_class=tokenizer,
-        )
 
     log.info("starting GRPO training")
     trainer.train()
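For context on the reward wiring above: TRL's GRPOTrainer calls each entry in reward_funcs with the batched completions (plus prompts and any extra dataset columns as keyword arguments) and expects one float per completion, and it keys its per-reward logging off each function's __name__, which is presumably why the script pins those names explicitly. A minimal sketch of that shape, assuming plain-text completions rather than chat-format message lists (the real env_reward_fn and format_reward live in opensleuth_train):

def format_reward(completions, **kwargs):
    # One score per completion; GRPO then normalizes scores within each group of
    # num_generations completions that share a prompt.
    return [1.0 if "<answer>" in c and "</answer>" in c else 0.0 for c in completions]

format_reward.__name__ = "format_reward"  # the name TRL uses when logging this reward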