loop.py: PEFT-based adapter resume (overrides stale base path)
physix/training/loop.py (+37 -3)
@@ -229,17 +229,51 @@ def _load_model_and_tokenizer(
     rollouts produce meaningful reward signal instead of all scoring zero.
     """
     if config.lora_adapter_repo:
+        # Resume path: load the base model and attach the existing LoRA
+        # adapter via PEFT. We deliberately do NOT call
+        # ``FastLanguageModel.from_pretrained(model_name=adapter_repo)``
+        # because the adapter's ``adapter_config.json`` may carry a stale
+        # ``base_model_name_or_path`` pointing at a path that only existed
+        # inside the previous training container (e.g. ``/tmp/physix-sft/merged``).
+        # PEFT's ``load_adapter`` ignores that field — it adapts onto whatever
+        # base we hand it.
         _log.info(
-            "Resuming from existing LoRA adapter %s",
+            "Resuming from existing LoRA adapter %s on top of %s",
             config.lora_adapter_repo,
+            config.model_name,
         )
         model, tokenizer = FastLanguageModel.from_pretrained(
-            model_name=config.lora_adapter_repo,
+            model_name=config.model_name,
             max_seq_length=config.max_seq_length,
             load_in_4bit=True,
             dtype=None,
         )
-
+        # Wrap the base in a fresh trainable LoRA, then overwrite its weights
+        # with the saved adapter. ``get_peft_model`` turns the model into a
+        # ``PeftModel`` (adapter name "default") so ``load_adapter`` accepts a
+        # hub repo id. Because "default" already exists, ``load_adapter`` only
+        # loads the weights and skips the repo's adapter_config.json, so
+        # ``r``/``lora_alpha`` here must match the saved adapter's values.
+        model = FastLanguageModel.get_peft_model(
+            model,
+            r=config.lora_r,
+            lora_alpha=config.lora_alpha,
+            target_modules=[
+                "q_proj", "k_proj", "v_proj", "o_proj",
+                "gate_proj", "up_proj", "down_proj",
+            ],
+            bias="none",
+            use_gradient_checkpointing="unsloth",
+            random_state=config.seed,
+        )
+        # Overwrite the freshly-initialised LoRA weights with the saved ones.
+        # ``adapter_name='default'`` matches what ``get_peft_model`` creates.
+        model.load_adapter(
+            config.lora_adapter_repo,
+            adapter_name="default",
+            is_trainable=True,
+        )
+        _log.info("Adapter loaded; LoRA is trainable and ready for GRPO.")
         return model, tokenizer
 
     if config.sft_checkpoint:
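Not part of the commit, but a quick way to confirm the stale-path problem this
change works around is to inspect the saved adapter's adapter_config.json via
PEFT. A minimal sketch; the repo id is a hypothetical stand-in for
config.lora_adapter_repo:

    from peft import PeftConfig

    # Hypothetical repo id standing in for config.lora_adapter_repo.
    cfg = PeftConfig.from_pretrained("your-org/physix-grpo-lora")

    # May print a path that only existed inside the old training container,
    # e.g. /tmp/physix-sft/merged -- passing the adapter repo as model_name
    # would make the loader try (and fail) to fetch this as the base model.
    print(cfg.base_model_name_or_path)

    # The values the adapter was saved with; config.lora_r and
    # config.lora_alpha must match these for load_adapter to line up.
    print(cfg.r, cfg.lora_alpha)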
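A possible alternative (an assumption about intent, not something this commit
does): PEFT can attach the saved adapter in one step with
PeftModel.from_pretrained, which does read the repo's adapter_config.json (so
the saved r/alpha are used) while wrapping whatever base model is passed in,
ignoring the stale base_model_name_or_path just the same. A sketch under those
assumptions:

    from peft import PeftModel

    # model comes from FastLanguageModel.from_pretrained(...) as in the diff.
    model = PeftModel.from_pretrained(
        model,                     # freshly loaded base model
        config.lora_adapter_repo,  # hub repo holding the saved adapter
        is_trainable=True,         # keep the LoRA weights trainable for GRPO
    )

The get_peft_model + load_adapter route the commit takes keeps Unsloth's own
LoRA wrapping (e.g. use_gradient_checkpointing="unsloth"), which a plain
PeftModel.from_pretrained would bypass.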