Fix total_mem → total_memory in V4 notebook (PyTorch API)
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
notebooks/v4_instruct_grpo.ipynb
CHANGED
@@ -27,7 +27,7 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": "import torch\n\nprint(f\"CUDA available: {torch.cuda.is_available()}\")\nprint(f\"GPU: {torch.cuda.get_device_name(0)}\")\nprint(f\"VRAM: {torch.cuda.get_device_properties(0).total_mem / 1e9:.1f} GB\")\nprint(f\"bf16 support: {torch.cuda.is_bf16_supported()}\")\n\nfrom unsloth import FastLanguageModel\nprint(f\"\\n✓ Unsloth loaded\")\n\nimport trl\nassert trl.__version__ == \"0.24.0\", f\"Expected TRL 0.24.0, got {trl.__version__}\"\nprint(f\"✓ TRL {trl.__version__}\")\n\nimport transformers\nprint(f\"✓ Transformers {transformers.__version__}\")"
+   "source": "import torch\n\nprint(f\"CUDA available: {torch.cuda.is_available()}\")\nprint(f\"GPU: {torch.cuda.get_device_name(0)}\")\nprint(f\"VRAM: {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f} GB\")\nprint(f\"bf16 support: {torch.cuda.is_bf16_supported()}\")\n\nfrom unsloth import FastLanguageModel\nprint(f\"\\n✓ Unsloth loaded\")\n\nimport trl\nassert trl.__version__ == \"0.24.0\", f\"Expected TRL 0.24.0, got {trl.__version__}\"\nprint(f\"✓ TRL {trl.__version__}\")\n\nimport transformers\nprint(f\"✓ Transformers {transformers.__version__}\")"
   },
   {
    "cell_type": "markdown",
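The old environment-check cell failed at the VRAM print because the object returned by torch.cuda.get_device_properties() has no total_mem attribute; the correct field is total_memory, which reports device capacity in bytes. A minimal standalone sketch of the corrected call (outside the notebook, assuming nothing from the other cells):

import torch

# total_memory is the documented attribute (in bytes); total_mem does not
# exist on the device-properties object and raises AttributeError.
if torch.cuda.is_available():
    props = torch.cuda.get_device_properties(0)
    print(f"GPU: {props.name}")
    print(f"VRAM: {props.total_memory / 1e9:.1f} GB")
else:
    print("CUDA not available; nothing to report")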
@@ -135,7 +135,7 @@
    "execution_count": null,
    "metadata": {},
    "outputs": [],
-   "source": "from trl import GRPOConfig, GRPOTrainer\n\nFastLanguageModel.for_training(model)\n\nsmoke_config = GRPOConfig(\n    output_dir=str(CHECKPOINT_DIR / \"smoke\"),\n    num_generations=NUM_GENERATIONS,\n    scale_rewards=SCALE_REWARDS,\n    max_completion_length=MAX_COMPLETION_LENGTH,\n    max_steps=1,\n    temperature=TEMPERATURE,\n    beta=BETA,\n    per_device_train_batch_size=BATCH_SIZE,\n    gradient_accumulation_steps=1,\n    learning_rate=LEARNING_RATE,\n    fp16=False,\n    bf16=True,\n    logging_steps=1,\n    save_steps=999,\n    report_to=\"none\",\n    max_prompt_length=MAX_SEQ_LENGTH // 2,\n    seed=42,\n    remove_unused_columns=False,\n)\n\n\nclass UnslothGRPOTrainer(GRPOTrainer):\n    \"\"\"Wraps generation with Unsloth for_inference()/for_training().\"\"\"\n    def _generate(self, prompts, images):\n        FastLanguageModel.for_inference(self.model)\n        try:\n            result = super()._generate(prompts, images)\n        finally:\n            FastLanguageModel.for_training(self.model)\n        return result\n\n\nsmoke_trainer = UnslothGRPOTrainer(\n    model=model,\n    reward_funcs=commerce_reward_fn,\n    args=smoke_config,\n    train_dataset=train_dataset,\n    processing_class=tokenizer,\n)\n\nt0 = time.time()\nsmoke_trainer.train()\nstep_time = time.time() - t0\n\npeak_vram = torch.cuda.max_memory_allocated() / 1e9\nprint(f\"\\n✓ Smoke test passed!\")\nprint(f\"  Step time: {step_time:.0f}s\")\nprint(f\"  Peak VRAM: {peak_vram:.1f}GB / {torch.cuda.get_device_properties(0).total_mem / 1e9:.1f}GB\")\nprint(f\"  Estimated full run ({MAX_STEPS} steps): {step_time * MAX_STEPS / 3600:.1f}h\")\n\ndel smoke_trainer\ngc.collect(); torch.cuda.empty_cache()"
+   "source": "from trl import GRPOConfig, GRPOTrainer\n\nFastLanguageModel.for_training(model)\n\nsmoke_config = GRPOConfig(\n    output_dir=str(CHECKPOINT_DIR / \"smoke\"),\n    num_generations=NUM_GENERATIONS,\n    scale_rewards=SCALE_REWARDS,\n    max_completion_length=MAX_COMPLETION_LENGTH,\n    max_steps=1,\n    temperature=TEMPERATURE,\n    beta=BETA,\n    per_device_train_batch_size=BATCH_SIZE,\n    gradient_accumulation_steps=1,\n    learning_rate=LEARNING_RATE,\n    fp16=False,\n    bf16=True,\n    logging_steps=1,\n    save_steps=999,\n    report_to=\"none\",\n    max_prompt_length=MAX_SEQ_LENGTH // 2,\n    seed=42,\n    remove_unused_columns=False,\n)\n\n\nclass UnslothGRPOTrainer(GRPOTrainer):\n    \"\"\"Wraps generation with Unsloth for_inference()/for_training().\"\"\"\n    def _generate(self, prompts, images):\n        FastLanguageModel.for_inference(self.model)\n        try:\n            result = super()._generate(prompts, images)\n        finally:\n            FastLanguageModel.for_training(self.model)\n        return result\n\n\nsmoke_trainer = UnslothGRPOTrainer(\n    model=model,\n    reward_funcs=commerce_reward_fn,\n    args=smoke_config,\n    train_dataset=train_dataset,\n    processing_class=tokenizer,\n)\n\nt0 = time.time()\nsmoke_trainer.train()\nstep_time = time.time() - t0\n\npeak_vram = torch.cuda.max_memory_allocated() / 1e9\nprint(f\"\\n✓ Smoke test passed!\")\nprint(f\"  Step time: {step_time:.0f}s\")\nprint(f\"  Peak VRAM: {peak_vram:.1f}GB / {torch.cuda.get_device_properties(0).total_memory / 1e9:.1f}GB\")\nprint(f\"  Estimated full run ({MAX_STEPS} steps): {step_time * MAX_STEPS / 3600:.1f}h\")\n\ndel smoke_trainer\ngc.collect(); torch.cuda.empty_cache()"
   },
   {
    "cell_type": "markdown",