asdf98 committed
Commit 6579157 · verified · Parent: b181b88

Upload EthicalHacking_Gemma4_E2B_Colab.ipynb

EthicalHacking_Gemma4_E2B_Colab.ipynb CHANGED
@@ -80,6 +80,8 @@
 "| `PACKING` | **False** | Safer memory profile |\n",
 "| `optim` | `adamw_8bit` | Must use 8-bit optimizer |\n",
 "\n",
+"**⚠️ ALSO ADDED:** `device_map={\"\": torch.cuda.current_device()}` to force GPU placement and avoid Kaggle/Colab `accelerate` bug.\n",
+"\n",
 "If you still OOM: lower `MAX_SEQ_LENGTH` to 1024, or use `use_rslora=True`."
 ]
 },
@@ -113,6 +115,7 @@
 "    max_seq_length=MAX_SEQ_LENGTH,\n",
 "    dtype=None,\n",
 "    load_in_4bit=True,\n",
+"    device_map={\"\": torch.cuda.current_device()},  # ← FORCE GPU: fixes Kaggle/Colab device placement bug\n",
 ")\n",
 "\n",
 "model = FastLanguageModel.get_peft_model(\n",