{ "nbformat": 4, "nbformat_minor": 5, "metadata": { "colab": {"provenance": [], "gpuType": "T4", "name": "Code_LLM_Training.ipynb"}, "kernelspec": {"display_name": "Python 3", "language": "python", "name": "python3"}, "language_info": {"codemirror_mode": {"name": "ipython", "version": 3}, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.10.12"}, "accelerator": "GPU" }, "cells": [ {"cell_type": "markdown", "metadata": {"id": "title"}, "source": ["# 🚀 Code LLM - QLoRA Fine-tuning\n", "\n", "**基礎模型**: Qwen/Qwen2.5-Coder-3B (HumanEval 84.1%)\n", "\n", "**訓練方法**: QLoRA SFT (4-bit NF4 + LoRA r=64)\n", "\n", "**數據集**: Code-Feedback (66K) + Magicoder-OSS (75K) + Evol-CodeAlpaca (110K) = **250K 程式碼樣本**\n", "\n", "**訓練時間**: ~6-8 小時 (T4 GPU)\n", "\n", "---\n", "\n", "⚠️ **請先設定 GPU**: Runtime → Change runtime type → **T4 GPU**"]}, {"cell_type": "markdown", "metadata": {"id": "s1"}, "source": ["## Step 1. 安裝依賴"]}, {"cell_type": "code", "metadata": {"id": "install"}, "execution_count": null, "outputs": [], "source": ["!pip install -q transformers trl peft bitsandbytes accelerate datasets trackio\n", "\n", "import torch\n", "print(f'PyTorch: {torch.__version__}')\n", "print(f'CUDA: {torch.cuda.is_available()}')\n", "if torch.cuda.is_available():\n", " print(f'GPU: {torch.cuda.get_device_name(0)}')\n", " print(f'VRAM: {torch.cuda.get_device_properties(0).total_mem / 1024**3:.1f} GB')"]}, {"cell_type": "markdown", "metadata": {"id": "s2"}, "source": ["## Step 2. HuggingFace 登入\n", "\n", "請先到 https://huggingface.co/settings/tokens 取得 Access Token(Write 權限)"]}, {"cell_type": "code", "metadata": {"id": "login"}, "execution_count": null, "outputs": [], "source": ["from huggingface_hub import login\n", "login()"]}, {"cell_type": "markdown", "metadata": {"id": "s3"}, "source": ["## Step 3. 配置設定\n", "\n", "❗ 請修改 `HF_USERNAME`"]}, {"cell_type": "code", "metadata": {"id": "config"}, "execution_count": null, "outputs": [], "source": ["# ======== 請修改這裡 ========\n", "HF_USERNAME = \"YOUR_HF_USERNAME\"\n", "# ============================\n", "\n", "MODEL_NAME = \"Qwen/Qwen2.5-Coder-3B\"\n", "OUTPUT_DIR = f\"{HF_USERNAME}/code-qwen2.5-coder-3b\"\n", "\n", "TRAINING_CONFIG = {\n", " \"learning_rate\": 2e-4,\n", " \"num_epochs\": 3,\n", " \"batch_size\": 1,\n", " \"gradient_accumulation\": 16,\n", " \"max_seq_length\": 2048,\n", " \"lora_r\": 64,\n", " \"lora_alpha\": 128,\n", "}\n", "\n", "print(f'模型: {MODEL_NAME}')\n", "print(f'輸出: https://huggingface.co/{OUTPUT_DIR}')"]}, {"cell_type": "markdown", "metadata": {"id": "s4"}, "source": ["## Step 4. 載入數據集 (250K 程式碼樣本)"]}, {"cell_type": "code", "metadata": {"id": "data"}, "execution_count": null, "outputs": [], "source": ["import os, torch\n", "from datasets import load_dataset, concatenate_datasets\n", "\n", "print('📦 [1/3] Code-Feedback (66K)...')\n", "code_feedback = load_dataset('m-a-p/Code-Feedback', split='train')\n", "cf_msgs = code_feedback.map(\n", " lambda x: {'messages': x['messages']},\n", " remove_columns=[c for c in code_feedback.column_names if c != 'messages']\n", ")\n", "print(f' ✅ {len(cf_msgs)} samples')\n", "\n", "print('📦 [2/3] Magicoder-OSS-Instruct (75K)...')\n", "magicoder = load_dataset('ise-uiuc/Magicoder-OSS-Instruct-75K', split='train')\n", "mc_msgs = magicoder.map(\n", " lambda x: {'messages': [\n", " {'role': 'system', 'content': 'You are an exceptionally skilled programmer. Write clean, efficient, well-documented code.'},\n", " {'role': 'user', 'content': x['problem']},\n", " {'role': 'assistant', 'content': x['solution']},\n", " ]},\n", " remove_columns=magicoder.column_names\n", ")\n", "print(f' ✅ {len(mc_msgs)} samples')\n", "\n", "print('📦 [3/3] Evol-CodeAlpaca (110K)...')\n", "evol = load_dataset('theblackcat102/evol-codealpaca-v1', split='train')\n", "evol_msgs = evol.map(\n", " lambda x: {'messages': [\n", " {'role': 'system', 'content': 'You are an exceptionally skilled programmer. Write clean, efficient, well-documented code.'},\n", " {'role': 'user', 'content': x['instruction']},\n", " {'role': 'assistant', 'content': x['output']},\n", " ]},\n", " remove_columns=evol.column_names\n", ")\n", "print(f' ✅ {len(evol_msgs)} samples')\n", "\n", "combined = concatenate_datasets([cf_msgs, mc_msgs, evol_msgs]).shuffle(seed=42)\n", "split = combined.train_test_split(test_size=0.02, seed=42)\n", "train_ds, eval_ds = split['train'], split['test']\n", "print(f'\\n📊 總計: {len(train_ds):,} train / {len(eval_ds):,} eval')"]}, {"cell_type": "markdown", "metadata": {"id": "s5"}, "source": ["## Step 5. 載入模型 + QLoRA"]}, {"cell_type": "code", "metadata": {"id": "model"}, "execution_count": null, "outputs": [], "source": ["from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n", "from peft import LoraConfig, prepare_model_for_kbit_training, get_peft_model\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)\n", "if tokenizer.pad_token is None:\n", " tokenizer.pad_token = tokenizer.eos_token\n", "print(f'Tokenizer vocab: {len(tokenizer):,}')\n", "\n", "bnb_config = BitsAndBytesConfig(\n", " load_in_4bit=True, bnb_4bit_quant_type='nf4',\n", " bnb_4bit_compute_dtype=torch.bfloat16, bnb_4bit_use_double_quant=True,\n", ")\n", "\n", "print('📥 載入模型 (4-bit)...')\n", "model = AutoModelForCausalLM.from_pretrained(\n", " MODEL_NAME, quantization_config=bnb_config, device_map='auto', trust_remote_code=True,\n", ")\n", "model = prepare_model_for_kbit_training(model)\n", "\n", "lora_config = LoraConfig(\n", " r=TRAINING_CONFIG['lora_r'], lora_alpha=TRAINING_CONFIG['lora_alpha'],\n", " lora_dropout=0.05, bias='none', task_type='CAUSAL_LM',\n", " target_modules=['q_proj', 'k_proj', 'v_proj', 'o_proj', 'gate_proj', 'up_proj', 'down_proj'],\n", " modules_to_save=['lm_head', 'embed_tokens'],\n", ")\n", "\n", "model = get_peft_model(model, lora_config)\n", "model.print_trainable_parameters()"]}, {"cell_type": "markdown", "metadata": {"id": "s6"}, "source": ["## Step 6. 開始訓練\n", "\n", "☕ 約需 6-8 小時(Colab T4),請保持瀏覽器開啟"]}, {"cell_type": "code", "metadata": {"id": "train"}, "execution_count": null, "outputs": [], "source": ["from trl import SFTTrainer, SFTConfig\n", "\n", "training_args = SFTConfig(\n", " learning_rate=TRAINING_CONFIG['learning_rate'], lr_scheduler_type='cosine', warmup_ratio=0.05,\n", " num_train_epochs=TRAINING_CONFIG['num_epochs'],\n", " per_device_train_batch_size=TRAINING_CONFIG['batch_size'],\n", " gradient_accumulation_steps=TRAINING_CONFIG['gradient_accumulation'],\n", " max_seq_length=TRAINING_CONFIG['max_seq_length'],\n", " gradient_checkpointing=True, bf16=True,\n", " optim='paged_adamw_8bit', packing=True,\n", " output_dir='./output_code', logging_steps=10, save_steps=1000, eval_steps=1000,\n", " push_to_hub=True, hub_model_id=OUTPUT_DIR,\n", " logging_strategy='steps', logging_first_step=True,\n", ")\n", "\n", "trainer = SFTTrainer(\n", " model=model, args=training_args,\n", " train_dataset=train_ds, eval_dataset=eval_ds,\n", " processing_class=tokenizer, peft_config=lora_config,\n", ")\n", "\n", "print('🚀 開始訓練...')\n", "print(f'模型將保存到: https://huggingface.co/{OUTPUT_DIR}')\n", "trainer.train()"]}, {"cell_type": "markdown", "metadata": {"id": "s7"}, "source": ["## Step 7. 上傳模型"]}, {"cell_type": "code", "metadata": {"id": "push"}, "execution_count": null, "outputs": [], "source": ["trainer.push_to_hub()\n", "print(f'✅ 模型已上傳: https://huggingface.co/{OUTPUT_DIR}')"]}, {"cell_type": "markdown", "metadata": {"id": "s8"}, "source": ["## Step 8. 測試寫程式能力"]}, {"cell_type": "code", "metadata": {"id": "eval"}, "execution_count": null, "outputs": [], "source": ["from transformers import pipeline\n", "\n", "pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=False)\n", "\n", "test_prompts = [\n", " ('Two Sum', 'def two_sum(nums: list[int], target: int) -> list[int]:\\n \"\"\"Return indices of two numbers that add up to target.\"\"\"\\n'),\n", " ('Fibonacci', 'def fibonacci(n: int) -> int:\\n \"\"\"Return the nth Fibonacci number.\"\"\"\\n'),\n", " ('Binary Search', 'def binary_search(arr: list[int], target: int) -> int:\\n \"\"\"Return index of target in sorted array, or -1 if not found.\"\"\"\\n'),\n", " ('Reverse List', 'def reverse_list(head):\\n \"\"\"Reverse a singly linked list.\"\"\"\\n'),\n", " ('Merge Sort', 'def merge_sort(arr: list[int]) -> list[int]:\\n \"\"\"Sort array using merge sort.\"\"\"\\n'),\n", "]\n", "\n", "for name, prompt in test_prompts:\n", " print(f'\\n{\"=\"*50}')\n", " print(f'📝 {name}')\n", " print(f'{\"=\"*50}')\n", " output = pipe(prompt, return_full_text=True)\n", " code = output[0]['generated_text']\n", " print(code)\n", " try:\n", " compile(code, '', 'exec')\n", " print('✅ 語法正確')\n", " except SyntaxError as e:\n", " print(f'❌ 語法錯誤: {e}')"]}, {"cell_type": "markdown", "metadata": {"id": "done"}, "source": ["---\n", "\n", "## ✅ 完成!\n", "\n", "### 在其他地方使用:\n", "\n", "```python\n", "from peft import PeftModel\n", "from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n", "import torch\n", "\n", "bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type='nf4', bnb_4bit_compute_dtype=torch.bfloat16)\n", "base = AutoModelForCausalLM.from_pretrained('Qwen/Qwen2.5-Coder-3B', quantization_config=bnb_config, device_map='auto')\n", "model = PeftModel.from_pretrained(base, 'YOUR_USERNAME/code-qwen2.5-coder-3b')\n", "tokenizer = AutoTokenizer.from_pretrained('Qwen/Qwen2.5-Coder-3B')\n", "\n", "prompt = 'def quick_sort(arr):\\n \"\"\"Sort array using quicksort.\"\"\"\\n'\n", "inputs = tokenizer(prompt, return_tensors='pt').to('cuda')\n", "outputs = model.generate(**inputs, max_new_tokens=512)\n", "print(tokenizer.decode(outputs[0], skip_special_tokens=True))\n", "```"]} ] }