codeShare committed on
Commit
8a6dc9d
·
verified ·
1 Parent(s): d53d5fe

Upload LLM_ernie_prompt_enhance.ipynb

Browse files
Files changed (1) hide show
  1. LLM_ernie_prompt_enhance.ipynb +1 -1
LLM_ernie_prompt_enhance.ipynb CHANGED
@@ -1 +1 @@
1
- {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[],"gpuType":"T4","authorship_tag":"ABX9TyPauHdmQ/V+K6piBdnfaCc3"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"YfXxChUnBegt","cellView":"form"},"outputs":[],"source":["#@markdown # Cell 1: Mount Drive & Load HF_TOKEN (run first)\n","from google.colab import drive\n","import os\n","from google.colab import userdata\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Load HF token from Colab secrets (faster downloads, no rate limits)\n","os.environ[\"HF_TOKEN\"] = userdata.get(\"HF_TOKEN\")\n","print(\"βœ… Drive mounted + HF_TOKEN loaded from Colab secrets\")"]},{"cell_type":"code","source":["#@markdown # Cell 2: Load the official PE model (fine-tuned Ministral 3B) on CPU\n","from transformers import AutoModelForCausalLM, AutoTokenizer\n","import torch\n","\n","print(\"Loading official ERNIE-Image PE model (baidu/ERNIE-Image/pe) on CPU...\")\n","\n","tokenizer = AutoTokenizer.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe_tokenizer\",\n"," trust_remote_code=True\n",")\n","\n","model_cpu = AutoModelForCausalLM.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"cpu\",\n"," trust_remote_code=True,\n"," low_cpu_mem_usage=True\n",")\n","\n","print(\"βœ… Official PE model (Ministral 3B prompt enhancer) loaded on CPU\")"],"metadata":{"cellView":"form","id":"WS3ZiYKQBn40"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown # Cell 3: Install dependencies + move PE model to VRAM + setup environment\n","!pip install -q transformers accelerate\n","\n","import torch\n","from transformers import AutoModelForCausalLM, AutoTokenizer\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","print(f\"Using device: {device} 
({torch.cuda.get_device_name(0) if device=='cuda' else 'CPU'})\")\n","\n","# Move the model loaded in Cell 2 to GPU VRAM (fits easily in 13 GB)\n","if device == \"cuda\":\n"," model = model_cpu.to(device)\n"," print(\"βœ… Official PE model moved to GPU VRAM (~6 GB usage)\")\n","else:\n"," model = model_cpu\n"," print(\"⚠️ Running on CPU (still works but slower)\")\n","\n","# Reuse tokenizer from Cell 2\n","tokenizer = AutoTokenizer.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe_tokenizer\",\n"," trust_remote_code=True\n",")\n","\n","# Light system prompt (the model is already fine-tuned as a prompt enhancer)\n","SYSTEM_PROMPT = \"\"\"You are ERNIE-Image's official prompt enhancer.\n","Take a short user idea and turn it into a rich, detailed, high-quality image generation prompt.\n","Output ONLY the enhanced prompt. No extra text.\"\"\"\n","\n","print(\"βœ… Environment ready β€” official PE model is now in VRAM\")"],"metadata":{"cellView":"form","id":"7ycnLkK_BtZ2"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torch\n","from IPython.display import display, Markdown, clear_output\n","\n","# Persistent chat history (survives cell re-runs)\n","if 'chat_history' not in globals():\n"," chat_history = []\n","\n","\n","#@markdown ---\n","#@markdown **Reset chat history?**\n","reset_chat = True #@param {type:\"boolean\"}\n","\n","#@markdown **Your short idea / request (type here):**\n","user_input = \"\" #@param {type:\"string\"}\n","\n","# β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”\n","# Process immediately when the cell runs (no button needed)\n","# β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”\n","clear_output()\n","\n","if not user_input.strip():\n"," display(Markdown(\"⚠️ Please enter a prompt idea first.\"))\n","else:\n"," if reset_chat:\n"," chat_history.clear()\n"," display(Markdown(\"**βœ… Chat history has been reset.**\"))\n","\n"," # Build messages for the 
official ERNIE-Image PE model\n"," messages = [{\"role\": \"system\", \"content\": SYSTEM_PROMPT}]\n"," for u, a in chat_history:\n"," messages.append({\"role\": \"user\", \"content\": u})\n"," messages.append({\"role\": \"assistant\", \"content\": a})\n"," messages.append({\"role\": \"user\", \"content\": user_input})\n","\n"," # Generate (simple & reliable tensor style)\n"," tokenized_inputs = tokenizer.apply_chat_template(\n"," messages,\n"," add_generation_prompt=True,\n"," return_tensors=\"pt\"\n"," )\n"," input_ids = tokenized_inputs['input_ids'].to(model.device)\n"," attention_mask = tokenized_inputs['attention_mask'].to(model.device)\n","\n"," with torch.no_grad():\n"," output_ids = model.generate(\n"," input_ids,\n"," attention_mask=attention_mask,\n"," max_new_tokens=512,\n"," do_sample=True,\n"," temperature=0.7,\n"," top_p=0.9,\n"," repetition_penalty=1.1,\n"," )\n","\n"," enhanced_prompt = tokenizer.decode(\n"," output_ids[0][input_ids.shape[1]:],\n"," skip_special_tokens=True\n"," ).strip()\n"," enhanced_prompt = enhanced_prompt.replace('\\n', ' ') # Replace newlines with spaces\n","\n"," # Save to history\n"," chat_history.append((user_input, enhanced_prompt))\n","\n"," # Show full conversation + the new reply\n"," display(Markdown(\"### πŸ—£οΈ Chat History (PE-enhanced prompts)\"))\n"," for i, (u, a) in enumerate(chat_history, 1):\n"," display(Markdown(f\"**You {i}:** {u}\"))\n"," display(Markdown(f\"**PE Enhanced Prompt {i}:** {a}\"))\n"," display(Markdown(\"---\"))\n","\n"," # Final clear reply (exactly what you asked for)\n"," display(Markdown(\"**βœ… Here is your enhanced prompt:**\"))\n"," display(Markdown(f\"```prompt\\n{enhanced_prompt}\\n```\"))\n"," display(Markdown(\"**Just copy the block above and use it with ERNIE-Image!**\"))"],"metadata":{"cellView":"form","id":"gdnskFqeDN3a"},"execution_count":null,"outputs":[]}]}
 
1
+ {"nbformat":4,"nbformat_minor":0,"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/LLM_ernie_prompt_enhance.ipynb","timestamp":1776255474031}],"gpuType":"T4","authorship_tag":"ABX9TyPHOO578UD91aMNXd44gukl"},"kernelspec":{"name":"python3","display_name":"Python 3"},"language_info":{"name":"python"},"accelerator":"GPU"},"cells":[{"cell_type":"code","source":["#@markdown # Cell 1: Mount Drive & Load HF_TOKEN (run first)\n","from google.colab import drive\n","import os\n","from google.colab import userdata\n","\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Load HF token from Colab secrets (faster downloads, no rate limits)\n","os.environ[\"HF_TOKEN\"] = userdata.get(\"HF_TOKEN\")\n","print(\"βœ… Drive mounted + HF_TOKEN loaded from Colab secrets\")"],"metadata":{"cellView":"form","id":"SnZYwwmuFk_B"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown # Cell 2: Load the official PE model (fine-tuned Ministral 3B) on CPU\n","from transformers import AutoModelForCausalLM, AutoTokenizer\n","import torch\n","\n","print(\"Loading official ERNIE-Image PE model (baidu/ERNIE-Image/pe) on CPU...\")\n","\n","tokenizer = AutoTokenizer.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe_tokenizer\",\n"," trust_remote_code=True\n",")\n","\n","model_cpu = AutoModelForCausalLM.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe\",\n"," torch_dtype=torch.bfloat16,\n"," device_map=\"cpu\",\n"," trust_remote_code=True,\n"," low_cpu_mem_usage=True\n",")\n","\n","print(\"βœ… Official PE model (Ministral 3B prompt enhancer) loaded on CPU\")"],"metadata":{"cellView":"form","id":"CC5xU_71FoJ0"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown # Cell 3: Install dependencies + move PE model to VRAM + setup environment\n","!pip install -q transformers accelerate\n","\n","import torch\n","from transformers import (\n"," 
AutoModelForCausalLM,\n"," AutoTokenizer,\n"," AutoModelForSeq2SeqLM # ← NEW for manual translator\n",")\n","\n","device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n","print(f\"Using device: {device} ({torch.cuda.get_device_name(0) if device=='cuda' else 'CPU'})\")\n","\n","# Move the model loaded in Cell 2 to GPU VRAM\n","if device == \"cuda\":\n"," model = model_cpu.to(device)\n"," print(\"βœ… Official PE model moved to GPU VRAM (~6 GB usage)\")\n","else:\n"," model = model_cpu\n"," print(\"⚠️ Running on CPU (still works but slower)\")\n","\n","# Reuse tokenizer from Cell 2\n","tokenizer = AutoTokenizer.from_pretrained(\n"," \"baidu/ERNIE-Image\",\n"," subfolder=\"pe_tokenizer\",\n"," trust_remote_code=True\n",")\n","\n","# ── Load Chinese β†’ English translator MANUALLY (fixes Colab pipeline bug) ──\n","print(\"Loading Helsinki-NLP Chinese-to-English translator (manual load)...\")\n","zh_en_tokenizer = AutoTokenizer.from_pretrained(\"Helsinki-NLP/opus-mt-zh-en\")\n","zh_en_model = AutoModelForSeq2SeqLM.from_pretrained(\n"," \"Helsinki-NLP/opus-mt-zh-en\",\n"," torch_dtype=torch.float16 if device == \"cuda\" else torch.float32,\n",").to(device)\n","print(\"βœ… Chinese-to-English translator loaded (~300 MB)\")\n","\n","# Translation helper function\n","def translate_chinese_to_english(text):\n"," if not text or not text.strip():\n"," return text\n"," inputs = zh_en_tokenizer(\n"," text,\n"," return_tensors=\"pt\",\n"," padding=True,\n"," truncation=True,\n"," max_length=512\n"," ).to(device)\n"," with torch.no_grad():\n"," outputs = zh_en_model.generate(\n"," **inputs,\n"," max_new_tokens=512,\n"," num_beams=5,\n"," length_penalty=0.8,\n"," early_stopping=True\n"," )\n"," return zh_en_tokenizer.decode(outputs[0], skip_special_tokens=True).strip()\n","\n","print(\"βœ… Translation function ready\")\n","\n","# Light system prompt\n","SYSTEM_PROMPT = \"\"\"You are ERNIE-Image's official prompt enhancer.\n","Take a short user idea and turn it into a rich, 
detailed, high-quality image generation prompt.\n","Output ONLY the enhanced prompt. No extra text.\"\"\"\n","\n","print(\"βœ… Environment ready β€” PE model + translator are now in VRAM\")"],"metadata":{"cellView":"form","id":"HebyQd-qHoRg"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["import torch\n","from IPython.display import display, Markdown, clear_output\n","\n","# Persistent chat history (survives cell re-runs)\n","if 'chat_history' not in globals():\n"," chat_history = []\n","\n","\n","#@markdown ---\n","#@markdown **Reset chat history?**\n","reset_chat = False #@param {type:\"boolean\"}\n","\n","#@markdown **Your short idea / request (type here):**\n","user_input = \"\" #@param {type:\"string\"}\n","\n","# β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”\n","# Process immediately when the cell runs\n","# β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”β€”\n","clear_output()\n","\n","if not user_input.strip():\n"," display(Markdown(\"⚠️ Please enter a prompt idea first.\"))\n","else:\n"," if reset_chat:\n"," chat_history.clear()\n"," display(Markdown(\"**βœ… Chat history has been reset.**\"))\n","\n"," # Build messages for the official ERNIE-Image PE model\n"," messages = [{\"role\": \"system\", \"content\": SYSTEM_PROMPT}]\n"," for u, a in chat_history:\n"," messages.append({\"role\": \"user\", \"content\": u})\n"," messages.append({\"role\": \"assistant\", \"content\": a})\n"," messages.append({\"role\": \"user\", \"content\": user_input})\n","\n"," # Generate enhanced prompt (Chinese)\n"," tokenized_inputs = tokenizer.apply_chat_template(\n"," messages,\n"," add_generation_prompt=True,\n"," return_tensors=\"pt\"\n"," )\n"," input_ids = tokenized_inputs['input_ids'].to(model.device)\n"," attention_mask = tokenized_inputs['attention_mask'].to(model.device)\n","\n"," with torch.no_grad():\n"," output_ids = model.generate(\n"," input_ids,\n"," attention_mask=attention_mask,\n"," 
max_new_tokens=512,\n"," do_sample=True,\n"," temperature=0.7,\n"," top_p=0.9,\n"," repetition_penalty=1.1,\n"," )\n","\n"," enhanced_prompt = tokenizer.decode(\n"," output_ids[0][input_ids.shape[1]:],\n"," skip_special_tokens=True\n"," ).strip()\n"," enhanced_prompt = enhanced_prompt.replace('\\n', ' ') # Replace newlines with spaces\n","\n"," # ── Translate Chinese β†’ English using the manual translator from Cell 3 ──\n"," english_prompt = translate_chinese_to_english(enhanced_prompt)\n","\n"," # Save to history (we keep the original Chinese version)\n"," chat_history.append((user_input, enhanced_prompt))\n","\n"," # Show full conversation\n"," display(Markdown(\"### πŸ—£οΈ Chat History (PE-enhanced prompts)\"))\n"," for i, (u, a) in enumerate(chat_history, 1):\n"," display(Markdown(f\"**You {i}:** {u}\"))\n"," display(Markdown(f\"**PE Enhanced Prompt {i} (Chinese):** {a}\"))\n"," display(Markdown(\"---\"))\n","\n"," # Final output β€” both versions\n"," display(Markdown(\"**βœ… Here is your enhanced prompt (Chinese):**\"))\n"," display(Markdown(f\"```prompt\\n{enhanced_prompt}\\n```\"))\n","\n"," display(Markdown(\"**βœ… Translated to English (ready to copy & use):**\"))\n"," display(Markdown(f\"```prompt\\n{english_prompt}\\n```\"))\n","\n"," display(Markdown(\"**Just copy the English block above and paste it into ERNIE-Image!**\"))"],"metadata":{"cellView":"form","id":"AX-r-stEIO2t"},"execution_count":null,"outputs":[]}]}