Datasets:
Tags:
Not-For-All-Audiences
Upload gemma4_batch_captioner.ipynb
Browse files
gemma4_batch_captioner.ipynb
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"F5mVpzevnlmz"},"outputs":[],"source":["#@markdown # Cell 0: Mount Google Drive & Prepare HF_TOKEN for faster HF downloads\n","\n","from google.colab import drive\n","from google.colab import userdata\n","import os\n","\n","# Mount Google Drive\n","print(\"Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create a working directory on Drive (change if you want a different path)\n","WORKING_DIR = \"/content/drive/MyDrive/DinoTaggerPipeline\"\n","os.makedirs(WORKING_DIR, exist_ok=True)\n","print(f\"β
Working directory set to: {WORKING_DIR}\")\n","\n","# HF_TOKEN from Colab Secrets (recommended for private/gated models and faster downloads)\n","hf_token = userdata.get('HF_TOKEN')\n","if hf_token:\n"," os.environ[\"HF_TOKEN\"] = hf_token\n"," print(\"β
HF_TOKEN loaded from Colab Secrets and set as environment variable.\")\n"," print(\" β This enables authenticated + faster/resumable Hugging Face downloads.\")\n","else:\n"," print(\"β οΈ HF_TOKEN not found in Colab Secrets.\")\n"," print(\" β Add it via the key icon (left sidebar) β Secrets β Name: HF_TOKEN\")\n"," print(\" β Some downloads may be slower or fail if the repo requires login.\")\n","\n","print(\"\\nβ
Cell 0 complete. Ready for model downloads.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","colab":{"background_save":true},"id":"bpJTAMcxnnuY"},"outputs":[],"source":["#@markdown # Cell 1: Download All Models to Disk (CPU-only, No VRAM Load)\n","\n","import torch\n","from huggingface_hub import hf_hub_download, snapshot_download\n","from pathlib import Path\n","import os\n","\n","print(\"Downloading models to disk (CPU-only, resumable)...\")\n","\n","# 1. DINO Tagger (lodestones/tagger-experiment)\n","print(\"\\n1. Downloading DINO Tagger files...\")\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_proto.safetensors\", local_dir=WORKING_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_vocab_with_categories.json\", local_dir=WORKING_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"inference_tagger_standalone.py\", local_dir=WORKING_DIR, local_dir_use_symlinks=False)\n","\n","# 2. WD-VIT Tagger (selected_tags.csv + model via timm/hf_hub)\n","print(\"\\n2. Downloading WD-VIT Tagger assets...\")\n","tags_path = Path(WORKING_DIR) / \"selected_tags.csv\"\n","if not tags_path.exists():\n"," import requests\n"," from io import StringIO\n"," import pandas as pd\n"," response = requests.get(\"https://huggingface.co/SmilingWolf/wd-vit-tagger-v3/resolve/main/selected_tags.csv\")\n"," tags_df = pd.read_csv(StringIO(response.text))\n"," tags_df.to_csv(tags_path, index=False)\n"," print(f\" β Saved {len(tags_df)} tags to selected_tags.csv\")\n","\n","# 3. Gemma-4-E2B-Heretic (processor + model files will be cached by transformers)\n","print(\"\\n3. Gemma-4-E2B-Heretic will be cached on first load in Cell 7 (large model).\")\n","\n","print(f\"\\nβ
All lightweight models downloaded/cached to: {WORKING_DIR}\")\n","print(\" Heavy models (Gemma) will download during loading in Cell 7.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"Arc9kCZKns2D"},"outputs":[],"source":["from pathlib import Path\n","import zipfile\n","\n","# === Settings ===\n","zip_file_path = \"/content/drive/MyDrive/Archive.zip\" #@param {type:\"string\"}\n","# Full path on Drive, e.g. /content/drive/MyDrive/my_images.zip\n","\n","extract_dir = Path(\"/content/extracted_images\")\n","extract_dir.mkdir(exist_ok=True)\n","\n","if not zip_file_path or not Path(zip_file_path).exists():\n"," print(f\"β ZIP file not found at: {zip_file_path}\")\n"," print(\" β Please provide a valid path to your image ZIP.\")\n","else:\n"," print(f\"Extracting images from: {zip_file_path}\")\n"," with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n"," # Filter out macOS specific files (e.g., __MACOSX directory, .DS_Store)\n"," members = [m for m in zip_ref.namelist() if not m.startswith('__MACOSX/') and not m.endswith('/.DS_Store')]\n"," for member in members:\n"," zip_ref.extract(member, extract_dir)\n"," print(\" β Ignored __MACOSX/ and .DS_Store files during extraction.\")\n","\n"," image_files = []\n"," for ext in ['.png', '.jpg', '.jpeg', '.webp', '.avif','.bmp']:\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext}\")))\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext.upper()}\")))\n","\n"," image_files = sorted(set(str(p) for p in image_files))\n"," print(f\"β
Extracted and found {len(image_files)} image(s).\")\n"," print(\" Images are ready in /content/extracted_images/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"xHzgQ_u6nyhO"},"outputs":[],"source":["#@markdown # Cell 3: Clear VRAM + Load WD-VIT Tagger v3\n","\n","import torch\n","import gc\n","import timm\n","import torchvision.transforms as transforms\n","import pandas as pd\n","from pathlib import Path\n","\n","# Clear VRAM\n","print(\"π§Ή Clearing VRAM...\")\n","if 'model' in globals(): del model\n","if 'tagger' in globals(): del tagger\n","torch.cuda.empty_cache()\n","gc.collect()\n","print(\" β VRAM cleared.\")\n","\n","# Load WD-VIT Tagger\n","print(\"\\nπ₯ Loading WD-VIT Tagger v3...\")\n","tags_path = Path(WORKING_DIR) / \"selected_tags.csv\"\n","tags_df = pd.read_csv(tags_path)\n","tags_list = tags_df['name'].tolist()\n","print(f\" β Loaded {len(tags_list)} tags.\")\n","\n","model = timm.create_model(\"hf_hub:SmilingWolf/wd-vit-tagger-v3\", pretrained=True)\n","model.eval()\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = model.to(device)\n","\n","preprocess = transforms.Compose([\n"," transforms.Resize((448, 448)),\n"," transforms.ToTensor(),\n"," transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n","])\n","\n","print(f\"β
WD-VIT Tagger loaded on {device}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"6bt5-V-yn2Qc"},"outputs":[],"source":["#@markdown # Cell 4: Tag Images with WD-VIT Tagger\n","\n","from PIL import Image\n","from pathlib import Path\n","\n","threshold = 0.7 #@param {type:\"slider\", min:0.1, max:0.95, step:0.01}\n","add_commas = True #@param {type:\"boolean\"}\n","\n","input_dir = Path(\"/content/extracted_images\")\n","output_dir = Path(\"/content/wd_tags\")\n","output_dir.mkdir(exist_ok=True)\n","\n","image_files = sorted(list(input_dir.glob(\"*.*\")))\n","image_files = [f for f in image_files if f.suffix.lower() in {'.png','.jpg','.jpeg','.webp','.bmp','.avif'}]\n","\n","print(f\"Starting WD tagging for {len(image_files)} images...\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," img_name = img_path.name\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_name}\")\n","\n"," image = Image.open(img_path).convert(\"RGB\")\n"," input_tensor = preprocess(image).unsqueeze(0).to(device)\n","\n"," with torch.no_grad():\n"," logits = model(input_tensor)\n"," probs = torch.sigmoid(logits).cpu().numpy()[0]\n","\n"," wd_tags = [tags_list[j] for j, prob in enumerate(probs) if prob > threshold]\n","\n"," tag_text = \" , \".join(wd_tags) if add_commas else \" \".join(wd_tags)\n","\n"," base_name = f\"{i:04d}\" # numbered for consistency\n"," Image.open(img_path).convert(\"RGB\").save(output_dir / f\"{base_name}.jpg\", \"JPEG\", quality=95)\n"," with open(output_dir / f\"{base_name}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(tag_text)\n","\n"," print(f\" β Saved {base_name}.jpg + {base_name}.txt ({len(wd_tags)} WD tags)\")\n","\n","print(\"\\nβ
WD tagging complete. Tags saved in /content/wd_tags/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"ODhu29DFoC0c"},"outputs":[],"source":["#@markdown # Cell 5: Clear VRAM + Load DINO Tagger\n","\n","import torch\n","import gc\n","import sys\n","from pathlib import Path\n","\n","# Add WORKING_DIR to sys.path so Python can find the module\n","if str(WORKING_DIR) not in sys.path:\n"," sys.path.insert(0, str(WORKING_DIR))\n","\n","from inference_tagger_standalone import Tagger # from Cell 1 download\n","\n","# Clear previous model\n","print(\"π§Ή Clearing VRAM...\")\n","if 'model' in globals(): del model\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# Load DINO Tagger\n","print(\"\\nπ₯ Loading DINO Tagger...\")\n","tagger = Tagger(\n"," checkpoint_path=str(Path(WORKING_DIR) / \"tagger_proto.safetensors\"),\n"," vocab_path=str(Path(WORKING_DIR) / \"tagger_vocab_with_categories.json\"),\n"," device=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n"," dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n"," max_size=1024\n",")\n","\n","print(f\"β
DINO Tagger loaded on {tagger.device}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"s4_XYkSYoIlN"},"outputs":[],"source":["#@markdown # Cell 6.a: Tag with DINO Tagger + Append to .txt Files\n","\n","threshold_percent = 80 #@param {type:\"slider\", min:1, max:95, step:1}\n","max_tags = 100 #@param {type:\"slider\", min:5, max:150, step:5}\n","use_max_tags = False #@param {type:\"boolean\"}\n","\n","wd_dir = Path(\"/content/wd_tags\")\n","dino_dir = Path(\"/content/dino_combined\")\n","dino_dir.mkdir(exist_ok=True)\n","\n","image_files = sorted(list(wd_dir.glob(\"*.jpg\")))\n","\n","threshold = threshold_percent / 100.0\n","\n","print(f\"Starting DINO tagging for {len(image_files)} images...\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," base_name = img_path.stem\n"," txt_path = wd_dir / f\"{base_name}.txt\"\n","\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_path.name}\")\n","\n"," if use_max_tags:\n"," tags_list = tagger.predict(str(img_path), topk=max_tags, threshold=None)\n"," else:\n"," tags_list = tagger.predict(str(img_path), topk=None, threshold=threshold)\n"," if len(tags_list) > max_tags:\n"," tags_list = tags_list[:max_tags]\n","\n"," dino_tags = [tag for tag, prob in tags_list]\n","\n"," # Read WD tags\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," existing_text = f.read().strip()\n"," existing_tags = [t.strip() for t in existing_text.replace(\",\", \" \").split() if t.strip()]\n"," else:\n"," existing_tags = []\n","\n"," # Combine: WD first, then DINO (or reverse if preferred)\n"," combined = existing_tags + dino_tags\n"," seen = set()\n"," final_tags = [tag for tag in combined if not (tag in seen or seen.add(tag))]\n","\n"," tag_text = \" , \".join(final_tags) if add_commas else \" \".join(final_tags) # add_commas from previous cell if defined\n","\n"," # Save combined\n"," new_img = dino_dir / f\"{base_name}.jpg\"\n"," new_txt = dino_dir / 
f\"{base_name}.txt\"\n","\n"," Image.open(img_path).convert(\"RGB\").save(new_img, \"JPEG\", quality=95)\n"," with open(new_txt, \"w\", encoding=\"utf-8\") as f:\n"," f.write(tag_text)\n","\n"," print(f\" β Saved combined tags ({len(final_tags)} total)\")\n","\n","print(\"\\nβ
DINO tags appended. Combined tags in /content/dino_combined/\")"]},{"cell_type":"code","source":["#@markdown # Cell 6.b: Save DINO-Tagged Results to Google Drive (Safe to Disconnect)\n","\n","from pathlib import Path\n","import zipfile\n","import shutil\n","\n","print(\"πΎ Saving all DINO-tagged results (images + combined tags) to Google Drive...\")\n","\n","dino_dir = Path(\"/content/dino_combined\")\n","if not dino_dir.exists() or not list(dino_dir.glob(\"*.jpg\")):\n"," print(\"β No /content/dino_combined folder found. Please run Cell 6 first.\")\n","else:\n"," # Create safe backup folder on Drive\n"," backup_dir = Path(WORKING_DIR) / \"dino_tagged_backup\"\n"," backup_dir.mkdir(parents=True, exist_ok=True)\n","\n"," # Copy entire folder to Drive (fast & reliable)\n"," print(f\" Copying {len(list(dino_dir.glob('*.jpg')))} image+tag pairs to Drive...\")\n"," for item in dino_dir.iterdir():\n"," dest = backup_dir / item.name\n"," if item.is_file():\n"," shutil.copy2(item, dest)\n","\n"," # Also create a ZIP for easy download / archive\n"," zip_path = Path(WORKING_DIR) / \"dino_tagged_backup.zip\"\n"," with zipfile.ZipFile(zip_path, \"w\") as zipf:\n"," for file in backup_dir.iterdir():\n"," zipf.write(file, file.name)\n","\n"," print(f\"\\nβ
Backup complete!\")\n"," print(f\" β Folder saved to: {backup_dir}\")\n"," print(f\" β ZIP archive saved to: {zip_path}\")\n"," print(\"\\nYou can now safely disconnect / restart the runtime.\")\n"," print(\" When you come back, run Cell 6.c to restore everything.\")"],"metadata":{"id":"lKhaoG11wm4T"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"FQF71-mvmlc1"},"outputs":[],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"π Disconnecting Colab session in 3 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]},{"cell_type":"markdown","source":["Disconnect the runtime here. We retrieve values from Drive afterwards. Google Colab needs a fresh session for Gemma-4, since it is a recently released model (released April 2, 2026), which is why you need to disconnect Colab like this."],"metadata":{"id":"SI-pmpU1wyy8"}},{"cell_type":"code","source":["#@markdown # Cell 7.a: Reconnect Drive + Restore HF_TOKEN + Install Gemma-4 Dependencies\n","\n","from google.colab import drive\n","from google.colab import userdata\n","from pathlib import Path\n","import zipfile\n","import shutil\n","import os\n","\n","print(\"π Reconnecting to Google Drive...\")\n","\n","# Mount Drive again\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Restore working directory\n","WORKING_DIR = \"/content/drive/MyDrive/DinoTaggerPipeline\"\n","os.makedirs(WORKING_DIR, exist_ok=True)\n","print(f\"β
Working directory restored: {WORKING_DIR}\")\n","\n","# Restore HF_TOKEN\n","hf_token = userdata.get('HF_TOKEN')\n","if hf_token:\n"," os.environ[\"HF_TOKEN\"] = hf_token\n"," print(\"β
HF_TOKEN restored from Colab Secrets.\")\n","else:\n"," print(\"β οΈ HF_TOKEN not found in secrets. Add it via the key icon if Gemma-4 fails to download.\")\n","\n","# Restore DINO-tagged files from backup (if they exist)\n","backup_dir = Path(WORKING_DIR) / \"dino_tagged_backup\"\n","dino_dir = Path(\"/content/dino_combined\")\n","dino_dir.mkdir(exist_ok=True)\n","\n","if backup_dir.exists() and list(backup_dir.glob(\"*.jpg\")):\n"," print(f\"π¦ Restoring {len(list(backup_dir.glob('*.jpg')))} tagged image pairs...\")\n"," for item in backup_dir.iterdir():\n"," shutil.copy2(item, dino_dir / item.name)\n"," print(\"β
DINO-tagged files restored to /content/dino_combined\")\n","elif (Path(WORKING_DIR) / \"dino_tagged_backup.zip\").exists():\n"," print(\"π¦ Unzipping backup ZIP...\")\n"," with zipfile.ZipFile(Path(WORKING_DIR) / \"dino_tagged_backup.zip\", 'r') as zip_ref:\n"," zip_ref.extractall(dino_dir)\n"," print(\"β
Files restored from ZIP.\")\n","else:\n"," print(\"β οΈ No backup found on Drive. Make sure you ran Cell 6.b before disconnecting.\")\n","\n","# Install Gemma-4 dependencies EXACTLY as in the working Cell 1a (no git, no restart needed)\n","print(\"\\nπ Installing Gemma-4 dependencies (safe & official method from model card)...\")\n","!pip install -q --upgrade --force-reinstall \"pillow<12.0\"\n","!pip install -q --upgrade transformers accelerate bitsandbytes\n","\n","print(\"\\nβ
Dependencies installed.\")\n","print(\" β Ready to load Gemma-4 in Cell 7.\")\n","print(\"\\nCell 7.a complete. Run Cell 7 next.\")"],"metadata":{"id":"2lmjE8GWwxrK"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["#@markdown # Cell 7.b: Clear VRAM + Load Gemma-4-E2B-Heretic (Merged β Safe Official Method)\n","\n","import torch\n","import gc\n","from google.colab import userdata\n","from transformers import AutoProcessor, AutoModelForMultimodalLM\n","\n","# Aggressive VRAM cleanup\n","print(\"π§Ή Clearing VRAM...\")\n","if 'tagger' in globals():\n"," del tagger\n","if 'model' in globals():\n"," del model\n","if 'processor' in globals():\n"," del processor\n","\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","if torch.cuda.is_available():\n"," print(f\" VRAM before loading: {torch.cuda.memory_allocated() / 1024**3:.2f} GB\")\n","else:\n"," print(\" β οΈ No GPU detected β loading on CPU (slow).\")\n","\n","# HF Token (already restored in Cell 6.c)\n","hf_token = userdata.get('HF_TOKEN')\n","if hf_token:\n"," print(\"β
HF_TOKEN loaded.\")\n","else:\n"," print(\"β οΈ HF_TOKEN not found.\")\n","\n","model_id = \"coder3101/gemma-4-E2B-it-heretic\"\n","\n","print(f\"\\nπ½ Loading Gemma-4-E2B-Heretic **EXACTLY** as recommended on model card...\")\n","\n","processor = AutoProcessor.from_pretrained(model_id, token=hf_token)\n","\n","# Official recommended loading (fixes vision hangs + gray-image bug)\n","model = AutoModelForMultimodalLM.from_pretrained(\n"," model_id,\n"," token=hf_token,\n"," dtype=\"auto\", # β model card uses \"dtype\"\n"," device_map=\"auto\", # β vision + text on GPU automatically\n"," low_cpu_mem_usage=True,\n",")\n","\n","print(\"\\nβ
Gemma-4-E2B-Heretic loaded successfully!\")\n","print(f\" Device: {model.device}\")\n","print(f\" Dtype: {model.dtype}\")\n","print(f\" VRAM used: {torch.cuda.memory_allocated() / 1024**3:.2f} GB\")\n","print(\" Vision encoder fully initialized β ready for captions.\")\n","\n","# Quick confirmation\n","print(f\"\\n Transformers version: {__import__('transformers').__version__}\")\n","print(f\" Model type: {model.config.model_type if hasattr(model.config, 'model_type') else 'N/A'}\")\n","\n","print(\"\\nβ
Cell 7 complete β ready for Cell 8 (caption generation with tags).\")"],"metadata":{"id":"2bEySax4xLlM"},"execution_count":null,"outputs":[]},{"cell_type":"code","source":["from PIL import Image\n","import base64\n","from pathlib import Path\n","import torch\n","import io\n","\n","input_dir = Path(\"/content/dino_combined\")\n","output_dir = Path(\"/content/final_captions\")\n","output_dir.mkdir(exist_ok=True)\n","\n","image_files = sorted(list(input_dir.glob(\"*.jpg\")))\n","\n","# Determine the optimal resolution for Gemma-4's vision encoder (improves latent features)\n","target_max_dim = 1024\n","if hasattr(processor, \"image_processor\") and hasattr(processor.image_processor, \"size\"):\n"," size_cfg = processor.image_processor.size\n"," if isinstance(size_cfg, dict):\n"," dims = [v for v in size_cfg.values() if isinstance(v, (int, float))]\n"," if dims:\n"," target_max_dim = int(max(dims))\n"," elif isinstance(size_cfg, (int, float)):\n"," target_max_dim = int(size_cfg)\n","\n","print(f\"π§ Using optimal vision resolution: max {target_max_dim}px (matched to processor for better latent representation)\")\n","\n","prompt_base = \"Describe this image in detail, including key objects, scene, colors, mood, lighting, and any visible text. The description text should be long, roughly 400 words in size. Be accurate and comprehensive. Do not use newlines, bold font, or itemized lists. 
Output a block of text.\"\n","\n","print(f\"Generating detailed captions for {len(image_files)} images with improved latent image representation...\")\n","\n","def image_to_data_url(img: Image.Image) -> str:\n"," \"\"\"Lossless PNG data URL β cleanest possible pixel data for the vision encoder\"\"\"\n"," buffered = io.BytesIO()\n"," img.save(buffered, format=\"PNG\") # lossless, no compression artifacts\n"," img_str = base64.b64encode(buffered.getvalue()).decode(\"utf-8\")\n"," return f\"data:image/png;base64,{img_str}\"\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," base_name = img_path.stem\n"," txt_path = input_dir / f\"{base_name}.txt\"\n","\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_path.name}\")\n","\n"," # Load existing DINO tags\n"," tags = \"\"\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," tags = f.read().strip()\n","\n"," # === OPTIMAL INPUT FOR LATENT REPRESENTATION ===\n"," image = Image.open(img_path).convert(\"RGB\")\n","\n"," # Resize to the processor's preferred size (best latent quality)\n"," if max(image.size) != target_max_dim or min(image.size) < 512:\n"," scale = target_max_dim / max(image.size)\n"," new_size = (int(image.width * scale), int(image.height * scale))\n"," image = image.resize(new_size, Image.LANCZOS)\n","\n"," # Lossless data URL (direct pixel data β stronger latent features)\n"," image_url = image_to_data_url(image)\n","\n"," full_prompt = f\"{prompt_base}\\n\\nThe description must include these words : {tags}\"\n","\n"," messages = [{\n"," \"role\": \"user\",\n"," \"content\": [\n"," {\"type\": \"image\", \"url\": image_url}, # lossless PNG data\n"," {\"type\": \"text\", \"text\": full_prompt}\n"," ]\n"," }]\n","\n"," inputs = processor.apply_chat_template(\n"," messages, add_generation_prompt=True, tokenize=True, return_tensors=\"pt\", return_dict=True\n"," ).to(model.device)\n","\n"," with torch.no_grad():\n"," outputs = model.generate(\n"," 
**inputs,\n"," max_new_tokens=768,\n"," do_sample=True,\n"," temperature=0.65,\n"," top_p=0.9,\n"," )\n","\n"," input_len = inputs[\"input_ids\"].shape[-1]\n"," raw = processor.decode(outputs[0][input_len:], skip_special_tokens=False)\n"," caption = processor.parse_response(raw)[\"content\"] if hasattr(processor, \"parse_response\") else raw.strip()\n","\n"," # Save the (optimally resized) image + caption\n"," image.save(output_dir / f\"{base_name}.jpg\", \"JPEG\", quality=95)\n"," with open(output_dir / f\"{base_name}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(caption)\n","\n"," print(f\" β Caption generated ({len(caption.split())} words) β latent features improved\")\n","\n","print(\"\\nβ
Gemma-4 captions complete with **improved latent image representation**!\")\n","print(\"Final pairs saved in /content/final_captions/\")"],"metadata":{"id":"gZk3qzO13bU8"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"qxd0tFpmosVI"},"outputs":[],"source":["#@markdown # Cell 9: Save Final Image-Text Pairs as ZIP on Google Drive\n","\n","from pathlib import Path\n","import zipfile\n","from google.colab import files\n","\n","final_dir = Path(\"/content/final_captions\")\n","zip_name = \"final_image_text_pairs.zip\"\n","zip_path = Path(WORKING_DIR) / zip_name\n","\n","print(f\"Creating ZIP: {zip_path}\")\n","\n","with zipfile.ZipFile(zip_path, 'w') as zipf:\n"," for file in final_dir.iterdir():\n"," zipf.write(file, file.name)\n","\n","print(f\"β
ZIP saved to Google Drive at: {zip_path}\")\n","\n","# Optional: Auto-download to local machine\n","auto_download = True #@param {type:\"boolean\"}\n","if auto_download:\n"," files.download(str(zip_path))\n"," print(\"π₯ Auto-download triggered.\")\n","else:\n"," print(\" β You can download the ZIP manually from Google Drive.\")\n","\n","print(\"\\nπ Pipeline complete! All steps finished.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"TZ4h8CWIy4uM"},"outputs":[],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"π Disconnecting Colab session in 3 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
|