codeShare committed on
Commit
5aced65
·
verified ·
1 Parent(s): 1732557

Upload dino_tagger.ipynb

Browse files
Files changed (1) hide show
  1. dino_tagger.ipynb +1 -1
dino_tagger.ipynb CHANGED
@@ -1 +1 @@
1
- {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"CrLhR87ElDnE"},"outputs":[],"source":["#@markdown # Cell 0: Mount Google Drive & Prepare HF_TOKEN for faster HF downloads (UPDATED)\n","\n","from google.colab import drive\n","from google.colab import userdata\n","import os\n","\n","# Mount Google Drive (still used for output ZIPs/results only)\n","print(\"Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create a working directory on Drive (for final outputs/ZIPs only)\n","WORKING_DIR = \"/content/drive/MyDrive/DinoTaggerPipeline\"\n","os.makedirs(WORKING_DIR, exist_ok=True)\n","print(f\"βœ… Working directory set to: {WORKING_DIR} (outputs only)\")\n","\n","# === NEW: Local model directory on /content/ (CPU storage only) ===\n","# Checkpoints will ONLY go here. Nothing is saved to Drive.\n","MODEL_DIR = \"/content/models\"\n","os.makedirs(MODEL_DIR, exist_ok=True)\n","print(f\"βœ… Model directory set to: {MODEL_DIR} (local /content/ storage only)\")\n","print(\" β†’ Checkpoint models (safetensors, vocab, etc.) will be saved here only.\")\n","\n","# HF_TOKEN from Colab Secrets\n","hf_token = userdata.get('HF_TOKEN')\n","if hf_token:\n"," os.environ[\"HF_TOKEN\"] = hf_token\n"," print(\"βœ… HF_TOKEN loaded from Colab Secrets and set as environment variable.\")\n"," print(\" β†’ This enables authenticated + faster/resumable Hugging Face downloads.\")\n","else:\n"," print(\"⚠️ HF_TOKEN not found in Colab Secrets.\")\n"," print(\" β†’ Add it via the key icon (left sidebar) β†’ Secrets β†’ Name: HF_TOKEN\")\n"," print(\" β†’ Some downloads may be slower or fail if the repo requires login.\")\n","\n","print(\"\\nβœ… Cell 0 complete. 
Models will now save to local /content/models only.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"VlWR_znDlD1W"},"outputs":[],"source":["#@markdown # Cell 1: Download All Models to Disk (CPU-only, No VRAM Load) (UPDATED)\n","\n","import torch\n","from huggingface_hub import hf_hub_download, snapshot_download\n","from pathlib import Path\n","import os\n","\n","print(\"Downloading models to local /content/models (CPU-only, resumable)...\")\n","\n","# 1. DINO Tagger (lodestones/tagger-experiment) β†’ saved to MODEL_DIR only\n","print(\"\\n1. Downloading DINO Tagger files...\")\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_proto.safetensors\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_vocab_with_categories.json\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"inference_tagger_standalone.py\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","\n","# 2. WD-VIT Tagger (selected_tags.csv) β†’ saved to MODEL_DIR only\n","print(\"\\n2. Downloading WD-VIT Tagger assets...\")\n","tags_path = Path(MODEL_DIR) / \"selected_tags.csv\"\n","if not tags_path.exists():\n"," import requests\n"," from io import StringIO\n"," import pandas as pd\n"," response = requests.get(\"https://huggingface.co/SmilingWolf/wd-vit-tagger-v3/resolve/main/selected_tags.csv\")\n"," tags_df = pd.read_csv(StringIO(response.text))\n"," tags_df.to_csv(tags_path, index=False)\n"," print(f\" β†’ Saved {len(tags_df)} tags to selected_tags.csv in {MODEL_DIR}\")\n","\n","# 3. Gemma-4-E2B-Heretic (processor + model files will be cached by transformers on local storage)\n","#print(\"\\n3. 
Gemma-4-E2B-Heretic will be cached on first load in Cell 7 (large model - stored locally).\")\n","\n","print(f\"\\nβœ… All lightweight models downloaded/cached to: {MODEL_DIR} (local /content/ only)\")\n","print(\" β†’ No checkpoint models were saved to Google Drive.\")\n","#print(\" Heavy models (Gemma) will download during loading in Cell 7.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"s1hmHVmGlEAn"},"outputs":[],"source":["#@markdown # Cell 2: Extract Images from ZIP (MODIFIED - now supports existing image+text pairs)\n","\n","from pathlib import Path\n","import zipfile\n","\n","# === Settings ===\n","zip_file_path = \"/content/drive/MyDrive/my_set.zip\" #@param {type:\"string\"}\n","# Full path on Drive, e.g. /content/drive/MyDrive/my_images.zip\n","\n","extract_dir = Path(\"/content/extracted_images\")\n","extract_dir.mkdir(exist_ok=True)\n","\n","if not zip_file_path or not Path(zip_file_path).exists():\n"," print(f\"❌ ZIP file not found at: {zip_file_path}\")\n"," print(\" β†’ Please provide a valid path to your image ZIP.\")\n","else:\n"," print(f\"Extracting images from: {zip_file_path}\")\n"," with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n"," # Filter out macOS specific files (e.g., __MACOSX directory, .DS_Store)\n"," members = [m for m in zip_ref.namelist() if not m.startswith('__MACOSX/') and not m.endswith('/.DS_Store')]\n"," for member in members:\n"," zip_ref.extract(member, extract_dir)\n"," print(\" β†’ Ignored __MACOSX/ and .DS_Store files during extraction.\")\n","\n"," image_files = []\n"," for ext in ['.png', '.jpg', '.jpeg', '.webp', '.avif','.bmp']:\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext}\")))\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext.upper()}\")))\n","\n"," image_files = sorted(set(str(p) for p in image_files))\n","\n"," # === NEW: Support for ZIPs containing existing image + .txt caption pairs ===\n"," # If a .txt file with the same base name as an image exists, 
it will be read\n"," # and the generated tags (WD + DINO) will be inserted in the middle of its sentences.\n"," existing_captions = []\n"," paired_count = 0\n"," for img_str in image_files:\n"," img_path = Path(img_str)\n"," txt_path = img_path.with_suffix('.txt')\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," caption = f.read().strip()\n"," existing_captions.append(caption)\n"," paired_count += 1\n"," else:\n"," existing_captions.append(None)\n","\n"," print(f\"βœ… Extracted and found {len(image_files)} image(s). {paired_count} have existing text pairs.\")\n"," if paired_count > 0:\n"," print(\" β†’ Existing captions detected! Tags will be inserted after half the sentences (see Cell 6.a).\")\n"," print(\" Images are ready in /content/extracted_images/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"bS_tUFbMlEJl"},"outputs":[],"source":["#@markdown # Cell 3: Clear VRAM + Load WD-VIT Tagger v3 (UPDATED)\n","\n","import torch\n","import gc\n","import timm\n","import torchvision.transforms as transforms\n","import pandas as pd\n","from pathlib import Path\n","\n","# Clear VRAM\n","print(\"🧹 Clearing VRAM...\")\n","if 'model' in globals(): del model\n","if 'tagger' in globals(): del tagger\n","torch.cuda.empty_cache()\n","gc.collect()\n","print(\" β†’ VRAM cleared.\")\n","\n","# Load WD-VIT Tagger\n","print(\"\\nπŸ“₯ Loading WD-VIT Tagger v3...\")\n","# UPDATED: Use MODEL_DIR (local only)\n","tags_path = Path(MODEL_DIR) / \"selected_tags.csv\"\n","tags_df = pd.read_csv(tags_path)\n","tags_list = tags_df['name'].tolist()\n","print(f\" β†’ Loaded {len(tags_list)} tags.\")\n","\n","model = timm.create_model(\"hf_hub:SmilingWolf/wd-vit-tagger-v3\", pretrained=True)\n","model.eval()\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = model.to(device)\n","\n","preprocess = transforms.Compose([\n"," transforms.Resize((448, 448)),\n"," 
transforms.ToTensor(),\n"," transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n","])\n","\n","print(f\"βœ… WD-VIT Tagger loaded on {device}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"SthfRxwClETc"},"outputs":[],"source":["#@markdown # Cell 4: Tag Images with WD-VIT Tagger (FIXED) (unchanged)\n","\n","from PIL import Image\n","from pathlib import Path\n","\n","threshold = 0.75 #@param {type:\"slider\", min:0.1, max:0.95, step:0.01}\n","add_commas = True #@param {type:\"boolean\"}\n","\n","input_dir = Path(\"/content/extracted_images\")\n","output_dir = Path(\"/content/wd_tags\")\n","output_dir.mkdir(exist_ok=True)\n","\n","# FIXED: Clean glob pattern (was broken with Chinese text garbage)\n","image_files = sorted(list(input_dir.glob(\"*.*\")))\n","image_files = [f for f in image_files if f.suffix.lower() in {'.png','.jpg','.jpeg','.webp','.bmp','.avif'}]\n","\n","print(f\"Starting WD tagging for {len(image_files)} images...\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," img_name = img_path.name\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_name}\")\n","\n"," # Load image once\n"," image = Image.open(img_path).convert(\"RGB\")\n","\n"," # Prepare for model\n"," input_tensor = preprocess(image).unsqueeze(0).to(device)\n","\n"," with torch.no_grad():\n"," logits = model(input_tensor)\n"," probs = torch.sigmoid(logits).cpu().numpy()[0]\n","\n"," wd_tags = [tags_list[j] for j, prob in enumerate(probs) if prob > threshold]\n","\n"," tag_text = \" , \".join(wd_tags) if add_commas else \" \".join(wd_tags)\n","\n"," base_name = f\"{i:04d}\" # numbered for consistency\n","\n"," # FIXED: Reuse already-loaded image instead of opening twice\n"," image.save(output_dir / f\"{base_name}.jpg\", \"JPEG\", quality=95)\n","\n"," with open(output_dir / f\"{base_name}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(tag_text)\n","\n"," print(f\" β†’ Saved {base_name}.jpg + 
{base_name}.txt ({len(wd_tags)} WD tags)\")\n","\n","print(\"\\nβœ… WD tagging complete. Tags saved in /content/wd_tags/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"EKq1NmNYlEc3"},"outputs":[],"source":["#@markdown # Cell 5: Clear VRAM + Load DINO Tagger (UPDATED)\n","\n","import torch\n","import gc\n","import sys\n","from pathlib import Path\n","\n","# UPDATED: Add MODEL_DIR to sys.path so Python can find the module (inference_tagger_standalone.py is now in local /content/models)\n","if str(MODEL_DIR) not in sys.path:\n"," sys.path.insert(0, str(MODEL_DIR))\n","\n","from inference_tagger_standalone import Tagger # from Cell 1 download (now in MODEL_DIR)\n","\n","# Clear previous model\n","print(\"🧹 Clearing VRAM...\")\n","if 'model' in globals(): del model\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# Load DINO Tagger\n","print(\"\\nπŸ“₯ Loading DINO Tagger...\")\n","# UPDATED: Load checkpoint & vocab from local MODEL_DIR only\n","tagger = Tagger(\n"," checkpoint_path=str(Path(MODEL_DIR) / \"tagger_proto.safetensors\"),\n"," vocab_path=str(Path(MODEL_DIR) / \"tagger_vocab_with_categories.json\"),\n"," device=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n"," dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n"," max_size=1024\n",")\n","\n","print(f\"βœ… DINO Tagger loaded on {tagger.device} (checkpoint from local /content/models)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"EeBtbvB4lYd7"},"outputs":[],"source":["import re\n","import random # Added for randomization\n","\n","# === Rest of original Cell 6.a (with new randomization logic) ===\n","treshold_percent = 95 #@param {type:\"slider\", min:1, max:95, step:1}\n","max_tags = 100 #@param {type:\"slider\", min:5, max:150, step:5}\n","use_max_tags = False #@param {type:\"boolean\"}\n","\n","wd_dir = Path(\"/content/wd_tags\")\n","dino_dir = 
Path(\"/content/dino_combined\")\n","dino_dir.mkdir(exist_ok=True)\n","\n","image_files = sorted(list(wd_dir.glob(\"*.jpg\")))\n","\n","treshold = treshold_percent / 100.0\n","\n","print(f\"Starting DINO tagging for {len(image_files)} images...\")\n","\n","# Fallback if existing_captions was not created in Cell 2\n","if 'existing_captions' not in globals() or len(existing_captions) != len(image_files):\n"," existing_captions = [None] * len(image_files)\n"," print(\"⚠️ No existing_captions list found from Cell 2 (normal if input ZIP had no .txt files).\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," base_name = img_path.stem\n"," txt_path = wd_dir / f\"{base_name}.txt\"\n","\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_path.name}\")\n","\n"," if use_max_tags:\n"," tags_list = tagger.predict(str(img_path), topk=max_tags, treshold=None)\n"," else:\n"," tags_list = tagger.predict(str(img_path), topk=None, treshold=treshold)\n"," if len(tags_list) > max_tags:\n"," tags_list = tags_list[:max_tags]\n","\n"," dino_tags = [tag for tag, prob in tags_list]\n","\n"," # Read WD tags\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," existing_text = f.read().strip()\n"," existing_tags = [t.strip() for t in existing_text.replace(\",\", \" \").split() if t.strip()]\n"," else:\n"," existing_tags = []\n","\n"," # Combine WD first, then DINO\n"," combined = existing_tags + dino_tags\n"," seen = set()\n"," final_tags = [tag for tag in combined if not (tag in seen or seen.add(tag))]\n"," # final_tags now contains de-duplicated WD and DINO tags.\n","\n"," all_caption_elements = []\n"," num_sentences = 0\n"," num_tags = len(final_tags)\n","\n"," existing_caption_for_this_image = existing_captions[i-1] if i-1 < len(existing_captions) else None\n","\n"," if existing_caption_for_this_image:\n"," # Apply user-requested word replacements\n"," existing_caption_for_this_image = re.sub(r'\\bchildren\\b', 'girls', 
existing_caption_for_this_image, flags=re.IGNORECASE)\n"," existing_caption_for_this_image = re.sub(r'\\bchild\\b', 'girl', existing_caption_for_this_image, flags=re.IGNORECASE)\n","\n"," # Split into sentences\n"," sentence_endings = re.compile(r'(?<=[.!?])\\s+')\n"," sentences = [s.strip() for s in sentence_endings.split(existing_caption_for_this_image) if s.strip()]\n"," all_caption_elements.extend(sentences)\n"," num_sentences = len(sentences)\n","\n"," all_caption_elements.extend(final_tags) # Add the collected tags\n","\n"," # Shuffle all elements (sentences and tags) together\n"," random.shuffle(all_caption_elements)\n","\n"," # Join them with appropriate separators\n"," # Use ' , ' if add_commas is true, otherwise ' '\n"," separator = \" , \" if add_commas else \" \"\n"," final_text = separator.join(all_caption_elements)\n","\n"," # User requested comma spacing adjustment for the final text (this still applies)\n"," final_text = re.sub(r'\\s*,\\s*', ' , ', final_text)\n","\n"," # Save combined / augmented result\n"," new_img = dino_dir / f\"{base_name}.jpg\"\n"," new_txt = dino_dir / f\"{base_name}.txt\"\n","\n"," Image.open(img_path).convert(\"RGB\").save(new_img, \"JPEG\", quality=95)\n"," with open(new_txt, \"w\", encoding=\"utf-8\") as f:\n"," f.write(final_text)\n","\n"," if num_sentences > 0:\n"," print(f\" β†’ Combined and randomized {num_sentences} sentences and {num_tags} tags.\")\n"," else:\n"," print(f\" β†’ Randomized {num_tags} tags (no existing sentences).\")\n"," print(f\" β†’ Saved {base_name}.jpg + {base_name}.txt ({len(all_caption_elements)} total elements in prompt)\")\n","\n","print(\"\\nβœ… DINO tags processed. 
Combined/augmented tags in /content/dino_combined/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","colab":{"background_save":true},"id":"Ytpw174ClYnj"},"outputs":[],"source":["#@markdown # Cell 6.b: Save DINO-Tagged Results to Google Drive (Safe to Disconnect) (unchanged)\n","\n","from pathlib import Path\n","import zipfile\n","import shutil\n","\n","print(\"πŸ’Ύ Saving all DINO-tagged results (images + combined tags) to Google Drive as a ZIP...\")\n","\n","dino_dir = Path(\"/content/dino_combined\")\n","if not dino_dir.exists() or not list(dino_dir.glob(\"*.jpg\")):\n"," print(\"❌ No /content/dino_combined folder found. Please run Cell 6 first.\")\n","else:\n"," # Create a ZIP for easy download / archive directly from dino_dir\n"," zip_path = Path(WORKING_DIR) / \"dino_tagged_results.zip\" # Changed zip name for clarity\n"," num_files_to_zip = len(list(dino_dir.iterdir()))\n"," print(f\" Zipping {num_files_to_zip} image+tag pairs...\")\n","\n"," with zipfile.ZipFile(zip_path, \"w\") as zipf:\n"," for file in dino_dir.iterdir():\n"," zipf.write(file, file.name) # Write directly from dino_dir, preserving original file names in zip\n","\n"," print(f\"\\nβœ… ZIP creation complete!\")\n"," print(f\" β†’ ZIP archive saved to: {zip_path}\")\n"," print(\"\\nYou can now safely disconnect / restart the runtime.\")\n"," print(\" When you come back, run Cell 6.c to restore everything.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"c_7hA_Rxmyep"},"outputs":[],"source":["#@markdown # Cell 6.c: Generate Combined Prompts for LLM Tweaking (deterministic image order)\n","\n","from pathlib import Path\n","import zipfile\n","import os\n","import shutil\n","\n","zip_file_on_drive_path = Path(WORKING_DIR) / \"dino_tagged_results.zip\"\n","drive_target_output_dir = Path(zip_file_on_drive_path).parent\n","drive_target_output_dir.mkdir(parents=True, exist_ok=True)\n","\n","print(f\"πŸ”„ Processing ZIP from Drive: 
{zip_file_on_drive_path}\")\n","\n","extract_dir = \"/content/extracted_txt_files_drive_processing\"\n","if os.path.exists(extract_dir):\n"," shutil.rmtree(extract_dir)\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","try:\n"," with zipfile.ZipFile(zip_file_on_drive_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n"," print(f\"βœ… ZIP extracted from Drive to {extract_dir}\")\n","except FileNotFoundError:\n"," print(f\"❌ ZIP file not found at {zip_file_on_drive_path}.\")\n"," combined_content = '{}'\n","\n","txt_paths = sorted(\n"," list(Path(extract_dir).glob(\"*.txt\")),\n"," key=lambda p: f\"{int(p.stem):05d}\" if p.stem.isdigit() else p.stem.lower()\n",")\n","\n","cleaned_titles = []\n","if not txt_paths:\n"," print(\"⚠️ No .txt files found!\")\n"," combined_content = '{}'\n","else:\n"," print(f\"πŸ“„ Found {len(txt_paths)} prompts (sorted deterministically by image number).\")\n"," for txt_path in txt_paths:\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," title = f.read().strip()\n","\n"," cleaned = (title\n"," .replace('^', '')\n"," .replace('{', '')\n"," .replace('}', '')\n"," .replace('|', '')\n"," .replace('\"','')\n"," .replace('>', '')\n"," .replace('<',''))\n"," cleaned_titles.append(cleaned)\n","\n"," combined_content = '{' + '|'.join(cleaned_titles) + '}'\n","\n","local_output_file_name = \"combined_for_llm_tweaking.txt\"\n","local_output_file_path = f\"/content/{local_output_file_name}\"\n","with open(local_output_file_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(combined_content)\n","\n","print(f\"\\nβœ… Done! 
Created deterministic combined prompt string with {len(cleaned_titles)} entries.\")\n","print(f\"πŸ“ Local file saved as: {local_output_file_path}\")\n","print(f\" β†’ Copy the entire content of this file and paste it into an LLM to tweak/refine all prompts at once.\")\n","print(f\" β†’ After tweaking, save the LLM output (keep the same {len(cleaned_titles)} | separated format) and use it in Cell 6.d.\")\n","\n","drive_final_output_path = drive_target_output_dir / local_output_file_name\n","shutil.copy(local_output_file_path, str(drive_final_output_path))\n","print(f\"☁️ Also saved to Google Drive: {drive_final_output_path}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"83pxrBGwm4gY","cellView":"form"},"outputs":[],"source":["#@markdown # Cell 6.d: Apply Refined Prompts from LLM (updates image+text pairs)\n","\n","from pathlib import Path\n","\n","# === Settings ===\n","refined_combined_file = \"/content/drive/MyDrive/DinoTaggerPipeline/refined_combined_titles.txt\" #@param {type:\"string\"}\n","# ↑ Paste the full path to the .txt file you saved after tweaking in the LLM.\n","# The file must contain exactly one line in the format: {prompt1|prompt2|prompt3|...}\n","# (or without the outer {} β€” the code handles both).\n","\n","dino_dir = Path(\"/content/dino_combined\")\n","\n","if not dino_dir.exists() or not list(dino_dir.glob(\"*.jpg\")):\n"," print(\"❌ No /content/dino_combined folder found. 
Please run Cells 2–6.a (and 6.b) first.\")\n","else:\n"," # Get images in the exact same deterministic order as Cell 6.c\n"," image_files = sorted(\n"," list(dino_dir.glob(\"*.jpg\")),\n"," key=lambda p: int(p.stem) if p.stem.isdigit() else p.stem\n"," )\n","\n"," # Read the refined combined file\n"," with open(refined_combined_file, \"r\", encoding=\"utf-8\") as f:\n"," content = f.read().strip()\n","\n"," # Remove outer {} if present\n"," if content.startswith('{') and content.endswith('}'):\n"," content = content[1:-1]\n","\n"," refined_prompts = [p.strip() for p in content.split('|') if p.strip()]\n","\n"," if len(refined_prompts) != len(image_files):\n"," print(f\"❌ Mismatch! Found {len(refined_prompts)} refined prompts but {len(image_files)} images.\")\n"," print(\" β†’ Make sure the LLM output has exactly the same number of entries separated by |\")\n"," else:\n"," print(f\"βœ… Found {len(refined_prompts)} refined prompts. Updating .txt files in /content/dino_combined...\")\n","\n"," for i, img_path in enumerate(image_files):\n"," base_name = img_path.stem\n"," txt_path = dino_dir / f\"{base_name}.txt\"\n","\n"," new_prompt = refined_prompts[i]\n","\n"," with open(txt_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(new_prompt)\n","\n"," print(f\" β†’ Updated {base_name}.txt with refined prompt\")\n","\n"," print(\"\\nπŸŽ‰ All image+text pairs have been updated with your refined prompts!\")\n"," print(\" β†’ You can now re-run Cell 6.b to create a fresh ZIP with the new refined .txt files.\")\n"," print(\" β†’ (Optional) Re-run Cell 6.c to generate a new combined file from the refined versions.)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"FQF71-mvmlc1"},"outputs":[],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"πŸ”Œ Disconnecting Colab session in 15 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from 
google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776414686834},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776305835855},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776163207317},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775991040063},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775586753307},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775585424465}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}
 
1
+ {"cells":[{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"CrLhR87ElDnE"},"outputs":[],"source":["#@markdown # Cell 0: Mount Google Drive & Prepare HF_TOKEN for faster HF downloads (UPDATED)\n","\n","from google.colab import drive\n","from google.colab import userdata\n","import os\n","\n","# Mount Google Drive (still used for output ZIPs/results only)\n","print(\"Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create a working directory on Drive (for final outputs/ZIPs only)\n","WORKING_DIR = \"/content/drive/MyDrive/DinoTaggerPipeline\"\n","os.makedirs(WORKING_DIR, exist_ok=True)\n","print(f\"βœ… Working directory set to: {WORKING_DIR} (outputs only)\")\n","\n","# === NEW: Local model directory on /content/ (CPU storage only) ===\n","# Checkpoints will ONLY go here. Nothing is saved to Drive.\n","MODEL_DIR = \"/content/models\"\n","os.makedirs(MODEL_DIR, exist_ok=True)\n","print(f\"βœ… Model directory set to: {MODEL_DIR} (local /content/ storage only)\")\n","print(\" β†’ Checkpoint models (safetensors, vocab, etc.) will be saved here only.\")\n","\n","# HF_TOKEN from Colab Secrets\n","hf_token = userdata.get('HF_TOKEN')\n","if hf_token:\n"," os.environ[\"HF_TOKEN\"] = hf_token\n"," print(\"βœ… HF_TOKEN loaded from Colab Secrets and set as environment variable.\")\n"," print(\" β†’ This enables authenticated + faster/resumable Hugging Face downloads.\")\n","else:\n"," print(\"⚠️ HF_TOKEN not found in Colab Secrets.\")\n"," print(\" β†’ Add it via the key icon (left sidebar) β†’ Secrets β†’ Name: HF_TOKEN\")\n"," print(\" β†’ Some downloads may be slower or fail if the repo requires login.\")\n","\n","print(\"\\nβœ… Cell 0 complete. 
Models will now save to local /content/models only.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"VlWR_znDlD1W"},"outputs":[],"source":["#@markdown # Cell 1: Download All Models to Disk (CPU-only, No VRAM Load) (UPDATED)\n","\n","import torch\n","from huggingface_hub import hf_hub_download, snapshot_download\n","from pathlib import Path\n","import os\n","\n","print(\"Downloading models to local /content/models (CPU-only, resumable)...\")\n","\n","# 1. DINO Tagger (lodestones/tagger-experiment) β†’ saved to MODEL_DIR only\n","print(\"\\n1. Downloading DINO Tagger files...\")\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_proto.safetensors\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"tagger_vocab_with_categories.json\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","hf_hub_download(repo_id=\"lodestones/tagger-experiment\", filename=\"inference_tagger_standalone.py\", local_dir=MODEL_DIR, local_dir_use_symlinks=False)\n","\n","# 2. WD-VIT Tagger (selected_tags.csv) β†’ saved to MODEL_DIR only\n","print(\"\\n2. Downloading WD-VIT Tagger assets...\")\n","tags_path = Path(MODEL_DIR) / \"selected_tags.csv\"\n","if not tags_path.exists():\n"," import requests\n"," from io import StringIO\n"," import pandas as pd\n"," response = requests.get(\"https://huggingface.co/SmilingWolf/wd-vit-tagger-v3/resolve/main/selected_tags.csv\")\n"," tags_df = pd.read_csv(StringIO(response.text))\n"," tags_df.to_csv(tags_path, index=False)\n"," print(f\" β†’ Saved {len(tags_df)} tags to selected_tags.csv in {MODEL_DIR}\")\n","\n","# 3. Gemma-4-E2B-Heretic (processor + model files will be cached by transformers on local storage)\n","#print(\"\\n3. 
Gemma-4-E2B-Heretic will be cached on first load in Cell 7 (large model - stored locally).\")\n","\n","print(f\"\\nβœ… All lightweight models downloaded/cached to: {MODEL_DIR} (local /content/ only)\")\n","print(\" β†’ No checkpoint models were saved to Google Drive.\")\n","#print(\" Heavy models (Gemma) will download during loading in Cell 7.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"s1hmHVmGlEAn"},"outputs":[],"source":["#@markdown # Cell 2: Extract Images from ZIP (MODIFIED - now supports existing image+text pairs)\n","\n","from pathlib import Path\n","import zipfile\n","\n","# === Settings ===\n","zip_file_path = \"/content/drive/MyDrive/my_set.zip\" #@param {type:\"string\"}\n","# Full path on Drive, e.g. /content/drive/MyDrive/my_images.zip\n","\n","extract_dir = Path(\"/content/extracted_images\")\n","extract_dir.mkdir(exist_ok=True)\n","\n","if not zip_file_path or not Path(zip_file_path).exists():\n"," print(f\"❌ ZIP file not found at: {zip_file_path}\")\n"," print(\" β†’ Please provide a valid path to your image ZIP.\")\n","else:\n"," print(f\"Extracting images from: {zip_file_path}\")\n"," with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n"," # Filter out macOS specific files (e.g., __MACOSX directory, .DS_Store)\n"," members = [m for m in zip_ref.namelist() if not m.startswith('__MACOSX/') and not m.endswith('/.DS_Store')]\n"," for member in members:\n"," zip_ref.extract(member, extract_dir)\n"," print(\" β†’ Ignored __MACOSX/ and .DS_Store files during extraction.\")\n","\n"," image_files = []\n"," for ext in ['.png', '.jpg', '.jpeg', '.webp', '.avif','.bmp']:\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext}\")))\n"," image_files.extend(list(extract_dir.rglob(f\"*{ext.upper()}\")))\n","\n"," image_files = sorted(set(str(p) for p in image_files))\n","\n"," # === NEW: Support for ZIPs containing existing image + .txt caption pairs ===\n"," # If a .txt file with the same base name as an image exists, 
it will be read\n"," # and the generated tags (WD + DINO) will be inserted in the middle of its sentences.\n"," existing_captions = []\n"," paired_count = 0\n"," for img_str in image_files:\n"," img_path = Path(img_str)\n"," txt_path = img_path.with_suffix('.txt')\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," caption = f.read().strip()\n"," existing_captions.append(caption)\n"," paired_count += 1\n"," else:\n"," existing_captions.append(None)\n","\n"," print(f\"βœ… Extracted and found {len(image_files)} image(s). {paired_count} have existing text pairs.\")\n"," if paired_count > 0:\n"," print(\" β†’ Existing captions detected! Tags will be inserted after half the sentences (see Cell 6.a).\")\n"," print(\" Images are ready in /content/extracted_images/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"bS_tUFbMlEJl"},"outputs":[],"source":["#@markdown # Cell 3: Clear VRAM + Load WD-VIT Tagger v3 (UPDATED)\n","\n","import torch\n","import gc\n","import timm\n","import torchvision.transforms as transforms\n","import pandas as pd\n","from pathlib import Path\n","\n","# Clear VRAM\n","print(\"🧹 Clearing VRAM...\")\n","if 'model' in globals(): del model\n","if 'tagger' in globals(): del tagger\n","torch.cuda.empty_cache()\n","gc.collect()\n","print(\" β†’ VRAM cleared.\")\n","\n","# Load WD-VIT Tagger\n","print(\"\\nπŸ“₯ Loading WD-VIT Tagger v3...\")\n","# UPDATED: Use MODEL_DIR (local only)\n","tags_path = Path(MODEL_DIR) / \"selected_tags.csv\"\n","tags_df = pd.read_csv(tags_path)\n","tags_list = tags_df['name'].tolist()\n","print(f\" β†’ Loaded {len(tags_list)} tags.\")\n","\n","model = timm.create_model(\"hf_hub:SmilingWolf/wd-vit-tagger-v3\", pretrained=True)\n","model.eval()\n","device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n","model = model.to(device)\n","\n","preprocess = transforms.Compose([\n"," transforms.Resize((448, 448)),\n"," 
transforms.ToTensor(),\n"," transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),\n","])\n","\n","print(f\"βœ… WD-VIT Tagger loaded on {device}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"SthfRxwClETc"},"outputs":[],"source":["#@markdown # Cell 4: Tag Images with WD-VIT Tagger (FIXED) (unchanged)\n","\n","from PIL import Image\n","from pathlib import Path\n","\n","threshold = 0.75 #@param {type:\"slider\", min:0.1, max:0.95, step:0.01}\n","add_commas = True #@param {type:\"boolean\"}\n","\n","input_dir = Path(\"/content/extracted_images\")\n","output_dir = Path(\"/content/wd_tags\")\n","output_dir.mkdir(exist_ok=True)\n","\n","# FIXED: Clean glob pattern (was broken with Chinese text garbage)\n","image_files = sorted(list(input_dir.glob(\"*.*\")))\n","image_files = [f for f in image_files if f.suffix.lower() in {'.png','.jpg','.jpeg','.webp','.bmp','.avif'}]\n","\n","print(f\"Starting WD tagging for {len(image_files)} images...\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," img_name = img_path.name\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_name}\")\n","\n"," # Load image once\n"," image = Image.open(img_path).convert(\"RGB\")\n","\n"," # Prepare for model\n"," input_tensor = preprocess(image).unsqueeze(0).to(device)\n","\n"," with torch.no_grad():\n"," logits = model(input_tensor)\n"," probs = torch.sigmoid(logits).cpu().numpy()[0]\n","\n"," wd_tags = [tags_list[j] for j, prob in enumerate(probs) if prob > threshold]\n","\n"," tag_text = \" , \".join(wd_tags) if add_commas else \" \".join(wd_tags)\n","\n"," base_name = f\"{i:04d}\" # numbered for consistency\n","\n"," # FIXED: Reuse already-loaded image instead of opening twice\n"," image.save(output_dir / f\"{base_name}.jpg\", \"JPEG\", quality=95)\n","\n"," with open(output_dir / f\"{base_name}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(tag_text)\n","\n"," print(f\" β†’ Saved {base_name}.jpg + 
{base_name}.txt ({len(wd_tags)} WD tags)\")\n","\n","print(\"\\nβœ… WD tagging complete. Tags saved in /content/wd_tags/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"EKq1NmNYlEc3"},"outputs":[],"source":["#@markdown # Cell 5: Clear VRAM + Load DINO Tagger (UPDATED)\n","\n","import torch\n","import gc\n","import sys\n","from pathlib import Path\n","\n","# UPDATED: Add MODEL_DIR to sys.path so Python can find the module (inference_tagger_standalone.py is now in local /content/models)\n","if str(MODEL_DIR) not in sys.path:\n"," sys.path.insert(0, str(MODEL_DIR))\n","\n","from inference_tagger_standalone import Tagger # from Cell 1 download (now in MODEL_DIR)\n","\n","# Clear previous model\n","print(\"🧹 Clearing VRAM...\")\n","if 'model' in globals(): del model\n","torch.cuda.empty_cache()\n","gc.collect()\n","\n","# Load DINO Tagger\n","print(\"\\nπŸ“₯ Loading DINO Tagger...\")\n","# UPDATED: Load checkpoint & vocab from local MODEL_DIR only\n","tagger = Tagger(\n"," checkpoint_path=str(Path(MODEL_DIR) / \"tagger_proto.safetensors\"),\n"," vocab_path=str(Path(MODEL_DIR) / \"tagger_vocab_with_categories.json\"),\n"," device=\"cuda\" if torch.cuda.is_available() else \"cpu\",\n"," dtype=torch.bfloat16 if torch.cuda.is_available() else torch.float32,\n"," max_size=1024\n",")\n","\n","print(f\"βœ… DINO Tagger loaded on {tagger.device} (checkpoint from local /content/models)\")"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"EeBtbvB4lYd7","outputId":"b9b63f6f-b0f5-47dc-87a7-5c630210dcb7"},"outputs":[{"output_type":"stream","name":"stdout","text":["Starting DINO tagging for 200 images...\n","[1/200] Processing: 0001.jpg\n"," β†’ Combined and randomized 10 sentences and 51 tags.\n"," β†’ Saved 0001.jpg + 0001.txt (61 total elements in prompt)\n","[2/200] Processing: 0002.jpg\n"," β†’ Combined and randomized 11 sentences and 31 tags.\n"," β†’ Saved 0002.jpg + 0002.txt (42 
total elements in prompt)\n","[3/200] Processing: 0003.jpg\n"," β†’ Combined and randomized 8 sentences and 40 tags.\n"," β†’ Saved 0003.jpg + 0003.txt (48 total elements in prompt)\n","[4/200] Processing: 0004.jpg\n"," β†’ Combined and randomized 14 sentences and 62 tags.\n"," β†’ Saved 0004.jpg + 0004.txt (76 total elements in prompt)\n","[5/200] Processing: 0005.jpg\n"," β†’ Combined and randomized 9 sentences and 91 tags.\n"," β†’ Saved 0005.jpg + 0005.txt (100 total elements in prompt)\n","[6/200] Processing: 0006.jpg\n"," β†’ Combined and randomized 12 sentences and 67 tags.\n"," β†’ Saved 0006.jpg + 0006.txt (79 total elements in prompt)\n","[7/200] Processing: 0007.jpg\n"," β†’ Combined and randomized 11 sentences and 35 tags.\n"," β†’ Saved 0007.jpg + 0007.txt (46 total elements in prompt)\n","[8/200] Processing: 0008.jpg\n"," β†’ Combined and randomized 10 sentences and 53 tags.\n"," β†’ Saved 0008.jpg + 0008.txt (63 total elements in prompt)\n","[9/200] Processing: 0009.jpg\n"," β†’ Combined and randomized 11 sentences and 39 tags.\n"," β†’ Saved 0009.jpg + 0009.txt (50 total elements in prompt)\n","[10/200] Processing: 0010.jpg\n"," β†’ Combined and randomized 11 sentences and 61 tags.\n"," β†’ Saved 0010.jpg + 0010.txt (72 total elements in prompt)\n","[11/200] Processing: 0011.jpg\n"," β†’ Combined and randomized 11 sentences and 60 tags.\n"," β†’ Saved 0011.jpg + 0011.txt (71 total elements in prompt)\n","[12/200] Processing: 0012.jpg\n"]}],"source":["import re\n","import random # Added for randomization\n","\n","# === Rest of original Cell 6.a (with new randomization logic) ===\n","treshold_percent = 95 #@param {type:\"slider\", min:1, max:95, step:1}\n","max_tags = 100 #@param {type:\"slider\", min:5, max:150, step:5}\n","use_max_tags = False #@param {type:\"boolean\"}\n","\n","wd_dir = Path(\"/content/wd_tags\")\n","dino_dir = Path(\"/content/dino_combined\")\n","dino_dir.mkdir(exist_ok=True)\n","\n","image_files = 
sorted(list(wd_dir.glob(\"*.jpg\")))\n","\n","treshold = treshold_percent / 100.0\n","\n","print(f\"Starting DINO tagging for {len(image_files)} images...\")\n","\n","# Fallback if existing_captions was not created in Cell 2\n","if 'existing_captions' not in globals() or len(existing_captions) != len(image_files):\n"," existing_captions = [None] * len(image_files)\n"," print(\"⚠️ No existing_captions list found from Cell 2 (normal if input ZIP had no .txt files).\")\n","\n","for i, img_path in enumerate(image_files, start=1):\n"," base_name = img_path.stem\n"," txt_path = wd_dir / f\"{base_name}.txt\"\n","\n"," print(f\"[{i}/{len(image_files)}] Processing: {img_path.name}\")\n","\n"," if use_max_tags:\n"," tags_list = tagger.predict(str(img_path), topk=max_tags, threshold=None)\n"," else:\n"," tags_list = tagger.predict(str(img_path), topk=None, threshold=treshold)\n"," if len(tags_list) > max_tags:\n"," tags_list = tags_list[:max_tags]\n","\n"," dino_tags = [tag for tag, prob in tags_list]\n","\n"," # Read WD tags\n"," if txt_path.exists():\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," existing_text = f.read().strip()\n"," existing_tags = [t.strip() for t in existing_text.replace(\",\", \" \").split() if t.strip()]\n"," else:\n"," existing_tags = []\n","\n"," # Combine WD first, then DINO\n"," combined = existing_tags + dino_tags\n"," seen = set()\n"," final_tags = [tag for tag in combined if not (tag in seen or seen.add(tag))]\n"," # final_tags now contains de-duplicated WD and DINO tags.\n","\n"," all_caption_elements = []\n"," num_sentences = 0\n"," num_tags = len(final_tags)\n","\n"," existing_caption_for_this_image = existing_captions[i-1] if i-1 < len(existing_captions) else None\n","\n"," if existing_caption_for_this_image:\n"," # Apply user-requested word replacements\n"," existing_caption_for_this_image = re.sub(r'\\bchildren\\b', 'girls', existing_caption_for_this_image, flags=re.IGNORECASE)\n"," existing_caption_for_this_image = 
re.sub(r'\\bchild\\b', 'girl', existing_caption_for_this_image, flags=re.IGNORECASE)\n","\n"," # Split into sentences\n"," sentence_endings = re.compile(r'(?<=[.!?])\\s+')\n"," sentences = [s.strip() for s in sentence_endings.split(existing_caption_for_this_image) if s.strip()]\n"," all_caption_elements.extend(sentences)\n"," num_sentences = len(sentences)\n","\n"," all_caption_elements.extend(final_tags) # Add the collected tags\n","\n"," # Shuffle all elements (sentences and tags) together\n"," random.shuffle(all_caption_elements)\n","\n"," # Join them with appropriate separators\n"," # Use ' , ' if add_commas is true, otherwise ' '\n"," separator = \" , \" if add_commas else \" \"\n"," final_text = separator.join(all_caption_elements)\n","\n"," # User requested comma spacing adjustment for the final text (this still applies)\n"," final_text = re.sub(r'\\s*,\\s*', ' , ', final_text)\n","\n"," # Save combined / augmented result\n"," new_img = dino_dir / f\"{base_name}.jpg\"\n"," new_txt = dino_dir / f\"{base_name}.txt\"\n","\n"," Image.open(img_path).convert(\"RGB\").save(new_img, \"JPEG\", quality=95)\n"," with open(new_txt, \"w\", encoding=\"utf-8\") as f:\n"," f.write(final_text)\n","\n"," if num_sentences > 0:\n"," print(f\" β†’ Combined and randomized {num_sentences} sentences and {num_tags} tags.\")\n"," else:\n"," print(f\" β†’ Randomized {num_tags} tags (no existing sentences).\")\n"," print(f\" β†’ Saved {base_name}.jpg + {base_name}.txt ({len(all_caption_elements)} total elements in prompt)\")\n","\n","print(\"\\nβœ… DINO tags processed. 
Combined/augmented tags in /content/dino_combined/\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"Ytpw174ClYnj"},"outputs":[],"source":["#@markdown # Cell 6.b: Save DINO-Tagged Results to Google Drive (Safe to Disconnect) (unchanged)\n","\n","from pathlib import Path\n","import zipfile\n","import shutil\n","\n","print(\"πŸ’Ύ Saving all DINO-tagged results (images + combined tags) to Google Drive as a ZIP...\")\n","\n","dino_dir = Path(\"/content/dino_combined\")\n","if not dino_dir.exists() or not list(dino_dir.glob(\"*.jpg\")):\n"," print(\"❌ No /content/dino_combined folder found. Please run Cell 6 first.\")\n","else:\n"," # Create a ZIP for easy download / archive directly from dino_dir\n"," zip_path = Path(WORKING_DIR) / \"dino_tagged_results.zip\" # Changed zip name for clarity\n"," num_files_to_zip = len(list(dino_dir.iterdir()))\n"," print(f\" Zipping {num_files_to_zip} image+tag pairs...\")\n","\n"," with zipfile.ZipFile(zip_path, \"w\") as zipf:\n"," for file in dino_dir.iterdir():\n"," zipf.write(file, file.name) # Write directly from dino_dir, preserving original file names in zip\n","\n"," print(f\"\\nβœ… ZIP creation complete!\")\n"," print(f\" β†’ ZIP archive saved to: {zip_path}\")\n"," print(\"\\nYou can now safely disconnect / restart the runtime.\")\n"," print(\" When you come back, run Cell 6.c to restore everything.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"cellView":"form","id":"c_7hA_Rxmyep"},"outputs":[],"source":["#@markdown # Cell 6.c: Generate Combined Prompts for LLM Tweaking (deterministic image order)\n","\n","from pathlib import Path\n","import zipfile\n","import os\n","import shutil\n","\n","zip_file_on_drive_path = Path(WORKING_DIR) / \"dino_tagged_results.zip\"\n","drive_target_output_dir = Path(zip_file_on_drive_path).parent\n","drive_target_output_dir.mkdir(parents=True, exist_ok=True)\n","\n","print(f\"πŸ”„ Processing ZIP from Drive: 
{zip_file_on_drive_path}\")\n","\n","extract_dir = \"/content/extracted_txt_files_drive_processing\"\n","if os.path.exists(extract_dir):\n"," shutil.rmtree(extract_dir)\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","try:\n"," with zipfile.ZipFile(zip_file_on_drive_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n"," print(f\"βœ… ZIP extracted from Drive to {extract_dir}\")\n","except FileNotFoundError:\n"," print(f\"❌ ZIP file not found at {zip_file_on_drive_path}.\")\n"," combined_content = '{}'\n","\n","txt_paths = sorted(\n"," list(Path(extract_dir).glob(\"*.txt\")),\n"," key=lambda p: f\"{int(p.stem):05d}\" if p.stem.isdigit() else p.stem.lower()\n",")\n","\n","cleaned_titles = []\n","if not txt_paths:\n"," print(\"⚠️ No .txt files found!\")\n"," combined_content = '{}'\n","else:\n"," print(f\"πŸ“„ Found {len(txt_paths)} prompts (sorted deterministically by image number).\")\n"," for txt_path in txt_paths:\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," title = f.read().strip()\n","\n"," cleaned = (title\n"," .replace('^', '')\n"," .replace('{', '')\n"," .replace('}', '')\n"," .replace('|', '')\n"," .replace('\"','')\n"," .replace('>', '')\n"," .replace('<',''))\n"," cleaned_titles.append(cleaned)\n","\n"," combined_content = '{' + '|'.join(cleaned_titles) + '}'\n","\n","local_output_file_name = \"combined_for_llm_tweaking.txt\"\n","local_output_file_path = f\"/content/{local_output_file_name}\"\n","with open(local_output_file_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(combined_content)\n","\n","print(f\"\\nβœ… Done! 
Created deterministic combined prompt string with {len(cleaned_titles)} entries.\")\n","print(f\"πŸ“ Local file saved as: {local_output_file_path}\")\n","print(f\" β†’ Copy the entire content of this file and paste it into an LLM to tweak/refine all prompts at once.\")\n","print(f\" β†’ After tweaking, save the LLM output (keep the same {len(cleaned_titles)} | separated format) and use it in Cell 6.d.\")\n","\n","drive_final_output_path = drive_target_output_dir / local_output_file_name\n","shutil.copy(local_output_file_path, str(drive_final_output_path))\n","print(f\"☁️ Also saved to Google Drive: {drive_final_output_path}\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"83pxrBGwm4gY","cellView":"form"},"outputs":[],"source":["#@markdown # Cell 6.d: Apply Refined Prompts from LLM (updates image+text pairs)\n","\n","from pathlib import Path\n","\n","# === Settings ===\n","refined_combined_file = \"/content/drive/MyDrive/DinoTaggerPipeline/refined_combined_titles.txt\" #@param {type:\"string\"}\n","# ↑ Paste the full path to the .txt file you saved after tweaking in the LLM.\n","# The file must contain exactly one line in the format: {prompt1|prompt2|prompt3|...}\n","# (or without the outer {} β€” the code handles both).\n","\n","dino_dir = Path(\"/content/dino_combined\")\n","\n","if not dino_dir.exists() or not list(dino_dir.glob(\"*.jpg\")):\n"," print(\"❌ No /content/dino_combined folder found. 
Please run Cells 2–6.a (and 6.b) first.\")\n","else:\n","    # Get images in the exact same deterministic order as Cell 6.c\n","    image_files = sorted(\n","        list(dino_dir.glob(\"*.jpg\")),\n","        key=lambda p: f\"{int(p.stem):05d}\" if p.stem.isdigit() else p.stem.lower()\n","    )\n","\n","    # Read the refined combined file\n","    with open(refined_combined_file, \"r\", encoding=\"utf-8\") as f:\n","        content = f.read().strip()\n","\n","    # Remove outer {} if present\n","    if content.startswith('{') and content.endswith('}'):\n","        content = content[1:-1]\n","\n","    refined_prompts = [p.strip() for p in content.split('|') if p.strip()]\n","\n","    if len(refined_prompts) != len(image_files):\n","        print(f\"❌ Mismatch! Found {len(refined_prompts)} refined prompts but {len(image_files)} images.\")\n","        print(\"   → Make sure the LLM output has exactly the same number of entries separated by |\")\n","    else:\n","        print(f\"✅ Found {len(refined_prompts)} refined prompts. Updating .txt files in /content/dino_combined...\")\n","\n","        for i, img_path in enumerate(image_files):\n","            base_name = img_path.stem\n","            txt_path = dino_dir / f\"{base_name}.txt\"\n","\n","            new_prompt = refined_prompts[i]\n","\n","            with open(txt_path, \"w\", encoding=\"utf-8\") as f:\n","                f.write(new_prompt)\n","\n","            print(f\"   → Updated {base_name}.txt with refined prompt\")\n","\n","        print(\"\\n🎉 All image+text pairs have been updated with your refined prompts!\")\n","        print(\"   → You can now re-run Cell 6.b to create a fresh ZIP with the new refined .txt files.\")\n","        print(\"   → (Optional) Re-run Cell 6.c to generate a new combined file from the refined versions.\")"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"FQF71-mvmlc1"},"outputs":[],"source":["# ================================================\n","# Auto Disconnect Colab Session\n","# ================================================\n","\n","print(\"🔌 Disconnecting Colab session in 3 seconds...\")\n","import time\n","time.sleep(3)\n","\n","from 
google.colab import runtime\n","runtime.unassign()\n","\n","print(\"Session disconnected.\")"]}],"metadata":{"accelerator":"GPU","colab":{"gpuType":"T4","provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776414686834},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776305835855},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/dino_tagger.ipynb","timestamp":1776163207317},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775991040063},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775586753307},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/gemma4_batch_captioner.ipynb","timestamp":1775585424465}]},"kernelspec":{"display_name":"Python 3","name":"python3"},"language_info":{"name":"python"}},"nbformat":4,"nbformat_minor":0}