diff --git "a/fetch_from_reddit.ipynb" "b/fetch_from_reddit.ipynb"
--- "a/fetch_from_reddit.ipynb"
+++ "b/fetch_from_reddit.ipynb"
@@ -1 +1 @@
-{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"_gXEa0VE88VQ"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["# π Authenticate credentials for Reddit PRAW into Google colab Secrets"],"metadata":{"id":"gSErGKBctoAc"}},{"cell_type":"markdown","source":["\n","\n","**One-time setup** β after this, your notebook will automatically load Reddit credentials without prompts or hard-coded secrets.\n","\n","---\n","\n","### Step 1: Create Your Reddit βScriptβ App\n","1. Go to: **[https://www.reddit.com/prefs/apps](https://www.reddit.com/prefs/apps)** (log in first)\n","2. Scroll to the bottom β click **βcreate another appβ** (or **βcreate appβ**)\n","3. Fill the form:\n"," - **Name**: `Colab-PRAW-Script` (or any name you like)\n"," - **App type**: Select **script** β very important\n"," - **Description**: (optional)\n"," - **Redirect URI**: `http://localhost:8080`\n","4. Click **Create app**\n","\n","You will now see:\n","- **personal use script** β 14-character string β **REDDIT_CLIENT_ID**\n","- **secret** β long string β **REDDIT_CLIENT_SECRET**\n","\n","Copy both values.\n","\n","---\n","\n","### Step 2: Choose Your User-Agent\n","Use this format (replace `yourusername` with your actual Reddit username):"],"metadata":{"id":"rst5O8t0tW7F"}},{"cell_type":"markdown","source":[""],"metadata":{"id":"rQPyne1Hr_CM"}},{"cell_type":"markdown","source":["This will be your **REDDIT_USER_AGENT**.\n","\n","---\n","\n","### Step 3: Add Secrets in Google Colab\n","1. Open your Colab notebook\n","2. In the left sidebar click the **π Secrets** tab\n","3. 
Click **+ Add new secret** for each of these:\n","\n","| Secret Name | What to paste |\n","|------------------------|----------------------------------------------------|\n","| `REDDIT_CLIENT_ID` | 14-character string from Reddit |\n","| `REDDIT_CLIENT_SECRET` | The secret string from Reddit |\n","| `REDDIT_USER_AGENT` | `Colab-PRAW:v1.0 (by u/yourusername)` |\n","| `REDDIT_USERNAME` | Your Reddit username (no `u/`) |\n","| `REDDIT_PASSWORD` | Your Reddit account password |\n","\n","After each one, click **Add** and **Grant access** when prompted.\n","\n","---"],"metadata":{"id":"d4ikd1h7tbr5"}},{"cell_type":"code","source":["#@markdown **Reddit PRAW Authentication with Colab Secrets**\n","\n","# Install PRAW (run once)\n","!pip install praw -q\n","\n","import praw\n","from google.colab import userdata\n","import getpass\n","\n","# === AUTHENTICATION (uses secrets first, falls back to prompt if missing) ===\n","def get_secret_or_prompt(key, prompt_text):\n"," try:\n"," return userdata.get(key)\n"," except (KeyError, Exception):\n"," return getpass.getpass(prompt_text)\n","\n","reddit = praw.Reddit(\n"," client_id=get_secret_or_prompt(\"REDDIT_CLIENT_ID\", \"Enter your REDDIT_CLIENT_ID: \"),\n"," client_secret=get_secret_or_prompt(\"REDDIT_CLIENT_SECRET\", \"Enter your REDDIT_CLIENT_SECRET: \"),\n"," user_agent=get_secret_or_prompt(\"REDDIT_USER_AGENT\", \"Enter your REDDIT_USER_AGENT: \"),\n"," username=get_secret_or_prompt(\"REDDIT_USERNAME\", \"Enter your REDDIT_USERNAME: \"),\n"," password=get_secret_or_prompt(\"REDDIT_PASSWORD\", \"Enter your REDDIT_PASSWORD: \"),\n",")\n","\n","# Quick verification\n","print(\"β
Successfully logged in as:\", reddit.user.me())\n","print(\"π Read-only mode:\", reddit.read_only)"],"metadata":{"cellView":"form","id":"WLopd-NFtHwY"},"execution_count":null,"outputs":[]},{"cell_type":"markdown","metadata":{"id":"V5i2-xwTGG4K"},"source":["#π Fetch content from Reddit"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"background_save":true},"id":"-ewlMjrTga21","cellView":"form"},"outputs":[],"source":["# === INTEGRATED REDDIT DOWNLOADER + MEDIA EXTRACTOR (Drive + Checkboxes + Sliders Edition) ===\n","# Replace BOTH of your previous cells with this single block and run it.\n","# All results (titles, links, images, frames, zips) are saved directly to your Google Drive.\n","# Images/frames are numbered sequentially (1.jpeg, 2.jpeg, ...) with optional paired .txt files.\n","\n","# @markdown ** Subreddit & Fetch Settings**\n","subreddit_name = \"WatchItForThePlot\" # @param {type:\"string\"}\n","sort_method = \"top\" # @param [\"hot\", \"new\", \"top\"] {type:\"string\"}\n","num_posts_to_pull = 200 # @param {type:\"slider\", min:10, max:500, step:10}\n","offset_index = 0 # @param {type:\"slider\", min:0, max:500, step:10}\n","\n","# @markdown **β
Functionality Checkboxes (select any combination)**\n","thumbnail_low_res = False # @param {type:\"boolean\"}\n","#1) Thumbnail download at low res\n","proper_image_download = False # @param {type:\"boolean\"}\n","#2) Proper image download including sub images in galleries\n","gif_frame_extraction = True # @param {type:\"boolean\"}\n","#3) GIF + RedGifs video β keyframe extraction (original files are NEVER saved)\n","combined_titles_txt = True # @param {type:\"boolean\"}\n","#4) Combined txt file of titles\n","pair_txt_with_media = False # @param {type:\"boolean\"}\n","#5) Adding txt files to saved images in zips as enumerated pairs (1.txt, 2.txt, ...)\n","debug_mode = False # @param {type:\"boolean\"}\n","#6) Enable detailed debug printouts during media processing\n","\n","import os\n","import shutil\n","import glob\n","import requests\n","import subprocess\n","import yt_dlp\n","from google.colab import files, drive\n","\n","# ========================== DRIVE SETUP ==========================\n","print(\" Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=False)\n","\n","drive_base_dir = f\"/content/drive/MyDrive/{subreddit_name}_reddit_downloads\"\n","os.makedirs(drive_base_dir, exist_ok=True)\n","\n","# Local temp media storage (Not on Drive)\n","local_media_dir = f\"/content/temp_output/{subreddit_name}_media\"\n","if os.path.exists(local_media_dir): shutil.rmtree(local_media_dir)\n","os.makedirs(local_media_dir, exist_ok=True)\n","\n","local_output_dir = \"/content/output\"\n","os.makedirs(local_output_dir, exist_ok=True)\n","\n","print(f\" Individual images stay local; only Zips/Txt go to: {drive_base_dir}\")\n","\n","# ========================== FETCH POSTS ==========================\n","print(f\" Fetching up to {num_posts_to_pull} posts from r/{subreddit_name} ({sort_method} sorting, offset {offset_index})...\")\n","\n","sub = reddit.subreddit(subreddit_name)\n","\n","if sort_method == \"hot\":\n"," iterator = 
sub.hot(limit=offset_index + num_posts_to_pull + 50)\n","elif sort_method == \"new\":\n"," iterator = sub.new(limit=offset_index + num_posts_to_pull + 50)\n","elif sort_method == \"top\":\n"," iterator = sub.top(limit=offset_index + num_posts_to_pull + 50)\n","else:\n"," iterator = sub.hot(limit=offset_index + num_posts_to_pull + 50)\n","\n","all_posts = list(iterator)\n","posts = all_posts[offset_index : offset_index + num_posts_to_pull]\n","\n","print(f\"β
Successfully fetched {len(posts)} posts.\")\n","\n","# === NEW: Filename suffix with actual fetched count + offset ===\n","fetch_suffix = f\"_{len(posts)}posts_offset{offset_index}\"\n","\n","titles = []\n","external_links = []\n","\n","for submission in posts:\n"," cleaned_title = (submission.title\n"," .replace('^', '').replace('{', '').replace('}', '').replace('|', '')\n"," .replace('[','').replace(']','').replace('\"',''))\n"," titles.append(cleaned_title)\n","\n"," url = getattr(submission, 'url', None)\n"," if (url and url.startswith(('http://', 'https://')) and\n"," not any(domain in url for domain in ['reddit.com', 'redd.it'])):\n"," external_links.append(url)\n","\n","# ========================== OPTION 4: Combined Titles TXT ==========================\n","if combined_titles_txt:\n"," combined_content = '{' + '|'.join(titles) + '}'\n"," titles_file_drive = f\"{drive_base_dir}/{subreddit_name}_titles{fetch_suffix}.txt\"\n"," with open(titles_file_drive, \"w\", encoding=\"utf-8\") as f:\n"," f.write(combined_content)\n"," print(f\" Combined titles saved β {titles_file_drive}\")\n","\n","# ========================== External Links ==========================\n","if external_links:\n"," links_file = f\"{drive_base_dir}/{subreddit_name}_links{fetch_suffix}.txt\"\n"," with open(links_file, \"w\", encoding=\"utf-8\") as f:\n"," for link in external_links: f.write(link + \"\\n\")\n"," print(f\" External links saved β {links_file}\")\n"," print(f\" β {len(external_links)} links (mostly RedGifs)\")\n","\n","# ========================== MEDIA PROCESSING ==========================\n","any_media = thumbnail_low_res or proper_image_download or gif_frame_extraction\n","\n","if any_media:\n"," if gif_frame_extraction:\n"," !pip install -q yt-dlp\n","\n"," temp_dir = \"/content/temp_download\"\n"," os.makedirs(temp_dir, exist_ok=True)\n","\n"," global_counter = 1\n","\n"," def save_media_and_txt(source, is_url=False, title=\"\"):\n"," global global_counter\n"," if 
is_url:\n"," clean_url = source.split('?')[0]\n"," ext = os.path.splitext(clean_url)[1].lower()\n"," if ext not in ['.jpg', '.jpeg', '.png']: ext = '.jpg'\n"," temp_file = f\"{temp_dir}/dl_{global_counter}{ext}\"\n"," try:\n"," r = requests.get(source, stream=True, timeout=60)\n"," r.raise_for_status()\n"," with open(temp_file, 'wb') as f:\n"," for chunk in r.iter_content(8192): f.write(chunk)\n"," except Exception:\n"," return\n"," else:\n"," temp_file = source\n"," ext = os.path.splitext(temp_file)[1].lower() or '.jpeg'\n","\n"," local_path = f\"{local_media_dir}/{global_counter}{ext}\"\n"," shutil.copy2(temp_file, local_path)\n","\n"," if pair_txt_with_media:\n"," with open(f\"{local_media_dir}/{global_counter}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(title)\n","\n"," global_counter += 1\n"," print(f\" β
Saved locally #{global_counter-1} \", end=\"\\r\")\n"," if is_url and os.path.exists(temp_file): os.remove(temp_file)\n","\n"," for idx, submission in enumerate(posts, 1):\n"," cleaned_title = titles[idx-1]\n","\n"," if thumbnail_low_res:\n"," thumb_url = getattr(submission, 'thumbnail', None)\n"," if thumb_url and thumb_url.startswith('http'):\n"," save_media_and_txt(thumb_url, is_url=True, title=cleaned_title)\n","\n"," if proper_image_download:\n"," url = getattr(submission, 'url', None)\n"," if url and any(url.lower().endswith(e) for e in ['.jpg', '.jpeg', '.png']):\n"," save_media_and_txt(url, is_url=True, title=cleaned_title)\n","\n"," # === GIF + RedGifs keyframe extraction (debug toggleable) ===\n"," if gif_frame_extraction:\n"," url = getattr(submission, 'url', None)\n"," if url and url.startswith(('http://', 'https://')):\n"," if debug_mode:\n"," print(f\"π DEBUG [{idx:03d}]: Checking URL β {url[:90]}...\")\n","\n"," if url.lower().endswith('.gif') or 'redgifs.com' in url.lower():\n"," if debug_mode:\n"," print(f\"β
DEBUG: Eligible for keyframe extraction β {url[:90]}...\")\n"," temp_media = None\n"," try:\n"," # 1. Download\n"," if url.lower().endswith('.gif'):\n"," temp_media = f\"{temp_dir}/dl_gif_{idx}.gif\"\n"," r = requests.get(url, stream=True, timeout=60)\n"," r.raise_for_status()\n"," with open(temp_media, 'wb') as f:\n"," for chunk in r.iter_content(8192):\n"," f.write(chunk)\n"," if debug_mode:\n"," print(f\" π₯ Downloaded GIF ({os.path.getsize(temp_media)/1024/1024:.1f} MB)\")\n"," else:\n"," # RedGifs video\n"," temp_media = f\"{temp_dir}/dl_video_{idx}.mp4\"\n"," ydl_opts = {\n"," 'outtmpl': temp_media,\n"," 'quiet': True,\n"," 'no_warnings': True,\n"," 'format': 'bestvideo+bestaudio/best',\n"," 'merge_output_format': 'mp4',\n"," }\n"," with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n"," ydl.download([url])\n"," if debug_mode:\n"," print(f\" π₯ Downloaded RedGifs video ({os.path.getsize(temp_media)/1024/1024:.1f} MB)\")\n","\n"," # 2. Keyframe extraction\n"," output_pattern = f\"{temp_dir}/kf_{idx}_%04d.jpg\"\n"," cmd = [\n"," 'ffmpeg', '-y', '-i', temp_media,\n"," '-vf', \"select='gt(scene,0.15)',setpts=N/(FRAME_RATE*TB)\",\n"," '-vsync', 'vfr',\n"," '-q:v', '5',\n"," output_pattern\n"," ]\n"," subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n"," if debug_mode:\n"," print(f\" π¬ ffmpeg keyframe extraction finished (scene threshold 0.15)\")\n","\n"," # 3. Save keyframes\n"," extracted_frames = sorted(glob.glob(f\"{temp_dir}/kf_{idx}_*.jpg\"))\n"," if debug_mode:\n"," print(f\" πΈ Extracted {len(extracted_frames)} keyframes β saving to ZIP\")\n","\n"," for frame_path in extracted_frames:\n"," save_media_and_txt(frame_path, is_url=False, title=cleaned_title)\n"," os.remove(frame_path)\n","\n"," # 4. 
Cleanup original\n"," if temp_media and os.path.exists(temp_media):\n"," os.remove(temp_media)\n"," if debug_mode:\n"," print(f\" ποΈ Cleaned up original media file\")\n","\n"," except Exception as e:\n"," if debug_mode:\n"," print(f\" β οΈ ERROR processing {url[:80]}... β {e}\")\n"," if temp_media and os.path.exists(temp_media):\n"," os.remove(temp_media)\n"," continue\n","\n"," print(f\"\\n\\nβ
MEDIA CACHED! Creating ZIP on Drive...\")\n"," zip_name = f\"{subreddit_name}_media{fetch_suffix}\"\n"," shutil.make_archive(f\"{drive_base_dir}/{zip_name}\", 'zip', local_media_dir)\n"," print(f\" ZIP created on Drive β {drive_base_dir}/{zip_name}.zip\")\n","\n","print(\"\\nβ¨ ALL DONE!\")"]},{"cell_type":"markdown","metadata":{"id":"LaxF_cnIGNMh"},"source":["# Convert a dataset into a combined text\n","\n","{text1|text2|....} useful on perchance.org for random selection of text , in the prompt box https://perchance.org/ai-text-to-image-generator"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"trl0Eg8Xqy6B"},"outputs":[],"source":["from google.colab import files\n","import zipfile\n","import os\n","from pathlib import Path\n","import shutil # Added for rmtree\n","\n","print(\"π€ Please upload your ZIP file (the one containing 1.txt, 2.txt, 3.txt, β¦)\")\n","\n","# Let the user upload the ZIP\n","uploaded = files.upload()\n","\n","# Find the uploaded ZIP file\n","zip_files = [f for f in uploaded.keys() if f.lower().endswith('.zip')]\n","if not zip_files:\n"," raise ValueError(\"β No .zip file was uploaded. Please try again.\")\n","\n","zip_filename = zip_files[0]\n","zip_path = f\"/content/{zip_filename}\"\n","\n","# Extract the ZIP to a temporary folder\n","extract_dir = \"/content/extracted_txt_files\"\n","\n","# --- Added: Clear the directory if it exists ---\n","if os.path.exists(extract_dir):\n"," shutil.rmtree(extract_dir)\n","# ---\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
# Combine enumerated txt files (1.txt, 2.txt, ...) from an uploaded ZIP into
# a single {title1|title2|...} line for perchance.org prompt boxes.
import os
import shutil
import zipfile
from pathlib import Path

from google.colab import files

print("Please upload your ZIP file (the one containing 1.txt, 2.txt, 3.txt, ...)")

uploaded = files.upload()

zip_files = [name for name in uploaded if name.lower().endswith('.zip')]
if not zip_files:
    raise ValueError("No .zip file was uploaded. Please try again.")

zip_filename = zip_files[0]
zip_path = f"/content/{zip_filename}"

# Extract into a clean temporary folder (wipe leftovers from earlier runs so
# stale txt files cannot leak into the output).
extract_dir = "/content/extracted_txt_files"
if os.path.exists(extract_dir):
    shutil.rmtree(extract_dir)
os.makedirs(extract_dir, exist_ok=True)

with zipfile.ZipFile(zip_path, 'r') as zip_ref:
    zip_ref.extractall(extract_dir)

print(f"ZIP extracted to {extract_dir}")

# Sort numerically when stems are digits (1.txt, 2.txt, 10.txt), otherwise
# case-insensitively, with all numeric names first. The tuple key replaces
# the old fixed-width f"{int(stem):05d}" padding, which broke ordering once
# a stem exceeded five digits.
txt_paths = sorted(
    Path(extract_dir).glob("*.txt"),
    key=lambda p: (0, int(p.stem), '') if p.stem.isdigit() else (1, 0, p.stem.lower()),
)

if not txt_paths:
    raise ValueError("No .txt files found inside the ZIP!")

print(f"Found {len(txt_paths)} title files. Processing...")

# Characters that would break the {a|b|c} format, plus angle brackets.
FORBIDDEN_CHARS = '^{}|"<>[]'


def clean_title_text(text: str) -> str:
    """Remove forbidden characters and flatten newlines to spaces."""
    for ch in FORBIDDEN_CHARS:
        text = text.replace(ch, '')
    return text.replace('\n', ' ')


cleaned_titles = [
    clean_title_text(txt_path.read_text(encoding="utf-8").strip())
    for txt_path in txt_paths
]

# Exact target format: {text1|text2|text3|...} on a single line.
combined_content = '{' + '|'.join(cleaned_titles) + '}'

output_file = "/content/combined_titles.txt"
with open(output_file, "w", encoding="utf-8") as f:
    f.write(combined_content)

print(f"\nDone! Combined {len(cleaned_titles)} titles into one line.")
print(f"Saved as: {output_file}")
print(f"  (First 100 characters: {combined_content[:100]}...)")

# Auto-download the combined file.
files.download(output_file)

print("Download started. You can now use this file wherever you need the {title1|title2|...} format.")
#@markdown Randomly sample a selected zip file (e.g. one with many images) and download a smaller zip
import os
import random
import shutil
import zipfile
from glob import glob

from google.colab import files

# --- Configuration ---
input_zip_file = '' #@param {type:'string'}
num_images_to_pick = 180 #@param {type:'slider',min:0,step:5,max:1000}

extraction_dir = '/content/extracted_keyframes_temp'
# BUGFIX: the output name was hardcoded as "random_300_keyframes.zip"
# regardless of the slider value -- derive it from the actual sample size.
output_zip_name = f'random_{num_images_to_pick}_keyframes.zip'

# --- Step 1: Unzip the input file ---
print(f"Unzipping {input_zip_file}...")
if os.path.exists(extraction_dir):
    shutil.rmtree(extraction_dir)
os.makedirs(extraction_dir, exist_ok=True)

try:
    with zipfile.ZipFile(input_zip_file, 'r') as zip_ref:
        zip_ref.extractall(extraction_dir)
    print("Unzip complete.")
except Exception as e:
    # Surface the failure (bad path, corrupt archive) and stop.
    print(f"Error unzipping file: {e}")
    raise

# --- Step 2: Find all images in the extracted directory (recursive) ---
all_images = []
for ext in ('*.jpg', '*.jpeg', '*.png', '*.gif'):
    all_images.extend(glob(os.path.join(extraction_dir, '**', ext), recursive=True))

if not all_images:
    print("No images found in the extracted archive.")
    raise FileNotFoundError("No images to process.")

print(f"Found {len(all_images)} total images.")

# --- Step 3: Randomly select images ---
if len(all_images) <= num_images_to_pick:
    # Fewer available than requested: take everything.
    selected_images = all_images
    print(f"Selecting all {len(all_images)} images (fewer than {num_images_to_pick} available).")
else:
    selected_images = random.sample(all_images, num_images_to_pick)
    print(f"Randomly selected {len(selected_images)} images.")

# --- Step 4: Create a new zip with the selection ---
output_zip_path = os.path.join('/content', output_zip_name)
print(f"Creating new zip file: {output_zip_name}...")
with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as new_zip:
    for img_path in selected_images:
        # Preserve each image's relative path inside the archive.
        new_zip.write(img_path, os.path.relpath(img_path, extraction_dir))

print(f"New zip file created: {output_zip_path}")

# --- Step 5: Download ---
print("Initiating download of the new zip file...")
files.download(output_zip_path)

# --- Step 6: Cleanup temporary files ---
print("Cleaning up temporary extraction directory...")
shutil.rmtree(extraction_dir)
print("Cleanup complete.")
print("You can find the downloaded zip file in your local downloads.")
#@markdown # itch.io Image-Text Dataset Creator
#@markdown **Fetch enumerated thumbnails + matching TXT files -> ZIP -> Google Drive**

#@markdown ---
#@markdown ### Choose your settings below then **Run this cell**

num_games = 1000 #@param {type:"slider", min:10, max:5000, step:10}

sort_by = "Most Recent" #@param ["Popular (default)", "New & Popular", "Top Sellers", "Top Rated", "Most Recent"]

subcategory = "None (All Games)" #@param ["None (All Games)", "Genre: Action", "Genre: Adventure", "Genre: Arcade", "Genre: Card Game", "Genre: Educational", "Genre: Fighting", "Genre: Platformer", "Genre: Puzzle", "Genre: RPG", "Genre: Shooter", "Genre: Simulation", "Genre: Strategy", "Genre: Visual Novel", "Platform: Web", "Platform: Windows", "Platform: macOS", "Platform: Linux", "Platform: Android", "Tag: 2D", "Tag: Pixel Art", "Tag: Horror", "Tag: Multiplayer", "Tag: Roguelike", "Tag: Retro"]

#@markdown ---
#@markdown **After changing the values above, click the Run button on this cell.**

import datetime
import os
import re
import shutil
import time
from urllib.parse import urljoin

import requests
from bs4 import BeautifulSoup
from IPython.display import display, HTML, Image
from google.colab import drive

print("Starting itch.io scraper with your chosen settings...")

# ====================== MAP USER CHOICES TO URL SLUGS ======================
SORT_SLUGS = {
    "Popular (default)": "",
    "New & Popular": "new-and-popular",
    "Top Sellers": "top-sellers",
    "Top Rated": "top-rated",
    "Most Recent": "newest",
}

FILTER_SLUGS = {
    "None (All Games)": "",
    "Genre: Action": "genre-action",
    "Genre: Adventure": "genre-adventure",
    "Genre: Arcade": "genre-arcade",
    "Genre: Card Game": "genre-card-game",
    "Genre: Educational": "genre-educational",
    "Genre: Fighting": "genre-fighting",
    "Genre: Platformer": "genre-platformer",
    "Genre: Puzzle": "genre-puzzle",
    "Genre: RPG": "genre-rpg",
    "Genre: Shooter": "genre-shooter",
    "Genre: Simulation": "genre-simulation",
    "Genre: Strategy": "genre-strategy",
    "Genre: Visual Novel": "genre-visual-novel",
    "Platform: Web": "platform-web",
    "Platform: Windows": "platform-windows",
    "Platform: macOS": "platform-macos",
    "Platform: Linux": "platform-linux",
    "Platform: Android": "platform-android",
    "Tag: 2D": "tag-2d",
    "Tag: Pixel Art": "tag-pixel-art",
    "Tag: Horror": "tag-horror",
    "Tag: Multiplayer": "tag-multiplayer",
    "Tag: Roguelike": "tag-roguelike",
    "Tag: Retro": "tag-retro",
}

sort_slug = SORT_SLUGS[sort_by]
filter_slug = FILTER_SLUGS[subcategory]

# ====================== CONFIG ======================
MAX_PAGES = 20
DELAY_BETWEEN_PAGES = 1.5  # polite crawl delay (seconds)
HEADERS = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                  "(KHTML, like Gecko) Chrome/134.0 Safari/537.36"
}

# Build the listing URL: https://itch.io/games[/<sort>][/<filter>]
base_url = "https://itch.io/games"
if sort_slug:
    base_url += f"/{sort_slug}"
if filter_slug:
    base_url += f"/{filter_slug}"

print(f"Target: {base_url} | Fetching up to {num_games} games")

print("Mounting Google Drive...")
drive.mount('/content/drive', force_remount=True)

dataset_dir = "/content/itch_dataset"
os.makedirs(dataset_dir, exist_ok=True)


def extract_game_title(cell):
    """Return the game title from one .game_cell div, or None."""
    title_tag = cell.find("div", class_="game_title")
    if not title_tag:
        return None
    title_link = title_tag.find("a", class_="title")
    if title_link:
        return title_link.get_text(strip=True) or None
    # Fallback: strip any trailing "$x.xx" price text fused into the title.
    return title_tag.get_text(strip=True).split("$")[0].strip() or None


def extract_thumbnail_url(cell):
    """Return an absolute thumbnail URL from one .game_cell div, or None."""
    img_url = None
    img_tag = cell.find("img")
    if img_tag:
        # itch.io lazy-loads images; prefer the lazy attributes over src.
        for attr in ("data-lazy-src", "data-lazy_src", "data-src", "src"):
            img_url = img_tag.get(attr)
            if img_url:
                break
        if not img_url and img_tag.get("srcset"):
            img_url = img_tag.get("srcset").split(",")[0].strip().split(" ")[0]

    # Fallback: CSS background-image on any styled descendant.
    if not img_url:
        for el in cell.find_all(lambda t: t.has_attr("style") and
                                "background-image" in t.get("style", "").lower()):
            match = re.search(r'background-image\s*:\s*url\([\'"]?([^\'"]+)[\'"]?\)',
                              el.get("style", ""), re.IGNORECASE)
            if match:
                img_url = match.group(1)
                break

    if not img_url:
        return None
    if img_url.startswith("//"):
        return "https:" + img_url
    if not img_url.startswith(("http://", "https://")):
        return urljoin("https://itch.io", img_url)
    return img_url


# ====================== SCRAPE MULTIPLE PAGES ======================
pairs = []  # (title, thumbnail_url)
page = 1

while len(pairs) < num_games and page <= MAX_PAGES:
    url = f"{base_url}?page={page}" if page > 1 else base_url
    print(f"Scraping page {page} -> {url}")

    try:
        response = requests.get(url, headers=HEADERS, timeout=20)
        if response.status_code != 200:
            print(f"Page {page} failed (HTTP {response.status_code})")
            break

        soup = BeautifulSoup(response.text, "html.parser")
        game_cells = soup.find_all("div", class_="game_cell")

        if not game_cells:
            print("No more games on this page.")
            break

        added = 0
        for cell in game_cells:
            if len(pairs) >= num_games:
                break
            title = extract_game_title(cell)
            if not title:
                continue
            img_url = extract_thumbnail_url(cell)
            if img_url:
                pairs.append((title, img_url))
                added += 1

        print(f"  Added {added} games (total so far: {len(pairs)})")

    except Exception as e:
        print(f"Error on page {page}: {e}")
        break

    page += 1
    time.sleep(DELAY_BETWEEN_PAGES)

if not pairs:
    print("No games found with current filters. Try different settings.")
else:
    print(f"\nCollected {len(pairs)} games. Downloading images and creating TXT files...")

    # ====================== DOWNLOAD ENUMERATED FILES ======================
    downloaded = 0
    for idx, (title, img_url) in enumerate(pairs, start=1):
        num_str = f"{idx:04d}"
        img_path = f"{dataset_dir}/{num_str}.jpg"
        txt_path = f"{dataset_dir}/{num_str}.txt"

        try:
            img_response = requests.get(img_url, headers=HEADERS, timeout=15)
            if img_response.status_code == 200:
                with open(img_path, "wb") as f:
                    f.write(img_response.content)
                with open(txt_path, "w", encoding="utf-8") as f:
                    f.write(title)
                downloaded += 1
                if downloaded % 10 == 0 or downloaded == len(pairs):
                    # BUGFIX: report the actual filename (num_str); the old
                    # message printed the running `downloaded` count as the
                    # filename, which was wrong after any failed download.
                    print(f"  Saved {num_str}.jpg + {num_str}.txt ({downloaded} ok)")
            else:
                print(f"Failed to download image {num_str}")
        except Exception as e:
            print(f"Error downloading {num_str}: {e}")

    # ====================== CREATE ZIP & SAVE TO DRIVE ======================
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    zip_name = f"itch_games_{timestamp}"
    zip_path_local = f"/content/{zip_name}.zip"

    print(f"\nCreating ZIP file with {downloaded} image+txt pairs...")
    shutil.make_archive(f"/content/{zip_name}", 'zip', dataset_dir)

    drive_folder = "/content/drive/MyDrive/itch_datasets"
    os.makedirs(drive_folder, exist_ok=True)
    drive_zip_path = f"{drive_folder}/{zip_name}.zip"
    shutil.copy(zip_path_local, drive_zip_path)

    print("\n" + "=" * 80)
    print("SUCCESS! Your dataset is ready")
    print(f"ZIP file: {zip_name}.zip")
    print(f"Saved to Google Drive -> {drive_zip_path}")
    print("  - Files inside: 0001.jpg + 0001.txt, 0002.jpg + 0002.txt, ...")
    print(f"  - Total pairs: {downloaded}")
    print("=" * 80)

    # ====================== PREVIEW FIRST 3 PAIRS ======================
    print("\nPreview of first 3 pairs:")
    for i in range(min(3, len(pairs))):
        num_str = f"{i + 1:04d}"
        img_file = f"{dataset_dir}/{num_str}.jpg"
        display(HTML(f"<b>{num_str}. {pairs[i][0]}</b>"))
        display(Image(filename=img_file, width=400))
        print("-" * 70)

    print(f"\nAll files are also in: {dataset_dir} (you can download the folder manually if needed)")
Added offset slider + metadata in ZIP**\n","#@markdown Now you can fetch any batch (e.g. first 1000 β offset 0, next 1000 β offset 1000, etc.)\n","\n","#@markdown ---\n","#@markdown ### π Choose your settings below then **Run this cell**\n","\n","num_vns = 1000 #@param {type:\"slider\", min:10, max:5000, step:10, description:\"How many visual novels to fetch in this batch\"}\n","\n","offset = 0 #@param {type:\"slider\", min:0, max:20000, step:100}\n","#description:\"Offset: skip this many VNs before starting (0 = first batch, 1000 = second batch, etc.)\"}\n","\n","sort_by = \"Most Recent (released desc)\" #@param [\"Most Recent (released desc)\", \"Highest Rated\", \"Most Voted\"]\n","\n","#@markdown **Tag ID** (from your link: https://vndb.org/g3560)\n","tag_id = \"g3560\" #@param {type:\"string\", description:\"VNDB tag ID (e.g. g3560 = 3D Graphics)\"}\n","\n","#@markdown ---\n","\n","#@markdown **After changing the values above, just click the βΆοΈ Run button on this cell.**\n","\n","# ================================================\n","# β
FULLY DEBUGGED + OFFSET + METADATA READY-TO-RUN COLAB CELL\n","# ================================================\n","\n","import requests\n","import os\n","import json\n","import time\n","import shutil\n","import datetime\n","from IPython.display import display, HTML, Image\n","from google.colab import drive\n","\n","print(\"β
Starting VNDB scraper with OFFSET support...\")\n","\n","# ====================== CONFIG ======================\n","MAX_RESULTS_PER_PAGE = 100\n","DELAY_BETWEEN_PAGES = 0.5\n","HEADERS = {\n"," \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n"," \"(KHTML, like Gecko) Chrome/134.0 Safari/537.36\",\n"," \"Content-Type\": \"application/json\"\n","}\n","\n","API_URL = \"https://api.vndb.org/kana/vn\"\n","\n","# Sort mapping\n","sort_map = {\n"," \"Most Recent (released desc)\": {\"sort\": \"released\", \"reverse\": True},\n"," \"Highest Rated\": {\"sort\": \"rating\", \"reverse\": True},\n"," \"Most Voted\": {\"sort\": \"votecount\", \"reverse\": True}\n","}\n","\n","selected_sort = sort_map[sort_by]\n","\n","print(f\"π Target: VNDB Tag {tag_id} | Offset: {offset} | Fetching up to {num_vns} VNs (sorted by {sort_by})\")\n","\n","# Mount Google Drive\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create working folder\n","dataset_dir = \"/content/vndb_dataset\"\n","os.makedirs(dataset_dir, exist_ok=True)\n","\n","# ====================== CALCULATE PAGINATION WITH OFFSET ======================\n","start_page = (offset // MAX_RESULTS_PER_PAGE) + 1\n","skip_in_first_page = offset % MAX_RESULTS_PER_PAGE\n","\n","print(f\"π Calculated start_page = {start_page}, skip first {skip_in_first_page} items on that page\")\n","\n","# ====================== FETCH VIA VNDB KANA API ======================\n","pairs = []\n","page = start_page\n","items_collected = 0\n","\n","while len(pairs) < num_vns:\n"," payload = {\n"," \"filters\": [\"tag\", \"=\", tag_id],\n"," \"fields\": \"id, title, image.url\",\n"," \"sort\": selected_sort[\"sort\"],\n"," \"reverse\": selected_sort[\"reverse\"],\n"," \"results\": MAX_RESULTS_PER_PAGE,\n"," \"page\": page\n"," }\n","\n"," # ==================== FULL DEBUG PRINT ====================\n"," print(f\"\\nπ === API REQUEST PAGE {page} (offset={offset}) 
===\")\n","    print(\"Payload sent:\")\n","    print(json.dumps(payload, indent=2))\n","    # ========================================================\n","\n","    try:\n","        response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=30)\n","\n","        print(f\"   π‘ Status code: {response.status_code}\")\n","\n","        if response.status_code != 200:\n","            print(\"   β ERROR RESPONSE BODY:\")\n","            try:\n","                error_json = response.json()\n","                print(json.dumps(error_json, indent=2))\n","            except Exception:\n","                print(response.text[:1000])\n","            break\n","\n","        data = response.json()\n","        results = data.get(\"results\", [])\n","\n","        if not results:\n","            print(\"β
No more results.\")\n"," break\n","\n"," # Handle offset skipping on the very first page we fetch\n"," if page == start_page and skip_in_first_page > 0:\n"," print(f\" βοΈ Skipping first {skip_in_first_page} items due to offset\")\n"," results = results[skip_in_first_page:]\n"," skip_in_first_page = 0\n","\n"," added = 0\n"," for vn in results:\n"," if len(pairs) >= num_vns:\n"," break\n","\n"," title = vn.get(\"title\", \"\").strip()\n"," if not title:\n"," continue\n","\n"," img_url = vn.get(\"image\", {}).get(\"url\") if isinstance(vn.get(\"image\"), dict) else None\n","\n"," if img_url:\n"," pairs.append((title, img_url))\n"," added += 1\n"," items_collected += 1\n","\n"," print(f\" β Added {added} VNs (total so far: {len(pairs)})\")\n","\n"," if not data.get(\"more\", False):\n"," print(\"β
Reached the end of results.\")\n"," break\n","\n"," except Exception as e:\n"," print(f\"β Exception on API page {page}: {e}\")\n"," break\n","\n"," page += 1\n"," time.sleep(DELAY_BETWEEN_PAGES)\n","\n","if not pairs:\n"," print(\"\\nβ No visual novels found in this offset range. Check debug output above.\")\n","else:\n"," print(f\"\\nβ
Collected {len(pairs)} visual novels (offset {offset}). Downloading images and creating TXT files...\")\n","\n"," # ====================== DOWNLOAD ENUMERATED FILES ======================\n"," downloaded = 0\n"," for idx, (title, img_url) in enumerate(pairs, start=1):\n"," num_str = f\"{idx:04d}\"\n"," img_path = f\"{dataset_dir}/{num_str}.jpg\"\n"," txt_path = f\"{dataset_dir}/{num_str}.txt\"\n","\n"," try:\n"," img_response = requests.get(img_url, headers=HEADERS, timeout=15)\n"," if img_response.status_code == 200:\n"," with open(img_path, \"wb\") as f:\n"," f.write(img_response.content)\n"," with open(txt_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(title)\n"," downloaded += 1\n"," if downloaded % 10 == 0 or downloaded == len(pairs):\n"," print(f\" β
Saved {num_str}.jpg + {num_str}.txt\")\n"," else:\n"," print(f\"β οΈ Failed to download image {num_str} (HTTP {img_response.status_code})\")\n"," except Exception as e:\n"," print(f\"β Error downloading {num_str}: {e}\")\n","\n"," # ====================== WRITE METADATA (index/offset/tag) ======================\n"," timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n"," with open(f\"{dataset_dir}/INFO.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(f\"VNDB Tag ID : {tag_id}\\n\")\n"," f.write(f\"Offset : {offset}\\n\")\n"," f.write(f\"Batch Size : {num_vns}\\n\")\n"," f.write(f\"Actual Downloaded: {downloaded}\\n\")\n"," f.write(f\"Sort Order : {sort_by}\\n\")\n"," f.write(f\"Start Page : {start_page}\\n\")\n"," f.write(f\"Collected on : {timestamp}\\n\")\n"," f.write(f\"File index 0001 = VN #{offset + 1} in the full tag list\\n\")\n","\n"," print(\"π Metadata INFO.txt written (contains index/offset/tag info)\")\n","\n"," # ====================== CREATE ZIP & SAVE TO DRIVE ======================\n"," zip_name = f\"vndb_{tag_id}_offset{offset:04d}_{num_vns}vns_{timestamp}\"\n"," zip_path_local = f\"/content/{zip_name}.zip\"\n","\n"," print(f\"\\nποΈ Creating ZIP file with {downloaded} image+txt pairs + INFO.txt...\")\n"," shutil.make_archive(f\"/content/{zip_name}\", 'zip', dataset_dir)\n","\n"," drive_folder = \"/content/drive/MyDrive/vndb_datasets\"\n"," os.makedirs(drive_folder, exist_ok=True)\n"," drive_zip_path = f\"{drive_folder}/{zip_name}.zip\"\n"," shutil.copy(zip_path_local, drive_zip_path)\n","\n"," print(\"\\n\" + \"=\"*80)\n"," print(\"π SUCCESS! Your dataset is ready\")\n"," print(f\"π¦ ZIP file: {zip_name}.zip\")\n"," print(f\"π€ Saved to Google Drive β {drive_zip_path}\")\n"," print(f\" β’ Contains: 0001.jpg + 0001.txt ... 
+ INFO.txt (with offset/tag/index)\")\n"," print(f\" β’ Total pairs: {downloaded}\")\n"," print(\"=\"*80)\n","\n"," # ====================== PREVIEW FIRST 3 PAIRS ======================\n"," print(\"\\nπΈ Preview of first 3 pairs (click images to enlarge):\")\n"," for i in range(min(3, len(pairs))):\n"," num_str = f\"{i+1:04d}\"\n"," img_file = f\"{dataset_dir}/{num_str}.jpg\"\n"," display(HTML(f\"{num_str}. {pairs[i][0]}
\"))\n"," display(Image(filename=img_file, width=400))\n"," print(\"β\" * 70)\n","\n"," print(f\"\\nβ
All files are also in: {dataset_dir} (you can download the folder manually if needed)\")"]},{"cell_type":"markdown","source":["# Pinterest fetch"],"metadata":{"id":"JrtsI98cmAxB"}},{"cell_type":"markdown","metadata":{"id":"HO3NmF03QDpt"},"source":["Pinterest board downloader\n","1. Install the EditThisCookie (or \"Get cookies.txt LOCALLY\") Chrome extension.\n","2. Log into Pinterest in Chrome β open your board.\n","3. Click the extension icon β Export cookies for pinterest.com β save as cookies.txt (plain text / Netscape format).\n","4. In Colab, click the folder icon (left sidebar) β upload cookies.txt to your google drive."]},{"cell_type":"code","source":["# ==================== SINGLE CELL - FULL PINTEREST BOARD DOWNLOADER (Cookies from Google Drive) ====================\n","\n","# Install gallery-dl\n","!pip install -q gallery-dl\n","\n","import os\n","from google.colab import files\n","from google.colab import drive\n","import shutil # Import shutil for copying files\n","\n","# ====================== CONFIGURATION ======================\n","# 1. Paste your board URL here\n","board_url = \"\" #@param {type:\"string\"}\n","\n","# 2. Path to your cookies file on Google Drive (change only if it's in a subfolder)\n","cookies_file = \"/content/drive/MyDrive/pinterest_cookies.txt\" #@param {type:\"string\"}\n","\n","# Optional: custom board name (auto-detected from URL by default)\n","board_name = board_url.rstrip(\"/\").split(\"/\")[-1]\n","#or \"pinterest_board\" #@param {type:\"string\"}\n","\n","print(\"β
Board URL:\", board_url)\n","print(\"π Board name:\", board_name)\n","print(\"π Cookies path:\", cookies_file)\n","\n","# ====================== MOUNT GOOGLE DRIVE ======================\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=False)\n","\n","# Check if cookies file exists\n","if os.path.exists(cookies_file):\n"," print(\"β
Cookies file found! Full board (700+ images) will be downloaded.\")\n","else:\n"," print(\"β Cookies file NOT found at the path above. Only ~200 images will download.\")\n","\n","# ====================== CREATE OUTPUT FOLDER ======================\n","output_dir = f\"/content/{board_name}\"\n","os.makedirs(output_dir, exist_ok=True)\n","\n","print(\"π Starting download... (this can take a while for large boards)\")\n","\n","# ====================== BUILD & RUN GALLERY-DL COMMAND ======================\n","cmd = f'gallery-dl --dest \"{output_dir}\"'\n","if os.path.exists(cookies_file):\n"," cmd += f' --cookies \"{cookies_file}\"'\n","cmd += f' \"{board_url}\"'\n","\n","# Execute the download\n","!{cmd}\n","\n","# Count downloaded files\n","total_files = sum([len(files) for r, d, files in os.walk(output_dir)])\n","print(f\"β
Download finished! {total_files} files saved in {output_dir}\")\n","\n","# ====================== ZIP & AUTO-DOWNLOAD ======================\n","zip_path = f\"/content/{board_name}.zip\"\n","print(\"π¦ Zipping all images...\")\n","!zip -r -q \"{zip_path}\" \"{output_dir}\"\n","\n","print(f\"β
Zipped everything β {zip_path}\")\n","\n","# Save to Google Drive\n","drive_zip_destination = f\"/content/drive/MyDrive/{board_name}.zip\"\n","shutil.copy2(zip_path, drive_zip_destination)\n","print(f\"β
Copied '{zip_path}' to Google Drive at '{drive_zip_destination}'\")\n","\n","# Auto-download the zip to your computer\n","#files.download(zip_path)\n","\n","#print(\"π All done! Your full Pinterest board is now downloaded, zipped, and available in your Google Drive and local downloads.\")"],"metadata":{"id":"E7vSr64JkWWa","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"g0524iIvU1I_"},"outputs":[],"source":["# Auto-download the zip file\n","files.download(zip_path)"]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/fetch_from_reddit.ipynb","timestamp":1775893490401},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/fetch_from_reddit.ipynb","timestamp":1775849356252},{"file_id":"1mdMfryfIudXrlN1MWsiNjoXQjondKOah","timestamp":1775845568290},{"file_id":"/v2/external/notebooks/intro.ipynb","timestamp":1775575177068}],"collapsed_sections":["LaxF_cnIGNMh","SbX9rloh3iZb","qisU9VeIyzX2","0VlyEacYp7Pn","JrtsI98cmAxB"]},"kernelspec":{"display_name":"Python 3","name":"python3"}},"nbformat":4,"nbformat_minor":0}
\ No newline at end of file
+{"cells":[{"cell_type":"code","execution_count":null,"metadata":{"id":"_gXEa0VE88VQ"},"outputs":[],"source":["from google.colab import drive\n","drive.mount('/content/drive')"]},{"cell_type":"markdown","source":["# π Authenticate credentials for Reddit PRAW into Google colab Secrets"],"metadata":{"id":"gSErGKBctoAc"}},{"cell_type":"markdown","source":["\n","\n","**One-time setup** β after this, your notebook will automatically load Reddit credentials without prompts or hard-coded secrets.\n","\n","---\n","\n","### Step 1: Create Your Reddit βScriptβ App\n","1. Go to: **[https://www.reddit.com/prefs/apps](https://www.reddit.com/prefs/apps)** (log in first)\n","2. Scroll to the bottom β click **βcreate another appβ** (or **βcreate appβ**)\n","3. Fill the form:\n"," - **Name**: `Colab-PRAW-Script` (or any name you like)\n"," - **App type**: Select **script** β very important\n"," - **Description**: (optional)\n"," - **Redirect URI**: `http://localhost:8080`\n","4. Click **Create app**\n","\n","You will now see:\n","- **personal use script** β 14-character string β **REDDIT_CLIENT_ID**\n","- **secret** β long string β **REDDIT_CLIENT_SECRET**\n","\n","Copy both values.\n","\n","---\n","\n","### Step 2: Choose Your User-Agent\n","Use this format (replace `yourusername` with your actual Reddit username):"],"metadata":{"id":"rst5O8t0tW7F"}},{"cell_type":"markdown","source":[""],"metadata":{"id":"rQPyne1Hr_CM"}},{"cell_type":"markdown","source":["This will be your **REDDIT_USER_AGENT**.\n","\n","---\n","\n","### Step 3: Add Secrets in Google Colab\n","1. Open your Colab notebook\n","2. In the left sidebar click the **π Secrets** tab\n","3. 
Click **+ Add new secret** for each of these:\n","\n","| Secret Name | What to paste |\n","|------------------------|----------------------------------------------------|\n","| `REDDIT_CLIENT_ID` | 14-character string from Reddit |\n","| `REDDIT_CLIENT_SECRET` | The secret string from Reddit |\n","| `REDDIT_USER_AGENT` | `Colab-PRAW:v1.0 (by u/yourusername)` |\n","| `REDDIT_USERNAME` | Your Reddit username (no `u/`) |\n","| `REDDIT_PASSWORD` | Your Reddit account password |\n","\n","After each one, click **Add** and **Grant access** when prompted.\n","\n","---"],"metadata":{"id":"d4ikd1h7tbr5"}},{"cell_type":"code","source":["#@markdown **Reddit PRAW Authentication with Colab Secrets**\n","\n","# Install PRAW (run once)\n","!pip install praw -q\n","\n","import praw\n","from google.colab import userdata\n","import getpass\n","\n","# === AUTHENTICATION (uses secrets first, falls back to prompt if missing) ===\n","def get_secret_or_prompt(key, prompt_text):\n"," try:\n"," return userdata.get(key)\n"," except (KeyError, Exception):\n"," return getpass.getpass(prompt_text)\n","\n","reddit = praw.Reddit(\n"," client_id=get_secret_or_prompt(\"REDDIT_CLIENT_ID\", \"Enter your REDDIT_CLIENT_ID: \"),\n"," client_secret=get_secret_or_prompt(\"REDDIT_CLIENT_SECRET\", \"Enter your REDDIT_CLIENT_SECRET: \"),\n"," user_agent=get_secret_or_prompt(\"REDDIT_USER_AGENT\", \"Enter your REDDIT_USER_AGENT: \"),\n"," username=get_secret_or_prompt(\"REDDIT_USERNAME\", \"Enter your REDDIT_USERNAME: \"),\n"," password=get_secret_or_prompt(\"REDDIT_PASSWORD\", \"Enter your REDDIT_PASSWORD: \"),\n",")\n","\n","# Quick verification\n","print(\"β
Successfully logged in as:\", reddit.user.me())\n","print(\"π Read-only mode:\", reddit.read_only)"],"metadata":{"cellView":"form","id":"WLopd-NFtHwY","colab":{"base_uri":"https://localhost:8080/"},"executionInfo":{"status":"ok","timestamp":1775894266603,"user_tz":-120,"elapsed":23446,"user":{"displayName":"","userId":""}},"outputId":"7e77565d-da42-40e2-e201-87624213c769"},"execution_count":null,"outputs":[{"output_type":"stream","name":"stdout","text":["\u001b[?25l \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m0.0/189.3 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91mββββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[91mβΈ\u001b[0m\u001b[90mβ\u001b[0m \u001b[32m184.3/189.3 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m189.3/189.3 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h"]},{"output_type":"stream","name":"stderr","text":["WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more info.\n","\n"]},{"output_type":"stream","name":"stdout","text":["β
Successfully logged in as: MoreAd2538\n","π Read-only mode: False\n"]}]},{"cell_type":"markdown","metadata":{"id":"V5i2-xwTGG4K"},"source":["#π Fetch content from Reddit"]},{"cell_type":"code","execution_count":null,"metadata":{"colab":{"base_uri":"https://localhost:8080/"},"id":"-ewlMjrTga21","executionInfo":{"status":"ok","timestamp":1775894481839,"user_tz":-120,"elapsed":22730,"user":{"displayName":"","userId":""}},"outputId":"a581750c-ddca-404d-e606-80f150c11510"},"outputs":[{"output_type":"stream","name":"stdout","text":["\u001b[?25l \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m0.0/182.3 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91mβββββββββββββββββββββββββββββββββββββ\u001b[0m\u001b[90mβΊ\u001b[0m\u001b[90mβ\u001b[0m \u001b[32m174.1/182.3 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90mβββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m182.3/182.3 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h\u001b[?25l \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m0.0/3.3 MB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[91mβββββββββββββββββββ\u001b[0m\u001b[91mβΈ\u001b[0m\u001b[90mββββββββββββββββββββ\u001b[0m \u001b[32m1.6/3.3 MB\u001b[0m \u001b[31m48.9 MB/s\u001b[0m eta \u001b[36m0:00:01\u001b[0m\r\u001b[2K \u001b[90mββββββββββββββββββββββββββββββββββββββββ\u001b[0m \u001b[32m3.3/3.3 MB\u001b[0m \u001b[31m49.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n","\u001b[?25h Mounting Google Drive...\n"]},{"output_type":"stream","name":"stderr","text":["WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more 
info.\n","\n"]},{"output_type":"stream","name":"stdout","text":["Drive already mounted at /content/drive; to attempt to forcibly remount, call drive.mount(\"/content/drive\", force_remount=True).\n"," Individual images stay local; only Zips/Txt go to: /content/drive/MyDrive/gonewild_reddit_downloads\n"," Fetching up to 500 posts from r/gonewild (new sorting, offset 0)...\n"]},{"output_type":"stream","name":"stderr","text":["WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more info.\n","\n","WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more info.\n","\n","WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more info.\n","\n","WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more info.\n","\n","WARNING:praw:It appears that you are using PRAW in an asynchronous environment.\n","It is strongly recommended to use Async PRAW: https://asyncpraw.readthedocs.io.\n","See https://praw.readthedocs.io/en/latest/getting_started/multiple_instances.html#discord-bots-and-asynchronous-environments for more 
info.\n","\n"]},{"output_type":"stream","name":"stdout","text":["β
Successfully fetched 500 posts.\n"," Combined titles saved β /content/drive/MyDrive/gonewild_reddit_downloads/gonewild_titles_500posts_offset0.txt\n"," External links saved β /content/drive/MyDrive/gonewild_reddit_downloads/gonewild_links_500posts_offset0.txt\n"," β 3 links (mostly RedGifs)\n","\n","β¨ ALL DONE!\n"]}],"source":["# === INTEGRATED REDDIT DOWNLOADER + MEDIA EXTRACTOR (Drive + Checkboxes + Sliders Edition) ===\n","# Replace BOTH of your previous cells with this single block and run it.\n","# All results (titles, links, images, frames, zips) are saved directly to your Google Drive.\n","# Images/frames are numbered sequentially (1.jpeg, 2.jpeg, ...) with optional paired .txt files.\n","\n","# @markdown ** Subreddit & Fetch Settings**\n","subreddit_name = \"gonewild\" # @param {type:\"string\"}\n","sort_method = \"new\" # @param [\"hot\", \"new\", \"top\"] {type:\"string\"}\n","num_posts_to_pull = 200 # @param {type:\"slider\", min:10, max:500, step:10}\n","offset_index = 0 # @param {type:\"slider\", min:0, max:500, step:10}\n","\n","# @markdown **β
Functionality Checkboxes (select any combination)**\n","thumbnail_low_res = True # @param {type:\"boolean\"}\n","#1) Thumbnail download at low res\n","proper_image_download = False # @param {type:\"boolean\"}\n","#2) Proper image download including sub images in galleries\n","gif_frame_extraction = False # @param {type:\"boolean\"}\n","#3) GIF + RedGifs video β keyframe extraction (original files are NEVER saved)\n","combined_titles_txt = True # @param {type:\"boolean\"}\n","#4) Combined txt file of titles\n","pair_txt_with_media = False # @param {type:\"boolean\"}\n","#5) Adding txt files to saved images in zips as enumerated pairs (1.txt, 2.txt, ...)\n","debug_mode = False # @param {type:\"boolean\"}\n","#6) Enable detailed debug printouts during media processing\n","\n","import os\n","import shutil\n","import glob\n","import requests\n","import subprocess\n","from google.colab import files, drive\n","\n","if gif_frame_extraction:\n","    # Import yt-dlp, installing it first if missing.\n","    # (The previous check `not yt_dlp` raised NameError on a fresh kernel,\n","    # because yt_dlp was never imported before being referenced.)\n","    try:\n","        import yt_dlp\n","    except ImportError:\n","        !pip install -q yt-dlp\n","        import yt_dlp\n","\n","\n","# ========================== DRIVE SETUP ==========================\n","print(\" Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=False)\n","\n","drive_base_dir = f\"/content/drive/MyDrive/{subreddit_name}_reddit_downloads\"\n","os.makedirs(drive_base_dir, exist_ok=True)\n","\n","# Local temp media storage (Not on Drive)\n","local_media_dir = f\"/content/temp_output/{subreddit_name}_media\"\n","if os.path.exists(local_media_dir): shutil.rmtree(local_media_dir)\n","os.makedirs(local_media_dir, exist_ok=True)\n","\n","local_output_dir = \"/content/output\"\n","os.makedirs(local_output_dir, exist_ok=True)\n","\n","print(f\" Individual images stay local; only Zips/Txt go to: {drive_base_dir}\")\n","\n","# ========================== FETCH POSTS ==========================\n","print(f\" Fetching up to {num_posts_to_pull} posts from r/{subreddit_name} 
({sort_method} sorting, offset {offset_index})...\")\n","\n","sub = reddit.subreddit(subreddit_name)\n","\n","if sort_method == \"hot\":\n"," iterator = sub.hot(limit=offset_index + num_posts_to_pull + 50)\n","elif sort_method == \"new\":\n"," iterator = sub.new(limit=offset_index + num_posts_to_pull + 50)\n","elif sort_method == \"top\":\n"," iterator = sub.top(limit=offset_index + num_posts_to_pull + 50)\n","else:\n"," iterator = sub.hot(limit=offset_index + num_posts_to_pull + 50)\n","\n","all_posts = list(iterator)\n","posts = all_posts[offset_index : offset_index + num_posts_to_pull]\n","\n","print(f\"β
Successfully fetched {len(posts)} posts.\")\n","\n","# === NEW: Filename suffix with actual fetched count + offset ===\n","fetch_suffix = f\"_{len(posts)}posts_offset{offset_index}\"\n","\n","titles = []\n","external_links = []\n","\n","for submission in posts:\n"," cleaned_title = (submission.title\n"," .replace('^', '').replace('{', '').replace('}', '').replace('|', '')\n"," .replace('[','').replace(']','').replace('\"',''))\n"," titles.append(cleaned_title)\n","\n"," url = getattr(submission, 'url', None)\n"," if (url and url.startswith(('http://', 'https://')) and\n"," not any(domain in url for domain in ['reddit.com', 'redd.it'])):\n"," external_links.append(url)\n","\n","# ========================== OPTION 4: Combined Titles TXT ==========================\n","if combined_titles_txt:\n"," combined_content = '{' + '|'.join(titles) + '}'\n"," titles_file_drive = f\"{drive_base_dir}/{subreddit_name}_titles{fetch_suffix}.txt\"\n"," with open(titles_file_drive, \"w\", encoding=\"utf-8\") as f:\n"," f.write(combined_content)\n"," print(f\" Combined titles saved β {titles_file_drive}\")\n","\n","# ========================== External Links ==========================\n","if external_links:\n"," links_file = f\"{drive_base_dir}/{subreddit_name}_links{fetch_suffix}.txt\"\n"," with open(links_file, \"w\", encoding=\"utf-8\") as f:\n"," for link in external_links: f.write(link + \"\\n\")\n"," print(f\" External links saved β {links_file}\")\n"," print(f\" β {len(external_links)} links (mostly RedGifs)\")\n","\n","# ========================== MEDIA PROCESSING ==========================\n","any_media = thumbnail_low_res or proper_image_download or gif_frame_extraction\n","\n","if any_media:\n","\n"," temp_dir = \"/content/temp_download\"\n"," os.makedirs(temp_dir, exist_ok=True)\n","\n"," global_counter = 1\n","\n"," def save_media_and_txt(source, is_url=False, title=\"\"):\n"," global global_counter\n"," if is_url:\n"," clean_url = source.split('?')[0]\n"," ext = 
os.path.splitext(clean_url)[1].lower()\n"," if ext not in ['.jpg', '.jpeg', '.png']: ext = '.jpg'\n"," temp_file = f\"{temp_dir}/dl_{global_counter}{ext}\"\n"," try:\n"," r = requests.get(source, stream=True, timeout=60)\n"," r.raise_for_status()\n"," with open(temp_file, 'wb') as f:\n"," for chunk in r.iter_content(8192): f.write(chunk)\n"," except Exception:\n"," return\n"," else:\n"," temp_file = source\n"," ext = os.path.splitext(temp_file)[1].lower() or '.jpeg'\n","\n"," local_path = f\"{local_media_dir}/{global_counter}{ext}\"\n"," shutil.copy2(temp_file, local_path)\n","\n"," if pair_txt_with_media:\n"," with open(f\"{local_media_dir}/{global_counter}.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(title)\n","\n"," global_counter += 1\n"," print(f\" β
Saved locally #{global_counter-1} \", end=\"\\r\")\n"," if is_url and os.path.exists(temp_file): os.remove(temp_file)\n","\n"," for idx, submission in enumerate(posts, 1):\n"," cleaned_title = titles[idx-1]\n","\n"," if thumbnail_low_res:\n"," thumb_url = getattr(submission, 'thumbnail', None)\n"," if thumb_url and thumb_url.startswith('http'):\n"," save_media_and_txt(thumb_url, is_url=True, title=cleaned_title)\n","\n"," if proper_image_download:\n"," url = getattr(submission, 'url', None)\n"," if url and any(url.lower().endswith(e) for e in ['.jpg', '.jpeg', '.png']):\n"," save_media_and_txt(url, is_url=True, title=cleaned_title)\n","\n"," # === GIF + RedGifs keyframe extraction (debug toggleable) ===\n"," if gif_frame_extraction:\n"," url = getattr(submission, 'url', None)\n"," if url and url.startswith(('http://', 'https://')):\n"," if debug_mode:\n"," print(f\"\u001f DEBUG [{idx:03d}]: Checking URL β {url[:90]}...\")\n","\n"," if url.lower().endswith('.gif') or 'redgifs.com' in url.lower():\n"," if debug_mode:\n"," print(f\"β
DEBUG: Eligible for keyframe extraction β {url[:90]}...\")\n"," temp_media = None\n"," try:\n"," # 1. Download\n"," if url.lower().endswith('.gif'):\n"," temp_media = f\"{temp_dir}/dl_gif_{idx}.gif\"\n"," r = requests.get(url, stream=True, timeout=60)\n"," r.raise_for_status()\n"," with open(temp_media, 'wb') as f:\n"," for chunk in r.iter_content(8192):\n"," f.write(chunk)\n"," if debug_mode:\n"," print(f\" β Downloaded GIF ({os.path.getsize(temp_media)/1024/1024:.1f} MB)\")\n"," else:\n"," # RedGifs video\n"," temp_media = f\"{temp_dir}/dl_video_{idx}.mp4\"\n"," ydl_opts = {\n"," 'outtmpl': temp_media,\n"," 'quiet': True,\n"," 'no_warnings': True,\n"," 'format': 'bestvideo+bestaudio/best',\n"," 'merge_output_format': 'mp4',\n"," }\n"," with yt_dlp.YoutubeDL(ydl_opts) as ydl:\n"," ydl.download([url])\n"," if debug_mode:\n"," print(f\" β Downloaded RedGifs video ({os.path.getsize(temp_media)/1024/1024:.1f} MB)\")\n","\n"," # 2. Keyframe extraction\n"," output_pattern = f\"{temp_dir}/kf_{idx}_%04d.jpg\"\n"," cmd = [\n"," 'ffmpeg', '-y', '-i', temp_media,\n"," '-vf', \"select='gt(scene,0.15)',setpts=N/(FRAME_RATE*TB)\",\n"," '-vsync', 'vfr',\n"," '-q:v', '5',\n"," output_pattern\n"," ]\n"," subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=False)\n"," if debug_mode:\n"," print(f\" π ffmpeg keyframe extraction finished (scene threshold 0.15)\")\n","\n"," # 3. Save keyframes\n"," extracted_frames = sorted(glob.glob(f\"{temp_dir}/kf_{idx}_*.jpg\"))\n"," if debug_mode:\n"," print(f\" π Extracted {len(extracted_frames)} keyframes β saving to ZIP\")\n","\n"," for frame_path in extracted_frames:\n"," save_media_and_txt(frame_path, is_url=False, title=cleaned_title)\n"," os.remove(frame_path)\n","\n"," # 4. 
Cleanup original\n"," if temp_media and os.path.exists(temp_media):\n"," os.remove(temp_media)\n"," if debug_mode:\n"," print(f\" ποΈ Cleaned up original media file\")\n","\n"," except Exception as e:\n"," if debug_mode:\n"," print(f\" β οΈ ERROR processing {url[:80]}... β {e}\")\n"," if temp_media and os.path.exists(temp_media):\n"," os.remove(temp_media)\n"," continue\n","\n"," print(f\"\\n\\nβ
MEDIA CACHED! Creating ZIP on Drive...\")\n"," zip_name = f\"{subreddit_name}_media{fetch_suffix}\"\n"," shutil.make_archive(f\"{drive_base_dir}/{zip_name}\", 'zip', local_media_dir)\n"," print(f\" ZIP created on Drive β {drive_base_dir}/{zip_name}.zip\")\n","\n","print(\"\\nβ¨ ALL DONE!\")"]},{"cell_type":"markdown","metadata":{"id":"LaxF_cnIGNMh"},"source":["# Convert a dataset into a combined text\n","\n","{text1|text2|....} useful on perchance.org for random selection of text , in the prompt box https://perchance.org/ai-text-to-image-generator"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"trl0Eg8Xqy6B"},"outputs":[],"source":["from google.colab import files\n","import zipfile\n","import os\n","from pathlib import Path\n","import shutil # Added for rmtree\n","\n","print(\"π€ Please upload your ZIP file (the one containing 1.txt, 2.txt, 3.txt, β¦)\")\n","\n","# Let the user upload the ZIP\n","uploaded = files.upload()\n","\n","# Find the uploaded ZIP file\n","zip_files = [f for f in uploaded.keys() if f.lower().endswith('.zip')]\n","if not zip_files:\n"," raise ValueError(\"β No .zip file was uploaded. Please try again.\")\n","\n","zip_filename = zip_files[0]\n","zip_path = f\"/content/{zip_filename}\"\n","\n","# Extract the ZIP to a temporary folder\n","extract_dir = \"/content/extracted_txt_files\"\n","\n","# --- Added: Clear the directory if it exists ---\n","if os.path.exists(extract_dir):\n"," shutil.rmtree(extract_dir)\n","# ---\n","os.makedirs(extract_dir, exist_ok=True)\n","\n","with zipfile.ZipFile(zip_path, 'r') as zip_ref:\n"," zip_ref.extractall(extract_dir)\n","\n","print(f\"β
ZIP extracted to {extract_dir}\")\n","\n","# Get all .txt files and sort them numerically (1.txt, 2.txt, 10.txt, etc.)\n","txt_paths = sorted(\n"," list(Path(extract_dir).glob(\"*.txt\")),\n"," key=lambda p: f\"{int(p.stem):05d}\" if p.stem.isdigit() else p.stem.lower()\n",")\n","\n","if not txt_paths:\n"," raise ValueError(\"β No .txt files found inside the ZIP!\")\n","\n","print(f\"π Found {len(txt_paths)} title files. Processing...\")\n","\n","# Read, clean, and combine titles\n","cleaned_titles = []\n","for txt_path in txt_paths:\n"," with open(txt_path, \"r\", encoding=\"utf-8\") as f:\n"," title = f.read().strip()\n","\n"," # Remove the forbidden characters: ^ { } | [] and newlines\n"," cleaned = (title\n"," .replace('^', '')\n"," .replace('{', '')\n"," .replace('}', '')\n"," .replace('|', '')\n"," .replace('\"','')\n"," .replace('>', '')\n"," .replace('<','')\n"," .replace('[','') # New: Remove opening bracket\n"," .replace(']','') # New: Remove closing bracket\n"," .replace('\\n', ' ')) # New: Replace newlines with spaces\n","\n"," cleaned_titles.append(cleaned)\n","\n","# Build the exact format you want: {text1|text2|text3|...}\n","combined_content = '{' + '|'.join(cleaned_titles) + '}'\n","\n","# Save to a single .txt file (no newlines)\n","output_file = \"/content/combined_titles.txt\"\n","with open(output_file, \"w\", encoding=\"utf-8\") as f:\n"," f.write(combined_content)\n","\n","# Confirmation\n","print(f\"\\nβ
Done! Combined {len(cleaned_titles)} titles into one line.\")\n","print(f\"π Saved as: {output_file}\")\n","print(f\" (First 100 characters: {combined_content[:100]}...)\")\n","\n","# Auto-download the combined file\n","files.download(output_file)\n","\n","print(\"π₯ Download started. You can now use this file wherever you need the {title1|title2|...} format.\")"]},{"cell_type":"markdown","source":["# Sample a set from drive"],"metadata":{"id":"SbX9rloh3iZb"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"6edbf718"},"outputs":[],"source":["#@markdown Randomly sample a selected zip file (with many images for example) , and download as smaller zip\n","import zipfile\n","import os\n","import random\n","import shutil\n","from glob import glob\n","from google.colab import files\n","\n","# --- Configuration ---\n","input_zip_file = '' #@param {type:'string'}\n","\n","extraction_dir = '/content/extracted_keyframes_temp'\n","output_zip_name = 'random_300_keyframes.zip'\n","num_images_to_pick = 180 #@param {type:'slider',min:0,step:5,max:1000}\n","\n","# --- Step 1: Unzip the input file ---\n","print(f\"π Unzipping {input_zip_file}...\")\n","if os.path.exists(extraction_dir):\n"," shutil.rmtree(extraction_dir)\n","os.makedirs(extraction_dir, exist_ok=True)\n","\n","try:\n"," with zipfile.ZipFile(input_zip_file, 'r') as zip_ref:\n"," zip_ref.extractall(extraction_dir)\n"," print(\"β
Unzip complete.\")\n","except Exception as e:\n"," print(f\"β Error unzipping file: {e}\")\n"," raise\n","\n","# --- Step 2: Find all images in the extracted directory ---\n","all_images = []\n","for ext in ('*.jpg', '*.jpeg', '*.png', '*.gif'): # Add more extensions if needed\n"," all_images.extend(glob(os.path.join(extraction_dir, '**', ext), recursive=True))\n","\n","if not all_images:\n"," print(\"β οΈ No images found in the extracted archive.\")\n"," raise FileNotFoundError(\"No images to process.\")\n","\n","print(f\"Found {len(all_images)} total images.\")\n","\n","# --- Step 3: Randomly select images ---\n","if len(all_images) <= num_images_to_pick:\n"," selected_images = all_images\n"," print(f\"Selecting all {len(all_images)} images (fewer than {num_images_to_pick} available).\")\n","else:\n"," selected_images = random.sample(all_images, num_images_to_pick)\n"," print(f\"β
Randomly selected {len(selected_images)} images.\")\n","\n","# --- Step 4: Create a new zip file with selected images ---\n","output_zip_path = os.path.join('/content', output_zip_name)\n","\n","print(f\"π Creating new zip file: {output_zip_name}...\")\n","with zipfile.ZipFile(output_zip_path, 'w', zipfile.ZIP_DEFLATED) as new_zip:\n"," for img_path in selected_images:\n"," # Add image to zip, preserving its relative path within the extracted folder\n"," arcname = os.path.relpath(img_path, extraction_dir)\n"," new_zip.write(img_path, arcname)\n","\n","print(f\"β
New zip file created: {output_zip_path}\")\n","\n","# --- Step 5: Provide as widget output for download ---\n","print(\"π₯ Initiating download of the new zip file...\")\n","files.download(output_zip_path)\n","\n","# --- Step 6: Cleanup temporary files ---\n","print(\"ποΈ Cleaning up temporary extraction directory...\")\n","shutil.rmtree(extraction_dir)\n","print(\"β
Cleanup complete.\")\n","print(\"You can find the downloaded zip file in your local downloads.\")"]},{"cell_type":"markdown","metadata":{"id":"qisU9VeIyzX2"},"source":["# itch.io (Indie Game Website) coverpage scraper\n"]},{"cell_type":"code","execution_count":null,"metadata":{"id":"HDCueX6d3K00"},"outputs":[],"source":["#@markdown # π itch.io Image-Text Dataset Creator\n","#@markdown **Fetch enumerated thumbnails + matching TXT files β ZIP β Google Drive**\n","\n","#@markdown ---\n","#@markdown ### π Choose your settings below then **Run this cell**\n","\n","num_games = 1000 #@param {type:\"slider\", min:10, max:5000, step:10, description:\"How many games to fetch\"}\n","\n","sort_by = \"Most Recent\" #@param [\"Popular (default)\", \"New & Popular\", \"Top Sellers\", \"Top Rated\", \"Most Recent\"]\n","\n","subcategory = \"None (All Games)\" #@param [\"None (All Games)\", \"Genre: Action\", \"Genre: Adventure\", \"Genre: Arcade\", \"Genre: Card Game\", \"Genre: Educational\", \"Genre: Fighting\", \"Genre: Platformer\", \"Genre: Puzzle\", \"Genre: RPG\", \"Genre: Shooter\", \"Genre: Simulation\", \"Genre: Strategy\", \"Genre: Visual Novel\", \"Platform: Web\", \"Platform: Windows\", \"Platform: macOS\", \"Platform: Linux\", \"Platform: Android\", \"Tag: 2D\", \"Tag: Pixel Art\", \"Tag: Horror\", \"Tag: Multiplayer\", \"Tag: Roguelike\", \"Tag: Retro\"]\n","\n","#@markdown ---\n","\n","#@markdown **After changing the values above, just click the βΆοΈ Run button on this cell.**\n","\n","# ================================================\n","# β
READY-TO-RUN COLAB CELL (single cell version)\n","# ================================================\n","\n","import requests\n","from bs4 import BeautifulSoup\n","import os\n","import re\n","import json\n","import time\n","import shutil\n","import datetime\n","from urllib.parse import urljoin\n","from IPython.display import display, HTML, Image\n","from google.colab import drive\n","\n","print(\"β
Starting itch.io scraper with your chosen settings...\")\n","\n","# ====================== MAP USER CHOICES TO URL SLUGS ======================\n","sort_map = {\n"," \"Popular (default)\": \"\",\n"," \"New & Popular\": \"new-and-popular\",\n"," \"Top Sellers\": \"top-sellers\",\n"," \"Top Rated\": \"top-rated\",\n"," \"Most Recent\": \"newest\"\n","}\n","\n","filter_map = {\n"," \"None (All Games)\": \"\",\n"," \"Genre: Action\": \"genre-action\",\n"," \"Genre: Adventure\": \"genre-adventure\",\n"," \"Genre: Arcade\": \"genre-arcade\",\n"," \"Genre: Card Game\": \"genre-card-game\",\n"," \"Genre: Educational\": \"genre-educational\",\n"," \"Genre: Fighting\": \"genre-fighting\",\n"," \"Genre: Platformer\": \"genre-platformer\",\n"," \"Genre: Puzzle\": \"genre-puzzle\",\n"," \"Genre: RPG\": \"genre-rpg\",\n"," \"Genre: Shooter\": \"genre-shooter\",\n"," \"Genre: Simulation\": \"genre-simulation\",\n"," \"Genre: Strategy\": \"genre-strategy\",\n"," \"Genre: Visual Novel\": \"genre-visual-novel\",\n"," \"Platform: Web\": \"platform-web\",\n"," \"Platform: Windows\": \"platform-windows\",\n"," \"Platform: macOS\": \"platform-macos\",\n"," \"Platform: Linux\": \"platform-linux\",\n"," \"Platform: Android\": \"platform-android\",\n"," \"Tag: 2D\": \"tag-2d\",\n"," \"Tag: Pixel Art\": \"tag-pixel-art\",\n"," \"Tag: Horror\": \"tag-horror\",\n"," \"Tag: Multiplayer\": \"tag-multiplayer\",\n"," \"Tag: Roguelike\": \"tag-roguelike\",\n"," \"Tag: Retro\": \"tag-retro\"\n","}\n","\n","sort_slug = sort_map[sort_by]\n","filter_slug = filter_map[subcategory]\n","\n","# ====================== CONFIG ======================\n","MAX_PAGES = 20\n","DELAY_BETWEEN_PAGES = 1.5\n","HEADERS = {\n"," \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n"," \"(KHTML, like Gecko) Chrome/134.0 Safari/537.36\"\n","}\n","\n","# Build the base URL according to user selections\n","base_url = \"https://itch.io/games\"\n","if sort_slug:\n"," base_url += 
f\"/{sort_slug}\"\n","if filter_slug:\n"," base_url += f\"/{filter_slug}\"\n","\n","print(f\"π Target: {base_url} | Fetching up to {num_games} games\")\n","\n","# Mount Google Drive\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create working folder\n","dataset_dir = \"/content/itch_dataset\"\n","os.makedirs(dataset_dir, exist_ok=True)\n","\n","# ====================== SCRAPE MULTIPLE PAGES ======================\n","pairs = []\n","page = 1\n","\n","while len(pairs) < num_games and page <= MAX_PAGES:\n"," url = f\"{base_url}?page={page}\" if page > 1 else base_url\n"," print(f\"π Scraping page {page} β {url}\")\n","\n"," try:\n"," response = requests.get(url, headers=HEADERS, timeout=20)\n"," if response.status_code != 200:\n"," print(f\"β Page {page} failed (HTTP {response.status_code})\")\n"," break\n","\n"," soup = BeautifulSoup(response.text, \"html.parser\")\n"," game_cells = soup.find_all(\"div\", class_=\"game_cell\")\n","\n"," if not game_cells:\n"," print(\"β
No more games on this page.\")\n"," break\n","\n"," added = 0\n"," for cell in game_cells:\n"," if len(pairs) >= num_games:\n"," break\n","\n"," # Clean title (no price spam)\n"," title_tag = cell.find(\"div\", class_=\"game_title\")\n"," if not title_tag:\n"," continue\n"," title_link = title_tag.find(\"a\", class_=\"title\")\n"," title = title_link.get_text(strip=True) if title_link else title_tag.get_text(strip=True).split(\"$\")[0].strip()\n"," if not title:\n"," continue\n","\n"," # Extract thumbnail (robust for current itch.io layout)\n"," img_url = None\n"," img_tag = cell.find(\"img\")\n"," if img_tag:\n"," for attr in [\"data-lazy-src\", \"data-lazy_src\", \"data-src\", \"src\"]:\n"," img_url = img_tag.get(attr)\n"," if img_url:\n"," break\n"," if not img_url and img_tag.get(\"srcset\"):\n"," img_url = img_tag.get(\"srcset\").split(\",\")[0].strip().split(\" \")[0]\n","\n"," # Fallback: background-image\n"," if not img_url:\n"," for el in cell.find_all(lambda t: t.has_attr(\"style\") and \"background-image\" in t.get(\"style\", \"\").lower()):\n"," style = el.get(\"style\", \"\")\n"," match = re.search(r'background-image\\s*:\\s*url\\([\\'\\\"]?([^\\'\\\"]+)[\\'\\\"]?\\)', style, re.IGNORECASE)\n"," if match:\n"," img_url = match.group(1)\n"," break\n","\n"," if img_url:\n"," if img_url.startswith(\"//\"):\n"," img_url = \"https:\" + img_url\n"," elif not img_url.startswith((\"http://\", \"https://\")):\n"," img_url = urljoin(\"https://itch.io\", img_url)\n","\n"," pairs.append((title, img_url))\n"," added += 1\n","\n"," print(f\" β Added {added} games (total so far: {len(pairs)})\")\n","\n"," except Exception as e:\n"," print(f\"β Error on page {page}: {e}\")\n"," break\n","\n"," page += 1\n"," time.sleep(DELAY_BETWEEN_PAGES)\n","\n","if not pairs:\n"," print(\"β No games found with current filters. Try different settings.\")\n","else:\n"," print(f\"\\nβ
Collected {len(pairs)} games. Downloading images and creating TXT files...\")\n","\n"," # ====================== DOWNLOAD ENUMERATED FILES ======================\n"," downloaded = 0\n"," for idx, (title, img_url) in enumerate(pairs, start=1):\n"," num_str = f\"{idx:04d}\"\n"," img_path = f\"{dataset_dir}/{num_str}.jpg\"\n"," txt_path = f\"{dataset_dir}/{num_str}.txt\"\n","\n"," try:\n"," img_response = requests.get(img_url, headers=HEADERS, timeout=15)\n"," if img_response.status_code == 200:\n"," with open(img_path, \"wb\") as f:\n"," f.write(img_response.content)\n"," with open(txt_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(title)\n"," downloaded += 1\n"," if downloaded % 10 == 0 or downloaded == len(pairs):\n"," print(f\" β
Saved {downloaded:04d}.jpg + {num_str}.txt\")\n"," else:\n"," print(f\"β οΈ Failed to download image {num_str}\")\n"," except Exception as e:\n"," print(f\"β Error downloading {num_str}: {e}\")\n","\n"," # ====================== CREATE ZIP & SAVE TO DRIVE ======================\n"," timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n"," zip_name = f\"itch_games_{timestamp}\"\n"," zip_path_local = f\"/content/{zip_name}.zip\"\n","\n"," print(f\"\\nποΈ Creating ZIP file with {downloaded} image+txt pairs...\")\n"," shutil.make_archive(f\"/content/{zip_name}\", 'zip', dataset_dir)\n","\n"," # Copy to Google Drive\n"," drive_folder = \"/content/drive/MyDrive/itch_datasets\"\n"," os.makedirs(drive_folder, exist_ok=True)\n"," drive_zip_path = f\"{drive_folder}/{zip_name}.zip\"\n"," shutil.copy(zip_path_local, drive_zip_path)\n","\n"," print(\"\\n\" + \"=\"*80)\n"," print(\"π SUCCESS! Your dataset is ready\")\n"," print(f\"π¦ ZIP file: {zip_name}.zip\")\n"," print(f\"π€ Saved to Google Drive β {drive_zip_path}\")\n"," print(f\" β’ Files inside: 0001.jpg + 0001.txt, 0002.jpg + 0002.txt, ...\")\n"," print(f\" β’ Total pairs: {downloaded}\")\n"," print(\"=\"*80)\n","\n"," # ====================== PREVIEW FIRST 3 PAIRS ======================\n"," print(\"\\nοΏ½οΏ½οΏ½οΏ½ Preview of first 3 pairs (click images to enlarge):\")\n"," for i in range(min(3, len(pairs))):\n"," num_str = f\"{i+1:04d}\"\n"," img_file = f\"{dataset_dir}/{num_str}.jpg\"\n"," display(HTML(f\"{num_str}. {pairs[i][0]}
\"))\n"," display(Image(filename=img_file, width=400))\n"," print(\"β\" * 70)\n","\n"," print(f\"\\nβ
All files are also in: {dataset_dir} (you can download the folder manually if needed)\")"]},{"cell_type":"markdown","source":["# VNDB (Visual Novel Database) coverpage fetcher by tag\n"],"metadata":{"id":"0VlyEacYp7Pn"}},{"cell_type":"code","execution_count":null,"metadata":{"id":"70mWljq0EJCT"},"outputs":[],"source":["#@markdown # π VNDB Image-Text Dataset Creator **(OFFSET SUPPORT + METADATA)**\n","#@markdown **β
Added offset slider + metadata in ZIP**\n","#@markdown Now you can fetch any batch (e.g. first 1000 β offset 0, next 1000 β offset 1000, etc.)\n","\n","#@markdown ---\n","#@markdown ### π Choose your settings below then **Run this cell**\n","\n","num_vns = 1000 #@param {type:\"slider\", min:10, max:5000, step:10, description:\"How many visual novels to fetch in this batch\"}\n","\n","offset = 0 #@param {type:\"slider\", min:0, max:20000, step:100}\n","#description:\"Offset: skip this many VNs before starting (0 = first batch, 1000 = second batch, etc.)\"}\n","\n","sort_by = \"Most Recent (released desc)\" #@param [\"Most Recent (released desc)\", \"Highest Rated\", \"Most Voted\"]\n","\n","#@markdown **Tag ID** (from your link: https://vndb.org/g3560)\n","tag_id = \"g3560\" #@param {type:\"string\", description:\"VNDB tag ID (e.g. g3560 = 3D Graphics)\"}\n","\n","#@markdown ---\n","\n","#@markdown **After changing the values above, just click the βΆοΈ Run button on this cell.**\n","\n","# ================================================\n","# β
FULLY DEBUGGED + OFFSET + METADATA READY-TO-RUN COLAB CELL\n","# ================================================\n","\n","import requests\n","import os\n","import json\n","import time\n","import shutil\n","import datetime\n","from IPython.display import display, HTML, Image\n","from google.colab import drive\n","\n","print(\"β
Starting VNDB scraper with OFFSET support...\")\n","\n","# ====================== CONFIG ======================\n","MAX_RESULTS_PER_PAGE = 100\n","DELAY_BETWEEN_PAGES = 0.5\n","HEADERS = {\n"," \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 \"\n"," \"(KHTML, like Gecko) Chrome/134.0 Safari/537.36\",\n"," \"Content-Type\": \"application/json\"\n","}\n","\n","API_URL = \"https://api.vndb.org/kana/vn\"\n","\n","# Sort mapping\n","sort_map = {\n"," \"Most Recent (released desc)\": {\"sort\": \"released\", \"reverse\": True},\n"," \"Highest Rated\": {\"sort\": \"rating\", \"reverse\": True},\n"," \"Most Voted\": {\"sort\": \"votecount\", \"reverse\": True}\n","}\n","\n","selected_sort = sort_map[sort_by]\n","\n","print(f\"π Target: VNDB Tag {tag_id} | Offset: {offset} | Fetching up to {num_vns} VNs (sorted by {sort_by})\")\n","\n","# Mount Google Drive\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=True)\n","\n","# Create working folder\n","dataset_dir = \"/content/vndb_dataset\"\n","os.makedirs(dataset_dir, exist_ok=True)\n","\n","# ====================== CALCULATE PAGINATION WITH OFFSET ======================\n","start_page = (offset // MAX_RESULTS_PER_PAGE) + 1\n","skip_in_first_page = offset % MAX_RESULTS_PER_PAGE\n","\n","print(f\"π Calculated start_page = {start_page}, skip first {skip_in_first_page} items on that page\")\n","\n","# ====================== FETCH VIA VNDB KANA API ======================\n","pairs = []\n","page = start_page\n","items_collected = 0\n","\n","while len(pairs) < num_vns:\n"," payload = {\n"," \"filters\": [\"tag\", \"=\", tag_id],\n"," \"fields\": \"id, title, image.url\",\n"," \"sort\": selected_sort[\"sort\"],\n"," \"reverse\": selected_sort[\"reverse\"],\n"," \"results\": MAX_RESULTS_PER_PAGE,\n"," \"page\": page\n"," }\n","\n"," # ==================== FULL DEBUG PRINT ====================\n"," print(f\"\\nπ === API REQUEST PAGE {page} (offset={offset}) 
===\")\n"," print(\"Payload sent:\")\n"," print(json.dumps(payload, indent=2))\n"," # ========================================================\n","\n"," try:\n"," response = requests.post(API_URL, headers=HEADERS, json=payload, timeout=30)\n","\n"," print(f\" π‘ Status code: {response.status_code}\")\n","\n"," if response.status_code != 200:\n"," print(\" β ERROR RESPONSE BODY:\")\n"," try:\n"," error_json = response.json()\n"," print(json.dumps(error_json, indent=2))\n"," except:\n"," print(response.text[:1000])\n"," break\n","\n"," data = response.json()\n"," results = data.get(\"results\", [])\n","\n"," if not results:\n"," print(\"β
No more results.\")\n"," break\n","\n"," # Handle offset skipping on the very first page we fetch\n"," if page == start_page and skip_in_first_page > 0:\n"," print(f\" βοΈ Skipping first {skip_in_first_page} items due to offset\")\n"," results = results[skip_in_first_page:]\n"," skip_in_first_page = 0\n","\n"," added = 0\n"," for vn in results:\n"," if len(pairs) >= num_vns:\n"," break\n","\n"," title = vn.get(\"title\", \"\").strip()\n"," if not title:\n"," continue\n","\n"," img_url = vn.get(\"image\", {}).get(\"url\") if isinstance(vn.get(\"image\"), dict) else None\n","\n"," if img_url:\n"," pairs.append((title, img_url))\n"," added += 1\n"," items_collected += 1\n","\n"," print(f\" β Added {added} VNs (total so far: {len(pairs)})\")\n","\n"," if not data.get(\"more\", False):\n"," print(\"β
Reached the end of results.\")\n"," break\n","\n"," except Exception as e:\n"," print(f\"β Exception on API page {page}: {e}\")\n"," break\n","\n"," page += 1\n"," time.sleep(DELAY_BETWEEN_PAGES)\n","\n","if not pairs:\n"," print(\"\\nβ No visual novels found in this offset range. Check debug output above.\")\n","else:\n"," print(f\"\\nβ
Collected {len(pairs)} visual novels (offset {offset}). Downloading images and creating TXT files...\")\n","\n"," # ====================== DOWNLOAD ENUMERATED FILES ======================\n"," downloaded = 0\n"," for idx, (title, img_url) in enumerate(pairs, start=1):\n"," num_str = f\"{idx:04d}\"\n"," img_path = f\"{dataset_dir}/{num_str}.jpg\"\n"," txt_path = f\"{dataset_dir}/{num_str}.txt\"\n","\n"," try:\n"," img_response = requests.get(img_url, headers=HEADERS, timeout=15)\n"," if img_response.status_code == 200:\n"," with open(img_path, \"wb\") as f:\n"," f.write(img_response.content)\n"," with open(txt_path, \"w\", encoding=\"utf-8\") as f:\n"," f.write(title)\n"," downloaded += 1\n"," if downloaded % 10 == 0 or downloaded == len(pairs):\n"," print(f\" β
Saved {num_str}.jpg + {num_str}.txt\")\n"," else:\n"," print(f\"β οΈ Failed to download image {num_str} (HTTP {img_response.status_code})\")\n"," except Exception as e:\n"," print(f\"β Error downloading {num_str}: {e}\")\n","\n"," # ====================== WRITE METADATA (index/offset/tag) ======================\n"," timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n"," with open(f\"{dataset_dir}/INFO.txt\", \"w\", encoding=\"utf-8\") as f:\n"," f.write(f\"VNDB Tag ID : {tag_id}\\n\")\n"," f.write(f\"Offset : {offset}\\n\")\n"," f.write(f\"Batch Size : {num_vns}\\n\")\n"," f.write(f\"Actual Downloaded: {downloaded}\\n\")\n"," f.write(f\"Sort Order : {sort_by}\\n\")\n"," f.write(f\"Start Page : {start_page}\\n\")\n"," f.write(f\"Collected on : {timestamp}\\n\")\n"," f.write(f\"File index 0001 = VN #{offset + 1} in the full tag list\\n\")\n","\n"," print(\"π Metadata INFO.txt written (contains index/offset/tag info)\")\n","\n"," # ====================== CREATE ZIP & SAVE TO DRIVE ======================\n"," zip_name = f\"vndb_{tag_id}_offset{offset:04d}_{num_vns}vns_{timestamp}\"\n"," zip_path_local = f\"/content/{zip_name}.zip\"\n","\n"," print(f\"\\nποΈ Creating ZIP file with {downloaded} image+txt pairs + INFO.txt...\")\n"," shutil.make_archive(f\"/content/{zip_name}\", 'zip', dataset_dir)\n","\n"," drive_folder = \"/content/drive/MyDrive/vndb_datasets\"\n"," os.makedirs(drive_folder, exist_ok=True)\n"," drive_zip_path = f\"{drive_folder}/{zip_name}.zip\"\n"," shutil.copy(zip_path_local, drive_zip_path)\n","\n"," print(\"\\n\" + \"=\"*80)\n"," print(\"π SUCCESS! Your dataset is ready\")\n"," print(f\"π¦ ZIP file: {zip_name}.zip\")\n"," print(f\"π€ Saved to Google Drive β {drive_zip_path}\")\n"," print(f\" β’ Contains: 0001.jpg + 0001.txt ... 
+ INFO.txt (with offset/tag/index)\")\n"," print(f\" β’ Total pairs: {downloaded}\")\n"," print(\"=\"*80)\n","\n"," # ====================== PREVIEW FIRST 3 PAIRS ======================\n"," print(\"\\nπΈ Preview of first 3 pairs (click images to enlarge):\")\n"," for i in range(min(3, len(pairs))):\n"," num_str = f\"{i+1:04d}\"\n"," img_file = f\"{dataset_dir}/{num_str}.jpg\"\n"," display(HTML(f\"{num_str}. {pairs[i][0]}
\"))\n"," display(Image(filename=img_file, width=400))\n"," print(\"β\" * 70)\n","\n"," print(f\"\\nβ
All files are also in: {dataset_dir} (you can download the folder manually if needed)\")"]},{"cell_type":"markdown","source":["# Pinterest fetch"],"metadata":{"id":"JrtsI98cmAxB"}},{"cell_type":"markdown","metadata":{"id":"HO3NmF03QDpt"},"source":["Pinterest board downloader\n","1. Install the EditThisCookie (or \"Get cookies.txt LOCALLY\") Chrome extension.\n","2. Log into Pinterest in Chrome β open your board.\n","3. Click the extension icon β Export cookies for pinterest.com β save as cookies.txt (plain text / Netscape format).\n","4. In Colab, click the folder icon (left sidebar) β upload cookies.txt to your google drive."]},{"cell_type":"code","source":["# ==================== SINGLE CELL - FULL PINTEREST BOARD DOWNLOADER (Cookies from Google Drive) ====================\n","\n","# Install gallery-dl\n","!pip install -q gallery-dl\n","\n","import os\n","from google.colab import files\n","from google.colab import drive\n","import shutil # Import shutil for copying files\n","\n","# ====================== CONFIGURATION ======================\n","# 1. Paste your board URL here\n","board_url = \"\" #@param {type:\"string\"}\n","\n","# 2. Path to your cookies file on Google Drive (change only if it's in a subfolder)\n","cookies_file = \"/content/drive/MyDrive/pinterest_cookies.txt\" #@param {type:\"string\"}\n","\n","# Optional: custom board name (auto-detected from URL by default)\n","board_name = board_url.rstrip(\"/\").split(\"/\")[-1]\n","#or \"pinterest_board\" #@param {type:\"string\"}\n","\n","print(\"β
Board URL:\", board_url)\n","print(\"π Board name:\", board_name)\n","print(\"π Cookies path:\", cookies_file)\n","\n","# ====================== MOUNT GOOGLE DRIVE ======================\n","print(\"π Mounting Google Drive...\")\n","drive.mount('/content/drive', force_remount=False)\n","\n","# Check if cookies file exists\n","if os.path.exists(cookies_file):\n"," print(\"β
Cookies file found! Full board (700+ images) will be downloaded.\")\n","else:\n"," print(\"β Cookies file NOT found at the path above. Only ~200 images will download.\")\n","\n","# ====================== CREATE OUTPUT FOLDER ======================\n","output_dir = f\"/content/{board_name}\"\n","os.makedirs(output_dir, exist_ok=True)\n","\n","print(\"π Starting download... (this can take a while for large boards)\")\n","\n","# ====================== BUILD & RUN GALLERY-DL COMMAND ======================\n","cmd = f'gallery-dl --dest \"{output_dir}\"'\n","if os.path.exists(cookies_file):\n"," cmd += f' --cookies \"{cookies_file}\"'\n","cmd += f' \"{board_url}\"'\n","\n","# Execute the download\n","!{cmd}\n","\n","# Count downloaded files\n","total_files = sum([len(files) for r, d, files in os.walk(output_dir)])\n","print(f\"β
Download finished! {total_files} files saved in {output_dir}\")\n","\n","# ====================== ZIP & AUTO-DOWNLOAD ======================\n","zip_path = f\"/content/{board_name}.zip\"\n","print(\"π¦ Zipping all images...\")\n","!zip -r -q \"{zip_path}\" \"{output_dir}\"\n","\n","print(f\"β
Zipped everything β {zip_path}\")\n","\n","# Save to Google Drive\n","drive_zip_destination = f\"/content/drive/MyDrive/{board_name}.zip\"\n","shutil.copy2(zip_path, drive_zip_destination)\n","print(f\"β
Copied '{zip_path}' to Google Drive at '{drive_zip_destination}'\")\n","\n","# Auto-download the zip to your computer\n","#files.download(zip_path)\n","\n","#print(\"π All done! Your full Pinterest board is now downloaded, zipped, and available in your Google Drive and local downloads.\")"],"metadata":{"id":"E7vSr64JkWWa","cellView":"form"},"execution_count":null,"outputs":[]},{"cell_type":"code","execution_count":null,"metadata":{"id":"g0524iIvU1I_"},"outputs":[],"source":["# Auto-download the zip file\n","files.download(zip_path)"]}],"metadata":{"colab":{"provenance":[{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/fetch_from_reddit.ipynb","timestamp":1775894617939},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/fetch_from_reddit.ipynb","timestamp":1775893490401},{"file_id":"https://huggingface.co/datasets/codeShare/lora-training-data/blob/main/fetch_from_reddit.ipynb","timestamp":1775849356252},{"file_id":"1mdMfryfIudXrlN1MWsiNjoXQjondKOah","timestamp":1775845568290},{"file_id":"/v2/external/notebooks/intro.ipynb","timestamp":1775575177068}],"collapsed_sections":["LaxF_cnIGNMh","SbX9rloh3iZb","qisU9VeIyzX2","0VlyEacYp7Pn","JrtsI98cmAxB"]},"kernelspec":{"display_name":"Python 3","name":"python3"}},"nbformat":4,"nbformat_minor":0}
\ No newline at end of file