| """ |
| Download videos listed in video_urls.json using yt-dlp. |
| Supports resuming (skips already downloaded videos). |
| Uses parallel workers for speed. |
| """ |
|
|
| import json |
| import subprocess |
| import sys |
| import os |
| import time |
| import argparse |
| from pathlib import Path |
| from concurrent.futures import ThreadPoolExecutor, as_completed |
|
|
# All paths are resolved relative to this script's directory, not the CWD,
# so the tool behaves the same no matter where it is invoked from.
SCRIPT_DIR = Path(__file__).parent
URL_FILE = SCRIPT_DIR / "video_urls.json"    # input manifest: video_id -> {url, source, ...}
VIDEO_DIR = SCRIPT_DIR / "videos"            # output root: one subdirectory per video id
LOG_FILE = SCRIPT_DIR / "download_log.jsonl" # append-only status log, one JSON object per line


# Default install location of the deno runtime; prepended to PATH for yt-dlp
# (presumably for its JS challenge solving — see the "ejs" remote component).
DENO_PATH = Path.home() / ".deno" / "bin"
|
|
|
|
| def download_one(video_id: str, url: str, output_dir: Path, |
| cookies_file: str | None = None, |
| use_oauth2: bool = False, |
| sleep_interval: float = 0) -> dict: |
| """Download a single video. Returns status dict.""" |
| video_dir = output_dir / video_id |
| video_dir.mkdir(parents=True, exist_ok=True) |
|
|
| existing = list(video_dir.glob("*.mp4")) + list(video_dir.glob("*.mkv")) + list(video_dir.glob("*.webm")) |
| if existing: |
| return {"video_id": video_id, "status": "skipped", "msg": "already exists"} |
|
|
| cmd = [ |
| "yt-dlp", |
| "--no-playlist", |
| "--remote-components", "ejs:github", |
| "-f", "bestvideo[ext=mp4]+bestaudio[ext=m4a]/best[ext=mp4]/best", |
| "--merge-output-format", "mp4", |
| "-o", str(video_dir / "%(id)s.%(ext)s"), |
| "--retries", "3", |
| "--socket-timeout", "30", |
| "--no-overwrites", |
| "--quiet", |
| "--no-warnings", |
| ] |
| if cookies_file: |
| cmd.extend(["--cookies", cookies_file]) |
| if use_oauth2: |
| cmd.extend(["--username", "oauth2", "--password", "", |
| "--extractor-args", "youtube:player_skip=initial_data"]) |
| if sleep_interval > 0: |
| cmd.extend(["--sleep-interval", str(int(sleep_interval)), |
| "--max-sleep-interval", str(int(sleep_interval * 2))]) |
| cmd.append(url) |
|
|
| env = os.environ.copy() |
| if DENO_PATH.exists(): |
| env["PATH"] = str(DENO_PATH) + ":" + env.get("PATH", "") |
|
|
| try: |
| result = subprocess.run(cmd, capture_output=True, text=True, timeout=600, env=env) |
| if result.returncode == 0: |
| return {"video_id": video_id, "status": "ok"} |
| else: |
| return {"video_id": video_id, "status": "failed", "msg": result.stderr.strip()[-200:]} |
| except subprocess.TimeoutExpired: |
| return {"video_id": video_id, "status": "timeout"} |
| except Exception as e: |
| return {"video_id": video_id, "status": "error", "msg": str(e)} |
|
|
|
|
def load_done_ids(log_file: Path, include_failed: bool = True) -> set:
    """Return the set of video IDs to skip, read from the JSONL log.

    IDs with status "ok" or "skipped" are always skipped; when
    include_failed is True, "failed"/"timeout"/"error" IDs are skipped
    too (i.e. they will NOT be retried).

    Malformed lines — invalid JSON, non-dict entries, or entries missing
    "video_id" — are ignored rather than raising. The original version
    only caught JSONDecodeError, so a well-formed JSON line without a
    "video_id" key crashed the resume logic with KeyError.
    """
    skip_statuses = {"ok", "skipped"}
    if include_failed:
        skip_statuses |= {"failed", "timeout", "error"}

    done: set = set()
    if not log_file.exists():
        return done
    with open(log_file) as f:
        for line in f:
            try:
                entry = json.loads(line)
            except json.JSONDecodeError:
                continue  # tolerate a truncated line (e.g. crash mid-write)
            if not isinstance(entry, dict):
                continue
            video_id = entry.get("video_id")
            if video_id is not None and entry.get("status", "") in skip_statuses:
                done.add(video_id)
    return done
|
|
|
|
def main():
    """CLI entry point: load the URL manifest, download pending videos, append results to the log."""
    parser = argparse.ArgumentParser(description="Download NitroGen videos")
    parser.add_argument("--workers", type=int, default=4, help="parallel download workers (default: 4)")
    parser.add_argument("--limit", type=int, default=0, help="max videos to download (0 = all)")
    parser.add_argument("--source", type=str, default="", help="filter by source (e.g. 'youtube', 'twitch')")
    parser.add_argument("--cookies", type=str, default="", help="path to cookies.txt (Netscape format) for YouTube")
    parser.add_argument("--reset-failed", action="store_true", help="retry previously failed downloads")
    parser.add_argument("--sleep", type=float, default=0, help="sleep seconds between requests per worker (anti-throttle)")
    parser.add_argument("--oauth2", action="store_true", help="use YouTube OAuth2 authentication instead of cookies")
    parser.add_argument("--retry-ids", type=str, default="", help="JSON file with list of video IDs to retry")
    args = parser.parse_args()

    if not URL_FILE.exists():
        print(f"ERROR: {URL_FILE} not found. Run extract_video_urls.py first.")
        sys.exit(1)

    # Manifest maps video_id -> metadata dict; code below relies on "url"
    # and optionally "source" keys.
    with open(URL_FILE) as f:
        all_videos = json.load(f)

    if args.source:
        all_videos = {k: v for k, v in all_videos.items() if v.get("source") == args.source}

    cookies_file = args.cookies if args.cookies else None
    if cookies_file and not Path(cookies_file).exists():
        print(f"ERROR: cookies file not found: {cookies_file}")
        sys.exit(1)

    VIDEO_DIR.mkdir(parents=True, exist_ok=True)

    # Build the work list: either an explicit retry set from --retry-ids,
    # or everything not yet recorded in the log (--reset-failed makes
    # previously failed/timeout/error IDs eligible again).
    if args.retry_ids:
        with open(args.retry_ids) as f:
            retry_set = set(json.load(f))
        todo = {k: v for k, v in all_videos.items() if k in retry_set}
    else:
        done_ids = load_done_ids(LOG_FILE, include_failed=not args.reset_failed)
        todo = {k: v for k, v in all_videos.items() if k not in done_ids}

    if args.limit > 0:
        todo = dict(list(todo.items())[:args.limit])

    print(f"Total videos: {len(all_videos)}")
    print(f"Already done: {len(all_videos) - len(todo)}")
    print(f"To download:  {len(todo)}")
    print(f"Workers:      {args.workers}")
    print(f"Cookies:      {cookies_file or 'None'}")
    print(f"Output dir:   {VIDEO_DIR}")
    print()

    if not todo:
        print("Nothing to download.")
        return

    ok_count = 0          # "ok" + "skipped" results
    fail_count = 0        # everything else
    rate_limit_count = 0  # consecutive-ish rate-limit failures (sequential mode only)

    # Log is opened in append mode so repeated runs accumulate history;
    # each result is flushed immediately so a crash loses at most one line.
    with open(LOG_FILE, "a") as log_f:
        if args.workers == 1 and args.sleep > 0:
            # Sequential mode with throttling: enables the adaptive
            # rate-limit backoff below, which needs ordered results.
            for i, (vid, info) in enumerate(todo.items(), 1):
                # NOTE(review): args.sleep is both forwarded to yt-dlp
                # (--sleep-interval) and slept again in Python below, so the
                # effective delay per video is roughly doubled — confirm intended.
                result = download_one(vid, info["url"], VIDEO_DIR,
                                      cookies_file, args.oauth2, args.sleep)
                log_f.write(json.dumps(result) + "\n")
                log_f.flush()

                status_icon = "OK" if result["status"] in ("ok", "skipped") else "FAIL"
                if result["status"] in ("ok", "skipped"):
                    ok_count += 1
                else:
                    fail_count += 1
                    # Heuristic match on yt-dlp error text; after 3 such
                    # failures back off progressively (30s steps, 300s cap).
                    if "rate limit" in result.get("msg", "").lower() or "add a delay" in result.get("msg", "").lower():
                        rate_limit_count += 1
                        if rate_limit_count >= 3:
                            wait = min(rate_limit_count * 30, 300)
                            print(f" Rate limited {rate_limit_count} times, backing off {wait}s...")
                            time.sleep(wait)

                print(f"[{i:5d}/{len(todo)}] {status_icon} {result['video_id']} - {result['status']}"
                      + (f" ({result.get('msg', '')[:80]})" if result.get("msg") else ""))

                # A real download ("ok") clears the backoff counter and
                # throttles; "skipped" did no network work so no sleep;
                # failures still sleep to avoid hammering the server.
                if result["status"] in ("ok",):
                    rate_limit_count = 0
                    time.sleep(args.sleep)
                elif result["status"] == "skipped":
                    pass
                else:
                    time.sleep(args.sleep)
        else:
            # Parallel mode: no adaptive backoff here — results arrive out
            # of order via as_completed, so consecutive-failure tracking
            # would be meaningless.
            with ThreadPoolExecutor(max_workers=args.workers) as executor:
                futures = {}
                for vid, info in todo.items():
                    fut = executor.submit(download_one, vid, info["url"], VIDEO_DIR,
                                          cookies_file, args.oauth2, args.sleep)
                    futures[fut] = vid

                for i, fut in enumerate(as_completed(futures), 1):
                    result = fut.result()
                    log_f.write(json.dumps(result) + "\n")
                    log_f.flush()

                    status_icon = "OK" if result["status"] in ("ok", "skipped") else "FAIL"
                    if result["status"] in ("ok", "skipped"):
                        ok_count += 1
                    else:
                        fail_count += 1

                    print(f"[{i:5d}/{len(todo)}] {status_icon} {result['video_id']} - {result['status']}"
                          + (f" ({result.get('msg', '')[:80]})" if result.get("msg") else ""))

    print(f"\nDone! OK: {ok_count}, Failed: {fail_count}")
|
|
|
|
# Script entry point; importing this module runs nothing.
if __name__ == "__main__":
    main()
|
|