"""
01_scrape.py — Fetch all flight logs from RocketReviews.com and scrape
each detail page, saving structured JSON to source/flights/.

The index is fetched from a single date-range endpoint that returns all
flights at once. Detail pages are scraped only for flights that have an
image/detail URL. Flights without a detail page are saved with index fields
only and detail fields set to null.

Output
------
source/flights/index.jsonl                one record per flight (raw index fields)
source/flights/detail/{shard}/{id}.json   full parsed detail per flight,
                                          sharded by id // 1000

Usage
-----
python scripts/flights/01_scrape.py
python scripts/flights/01_scrape.py --delay 1.0 --limit 10
python scripts/flights/01_scrape.py --force   # re-scrape existing files
"""
|
|
| from __future__ import annotations |
|
|
| import argparse |
| import json |
| import logging |
| import re |
| import sys |
| import time |
| from datetime import datetime, timezone |
| from pathlib import Path |
| from typing import Optional |
|
|
| import requests |
| from bs4 import BeautifulSoup, Tag |
| from requests.adapters import HTTPAdapter |
| from urllib3.util.retry import Retry |
|
|
| |
| |
| |
|
|
# --- Endpoint and filesystem configuration -----------------------------------

BASE_URL = "https://www.rocketreviews.com"
USER_AGENT = "RocketReviews-Dataset/1.0"  # identifies this scraper to the site
DEFAULT_DELAY = 1.0  # default seconds between detail-page requests

# The date-range endpoint returns every flight between the two dates at once.
# NOTE(review): uses the local date, not UTC — near midnight this may differ
# from the server's notion of "today" by one day; confirm that's acceptable.
_TODAY = datetime.now().strftime("%Y-%m-%d")
INDEX_URL = f"{BASE_URL}/data/flightlog/flightdates.php?search=2000-01-01:{_TODAY}"

# Repository root is three levels up from this script (scripts/flights/...).
ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "flights"
DETAIL_DIR = SOURCE_DIR / "detail"
|
|
| |
| |
| |
|
|
# Log to stdout (rather than the default stderr) so progress is visible when
# the script's output is piped or captured.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s %(levelname)s %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)],
)
log = logging.getLogger(__name__)
|
|
| |
| |
| |
|
|
|
|
def _build_session() -> requests.Session:
    """Return a requests Session with our User-Agent and GET retries mounted."""
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    for scheme in ("https://", "http://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session
|
|
|
|
class RateLimiter:
    """Enforce a minimum interval between successive calls to wait()."""

    def __init__(self, delay: float) -> None:
        self.delay = delay  # minimum seconds between requests
        self._last: float = 0.0  # monotonic timestamp of the previous call

    def wait(self) -> None:
        """Block until at least `delay` seconds have passed since the last call."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
|
|
|
|
| |
| |
| |
|
|
|
|
| def _re_first(pattern: str, text: str, group: int = 1) -> Optional[str]: |
| m = re.search(pattern, text) |
| return m.group(group) if m else None |
|
|
|
|
def _after_strong(soup: BeautifulSoup, label: str) -> Optional[str]:
    """
    Locate '<strong>{label}:</strong>' and collect the text that follows it,
    stopping before the next <strong> tag. Returns None when the label is
    absent or the collected text is empty after stripping.
    """
    marker = soup.find("strong", string=re.compile(rf"^{label}:?$", re.I))
    if marker is None:
        return None
    pieces = []
    for node in marker.next_siblings:
        if isinstance(node, Tag):
            if node.name == "strong":
                break
            pieces.append(node.get_text())
        else:
            # Plain NavigableString between tags.
            pieces.append(str(node))
    collected = "".join(pieces).strip()
    return collected or None
|
|
|
|
| def _parse_altitude(raw: Optional[str]) -> Optional[float]: |
| """ |
| Parse altitude from the raw index string. Returns feet as a float or None. |
| |
| Handles: |
| "16,504 Feet" -> 16504.0 |
| "600-700 Feet" -> 650.0 (midpoint of range) |
| "-" -> None |
| """ |
| if not raw or raw.strip() == "-": |
| return None |
| cleaned = raw.replace(",", "").lower() |
| |
| m = re.search(r"(\d+(?:\.\d+)?)\s*-\s*(\d+(?:\.\d+)?)", cleaned) |
| if m: |
| return (float(m.group(1)) + float(m.group(2))) / 2 |
| |
| m = re.search(r"(\d+(?:\.\d+)?)", cleaned) |
| return float(m.group(1)) if m else None |
|
|
|
|
def _parse_conditions(soup: BeautifulSoup) -> dict:
    """Extract weather conditions from the labeled <strong> fields."""

    def first_number(text: Optional[str]) -> Optional[float]:
        # Pull the first decimal number out of a raw field, if any.
        if not text:
            return None
        m = re.search(r"(\d+(?:\.\d+)?)", text)
        return float(m.group(1)) if m else None

    wind_speed = first_number(_after_strong(soup, "Wind Speed"))
    temperature = first_number(_after_strong(soup, "Temperature"))

    wind_direction = None
    raw_direction = _after_strong(soup, "Wind Direction")
    if raw_direction:
        # Phrased as e.g. "from the NW"; fall back to the raw text otherwise.
        m = re.search(r"from\s+the\s+(\w+)", raw_direction, re.I)
        wind_direction = m.group(1).upper() if m else raw_direction.strip()

    return {
        "wind_speed_mph": wind_speed,
        "wind_direction": wind_direction,
        "temperature_f": temperature,
    }
|
|
|
|
def _parse_notes(soup: BeautifulSoup) -> Optional[str]:
    """
    Extract the narrative flight description. Notes appear as plain <p> tags
    in the main content area with no section heading; paragraphs of 20
    characters or fewer are treated as boilerplate and dropped.
    """
    kept = []
    for paragraph in soup.find_all("p"):
        text = paragraph.get_text(strip=True)
        if len(text) > 20:
            kept.append(text)
    if not kept:
        return None
    return " ".join(kept)
|
|
|
|
def _parse_rocket_url(html: str) -> Optional[str]:
    """
    Extract the URL for the flyer's specific rocket instance. These links
    carry a 12-digit timestamp-based suffix, e.g.:
        /aerospace-specialty-products-sky-ferry-250708140332.html
    """
    match = re.search(r'href="(/[a-z][a-z0-9-]+-\d{10,}\.html)"', html)
    if match is None:
        return None
    return BASE_URL + match.group(1)
|
|
|
|
def _parse_motor_url(html: str, motors: str) -> Optional[str]:
    """
    Extract the motor product page URL, using the first motor designation as
    a search anchor. Motor URLs embed the designation slug, e.g.:
        /estes-d12-5112.html for motor "D12-5"
    """
    if not motors:
        return None

    # Slugify the first motor of a multi-motor string like "D12-5 + C6-0".
    first_motor = motors.split("+")[0].strip()
    slug = re.sub(r"[^a-z0-9]+", "-", first_motor.lower()).strip("-")

    # Anchor on the slug's first six characters; product URLs append extra
    # id digits after the designation.
    pattern = rf'href="(/[a-z0-9-]*{re.escape(slug[:6])}[a-z0-9-]*\.html)"'
    match = re.search(pattern, html)
    if match is None:
        return None
    return BASE_URL + match.group(1)
|
|
|
|
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Parse a flight detail page into a structured record."""
    soup = BeautifulSoup(html, "lxml")
    image_path = index_rec.get("image", "")
    altitude = index_rec.get("altitude")

    record = {
        "id": int(index_rec["id"]),
        "url": f"{BASE_URL}{image_path}" if image_path else None,
        "date": index_rec.get("date"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "flyer": index_rec.get("flyer"),
        "rocket": index_rec.get("rocket"),
        "rocket_url": _parse_rocket_url(html),
        "kit": index_rec.get("kit") or None,
        "motors": index_rec.get("motors") or None,
        "motor_url": _parse_motor_url(html, index_rec.get("motors", "")),
        "altitude_raw": altitude or None,
        "altitude_ft": _parse_altitude(altitude),
        "launch_site": _after_strong(soup, "Launch Site"),
        "conditions": _parse_conditions(soup),
        "notes": _parse_notes(soup),
    }
    return record
|
|
|
|
def _index_only_record(index_rec: dict) -> dict:
    """
    Build a detail record from index fields alone, for flights that have no
    detail page. Detail-only fields are set to null so the record shape
    matches what _parse_detail produces.
    """
    altitude = index_rec.get("altitude")
    return {
        "id": int(index_rec["id"]),
        "url": None,
        "date": index_rec.get("date"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "flyer": index_rec.get("flyer"),
        "rocket": index_rec.get("rocket"),
        "rocket_url": None,
        "kit": index_rec.get("kit") or None,
        "motors": index_rec.get("motors") or None,
        "motor_url": None,
        "altitude_raw": altitude or None,
        "altitude_ft": _parse_altitude(altitude),
        "launch_site": None,
        # All-null conditions, same keys as _parse_conditions returns.
        "conditions": dict.fromkeys(
            ("wind_speed_mph", "wind_direction", "temperature_f")
        ),
        "notes": None,
    }
|
|
|
|
| |
| |
| |
|
|
|
|
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the full flight index and return its 'records' list."""
    log.info("Fetching flight index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    payload = response.json()
    records = payload.get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
|
|
|
|
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Scrape one flight's detail page and return its structured record.

    Returns None when the flight's detail file already exists on disk and
    *force* is false (the caller counts it as skipped). Flights without a
    detail URL, or whose page fails to fetch, fall back to an index-only
    record rather than aborting the run.

    BUG FIX: the return annotation previously claimed `dict`, but the
    already-scraped path returns None; it is now `Optional[dict]`.
    """
    flight_id = int(index_rec["id"])
    # Shard detail files by id // 1000; must match the layout used by the
    # writer in main().
    shard = f"{flight_id // 1000:03d}"
    shard_dir = DETAIL_DIR / shard
    dest = shard_dir / f"{flight_id:06d}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", flight_id)
        return None

    image_path = index_rec.get("image", "")
    if not image_path:
        # No detail page for this flight; keep the index fields only.
        return _index_only_record(index_rec)

    url = f"{BASE_URL}{image_path}"
    rate.wait()

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        # Best-effort: a failed fetch degrades to an index-only record.
        log.warning("Failed to fetch flight %s: %s", flight_id, exc)
        return _index_only_record(index_rec)

    return _parse_detail(resp.text, index_rec)
|
|
|
|
| |
| |
| |
|
|
|
|
def main() -> None:
    """CLI entry point: fetch the index, then scrape each flight's detail page.

    Writes source/flights/index.jsonl first, so the raw index survives even
    if detail scraping is interrupted part-way through.
    """
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com flight logs.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many records (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape flights that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    records = fetch_index(session)

    # Persist the raw index before any detail scraping begins.
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # BUG FIX: compare against None so `--limit 0` is honored instead of
    # being silently treated as "no limit" by truthiness.
    if args.limit is not None:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        if result is None:
            # Detail file already on disk and --force was not given.
            skipped += 1
            continue

        # Shard layout must mirror the existence check in scrape_detail().
        flight_id = int(rec["id"])
        shard = f"{flight_id // 1000:03d}"
        shard_dir = DETAIL_DIR / shard
        shard_dir.mkdir(parents=True, exist_ok=True)
        dest = shard_dir / f"{flight_id:06d}.json"

        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
|
|
|
|
# Script entry point; importing this module does not trigger scraping.
if __name__ == "__main__":
    main()
|
|