ppak10 commited on
Commit
25c60c2
·
1 Parent(s): 87b687b

Adds source files.

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +7 -0
  2. .gitignore +72 -0
  3. README.md +86 -0
  4. pyproject.toml +6 -1
  5. scripts/clubs/01_scrape.py +359 -0
  6. scripts/designs/01_scrape.py +422 -0
  7. scripts/flights/01_scrape.py +397 -0
  8. scripts/glossary/01_scrape.py +301 -0
  9. scripts/manufacturers/01_scrape.py +387 -0
  10. scripts/motors/01_scrape.py +399 -0
  11. scripts/plans/01_scrape.py +324 -0
  12. scripts/products/01_scrape.py +409 -0
  13. scripts/reviews/01_scrape.py +391 -0
  14. source/clubs/detail/000001.json +3 -0
  15. source/clubs/detail/000002.json +3 -0
  16. source/clubs/detail/000003.json +3 -0
  17. source/clubs/detail/000021.json +3 -0
  18. source/clubs/detail/000022.json +3 -0
  19. source/clubs/detail/000023.json +3 -0
  20. source/clubs/detail/000026.json +3 -0
  21. source/clubs/detail/000027.json +3 -0
  22. source/clubs/detail/000028.json +3 -0
  23. source/clubs/detail/000029.json +3 -0
  24. source/clubs/detail/000030.json +3 -0
  25. source/clubs/detail/000031.json +3 -0
  26. source/clubs/detail/000032.json +3 -0
  27. source/clubs/detail/000033.json +3 -0
  28. source/clubs/detail/000034.json +3 -0
  29. source/clubs/detail/000035.json +3 -0
  30. source/clubs/detail/000036.json +3 -0
  31. source/clubs/detail/000037.json +3 -0
  32. source/clubs/detail/000038.json +3 -0
  33. source/clubs/detail/000039.json +3 -0
  34. source/clubs/detail/000040.json +3 -0
  35. source/clubs/detail/000041.json +3 -0
  36. source/clubs/detail/000042.json +3 -0
  37. source/clubs/detail/000043.json +3 -0
  38. source/clubs/detail/000044.json +3 -0
  39. source/clubs/detail/000045.json +3 -0
  40. source/clubs/detail/000046.json +3 -0
  41. source/clubs/detail/000047.json +3 -0
  42. source/clubs/detail/000048.json +3 -0
  43. source/clubs/detail/000049.json +3 -0
  44. source/clubs/detail/000050.json +3 -0
  45. source/clubs/detail/000051.json +3 -0
  46. source/clubs/detail/000052.json +3 -0
  47. source/clubs/detail/000053.json +3 -0
  48. source/clubs/detail/000054.json +3 -0
  49. source/clubs/detail/000055.json +3 -0
  50. source/clubs/detail/000056.json +3 -0
.gitattributes CHANGED
@@ -58,3 +58,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
58
  # Video files - compressed
59
  *.mp4 filter=lfs diff=lfs merge=lfs -text
60
  *.webm filter=lfs diff=lfs merge=lfs -text
61
+
62
+ # Dataset specific files
63
+ *.jsonl filter=lfs diff=lfs merge=lfs -text
64
+ *.json filter=lfs diff=lfs merge=lfs -text
65
+ *.ork filter=lfs diff=lfs merge=lfs -text
66
+ *.rkt filter=lfs diff=lfs merge=lfs -text
67
+ *.csv filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # macOS
2
+ .DS_Store
3
+ .AppleDouble
4
+ .LSOverride
5
+
6
+ # Python
7
+ __pycache__/
8
+ *.py[cod]
9
+ *$py.class
10
+ *.so
11
+ *.egg
12
+ *.egg-info/
13
+ dist/
14
+ build/
15
+ eggs/
16
+ parts/
17
+ var/
18
+ sdist/
19
+ wheels/
20
+ *.egg-link
21
+ .installed.cfg
22
+
23
+ # Virtual environments
24
+ .venv/
25
+ venv/
26
+ env/
27
+ ENV/
28
+
29
+ # uv
30
+ .uv/
31
+
32
+ # Distribution / packaging
33
+ MANIFEST
34
+
35
+ # Unit test / coverage
36
+ htmlcov/
37
+ .tox/
38
+ .nox/
39
+ .coverage
40
+ .coverage.*
41
+ .cache
42
+ nosetests.xml
43
+ coverage.xml
44
+ *.cover
45
+ *.py,cover
46
+ .hypothesis/
47
+ .pytest_cache/
48
+
49
+ # Jupyter Notebooks
50
+ .ipynb_checkpoints/
51
+ *.ipynb
52
+
53
+ # Environment variables
54
+ .env
55
+ .env.*
56
+
57
+ # IDE
58
+ .vscode/
59
+ .idea/
60
+ *.swp
61
+ *.swo
62
+
63
+ # Logs
64
+ *.log
65
+ logs/
66
+
67
+ # Data / output files
68
+ *.csv
69
+ *.parquet
70
+ *.jsonl
71
+ data/
72
+ output/
README.md CHANGED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # RocketReviews Dataset
2
+
3
+ A structured dataset scraped from [RocketReviews.com](https://www.rocketreviews.com) for use in AI/ML pipelines and vector databases.
4
+
5
+ ---
6
+
7
+ ## Collection Status
8
+
9
+ Legend: `[ ]` not started · `[~]` in progress · `[x]` complete
10
+
11
+ ### Primary Tables
12
+
13
+ | Table | Description | Script | Output | Status |
14
+ |-------|-------------|--------|--------|--------|
15
+ | `reviews` | Kit and product reviews with ratings and text sections | `scripts/reviews/01_scrape.py` | `source/reviews/` | `[~]` |
16
+ | `flights` | Member flight logs with conditions and notes | `scripts/flights/01_scrape.py` | `source/flights/` | `[~]` |
17
+ | `products` | Full product catalog (kits, motors, components, software, etc.) | `scripts/products/01_scrape.py` | `source/products/` | `[ ]` |
18
+ | `motors` | Motor specifications with thrust and performance data | `scripts/motors/01_scrape.py` | `source/motors/` | `[~]` |
19
+ | `designs` | OpenRocket (.ork) and RockSim (.rkt) design files | `scripts/designs/01_scrape.py` | `source/designs/` | `[~]` |
20
+ | `plans` | Rocket plans and build instructions | `scripts/plans/01_scrape.py` | `source/plans/` | `[~]` |
21
+ | `clubs` | Rocketry clubs directory | `scripts/clubs/01_scrape.py` | `source/clubs/` | `[~]` |
22
+ | `glossary` | Rocketry terms and definitions | `scripts/glossary/01_scrape.py` | `source/glossary/` | `[~]` |
23
+
24
+ ### Relational Tables
25
+
26
+ | Table | Description | Script | Output | Status | Used By |
27
+ |-------|-------------|--------|--------|--------|---------|
28
+ | `manufacturers` | Rocket kit and product manufacturers | `scripts/manufacturers/01_scrape.py` | `source/manufacturers/` | `[~]` | `products`, `reviews` |
29
+ | `designers` | Rocket designers linked to products | `scripts/designers/01_scrape.py` | `source/designers/` | `[ ]` | `products` |
30
+ | `contributors` | Site members who write reviews | `scripts/contributors/01_scrape.py` | `source/contributors/` | `[ ]` | `reviews` |
31
+
32
+ ### Lookup Tables
33
+
34
+ | Table | Description | Script | Output | Status | Used By |
35
+ |-------|-------------|--------|--------|--------|---------|
36
+ | `styles` | Rocket style classifications (Sport, Scale, Upscale, etc.) | `scripts/styles/01_scrape.py` | `source/styles/` | `[ ]` | `products` |
37
+ | `recovery_types` | Recovery method types (Parachute, Streamer, Glide, etc.) | `scripts/recovery_types/01_scrape.py` | `source/recovery_types/` | `[ ]` | `products` |
38
+ | `power_classes` | Motor power classifications (Low, Mid, High Power, etc.) | `scripts/power_classes/01_scrape.py` | `source/power_classes/` | `[ ]` | `products` |
39
+ | `cp_library` | Center of pressure data per product | `scripts/cp_library/01_scrape.py` | `source/cp_library/` | `[ ]` | `products` |
40
+
41
+ ---
42
+
43
+ ## Source Layout
44
+
45
+ ```
46
+ source/
47
+ reviews/
48
+ index.jsonl
49
+ detail/{id}.json
50
+ products/
51
+ index.jsonl
52
+ detail/{id}.json
53
+ motors/
54
+ index.jsonl
55
+ detail/{id}.json
56
+ designs/
57
+ index.jsonl
58
+ detail/{id}.json
59
+ files/
60
+ ork/
61
+ rkt/
62
+ flights/
63
+ index.jsonl
64
+ detail/{id}.json
65
+ plans/
66
+ index.jsonl
67
+ clubs/
68
+ index.jsonl
69
+ glossary/
70
+ index.jsonl
71
+ detail/{slug}.json
72
+ manufacturers/
73
+ index.jsonl
74
+ designers/
75
+ index.jsonl
76
+ contributors/
77
+ index.jsonl
78
+ styles/
79
+ index.jsonl
80
+ recovery_types/
81
+ index.jsonl
82
+ power_classes/
83
+ index.jsonl
84
+ cp_library/
85
+ index.jsonl
86
+ ```
pyproject.toml CHANGED
@@ -4,4 +4,9 @@ version = "0.1.0"
4
  description = "Add your description here"
5
  readme = "README.md"
6
  requires-python = ">=3.10"
7
- dependencies = []
 
 
 
 
 
 
4
  description = "Add your description here"
5
  readme = "README.md"
6
  requires-python = ">=3.10"
7
+ dependencies = [
8
+ "requests>=2.31.0",
9
+ "beautifulsoup4>=4.12.0",
10
+ "urllib3>=2.0.0",
11
+ "lxml>=4.9.0",
12
+ ]
scripts/clubs/01_scrape.py ADDED
@@ -0,0 +1,359 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_clubs.py — Fetch the RocketReviews.com rocketry clubs index and scrape each
4
+ detail page, saving structured JSON to source/clubs/.
5
+
6
+ Output
7
+ ------
8
+ source/clubs/index.jsonl one record per club (raw index fields)
9
+ source/clubs/detail/{id}.json full parsed detail per club
10
+
11
+ Usage
12
+ -----
13
+ python scripts/clubs/01_scrape.py
14
+ python scripts/clubs/01_scrape.py --delay 1.0 --limit 10
15
+ python scripts/clubs/01_scrape.py --force # re-scrape existing files
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import argparse
21
+ import json
22
+ import logging
23
+ import re
24
+ import sys
25
+ import time
26
+ from datetime import datetime, timezone
27
+ from pathlib import Path
28
+ from typing import Optional
29
+ from urllib.parse import quote
30
+
31
+ import requests
32
+ from bs4 import BeautifulSoup
33
+ from requests.adapters import HTTPAdapter
34
+ from urllib3.util.retry import Retry
35
+
36
+ # ---------------------------------------------------------------------------
37
+ # Config
38
+ # ---------------------------------------------------------------------------
39
+
40
+ BASE_URL = "https://www.rocketreviews.com"
41
+ INDEX_URL = f"{BASE_URL}/index.php?action=clubs&lat=x&lon=x"
42
+ USER_AGENT = "RocketReviews-Dataset/1.0"
43
+ DEFAULT_DELAY = 1.0
44
+
45
+ ROOT = Path(__file__).parent.parent.parent
46
+ SOURCE_DIR = ROOT / "source" / "clubs"
47
+ DETAIL_DIR = SOURCE_DIR / "detail"
48
+
49
+ # ---------------------------------------------------------------------------
50
+ # Logging
51
+ # ---------------------------------------------------------------------------
52
+
53
+ logging.basicConfig(
54
+ level=logging.INFO,
55
+ format="%(asctime)s %(levelname)s %(message)s",
56
+ handlers=[logging.StreamHandler(sys.stdout)],
57
+ )
58
+ log = logging.getLogger(__name__)
59
+
60
+ # ---------------------------------------------------------------------------
61
+ # HTTP session
62
+ # ---------------------------------------------------------------------------
63
+
64
+
65
def _build_session() -> requests.Session:
    """Create a requests session with a custom User-Agent and a GET retry policy.

    Retries up to 3 times with exponential backoff on transient HTTP errors
    (429 and common 5xx responses), for both http and https.
    """
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    for scheme in ("https://", "http://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session
77
+
78
+
79
class RateLimiter:
    """Enforce a minimum interval (in seconds) between successive wait() calls."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        """Block just long enough so calls are at least `delay` seconds apart."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
89
+
90
+
91
+ # ---------------------------------------------------------------------------
92
+ # Parsing helpers
93
+ # ---------------------------------------------------------------------------
94
+
95
+ def _normalize_string(val: Optional[str]) -> Optional[str]:
96
+ if not val:
97
+ return None
98
+ val = val.strip()
99
+ if val in ("-Unknown-", "-", "", "Unknown"):
100
+ return None
101
+ return val
102
+
103
def _parse_index(html: str) -> list[dict]:
    """
    Parse the index.php?action=clubs page and return cleaned club records.

    The page embeds its data as an inline array assigned to ``data``.  The
    array uses single-quoted, Python-literal-style syntax, so it is parsed
    with ast.literal_eval first and only repaired into strict JSON as a
    fallback.  A single malformed record (missing or non-numeric id) is
    skipped with a warning instead of aborting the whole index, which the
    previous version did by letting the exception propagate.
    """
    m = re.search(r"data\s*=\s*(\[.*?\]);", html, re.DOTALL)
    if not m:
        log.warning("Could not find the clubs data array in the index HTML.")
        return []

    data_str = m.group(1)

    def _repair_json(s: str) -> str:
        # Best-effort conversion of single-quoted pseudo-JSON into strict
        # JSON.  This can still break on apostrophes inside values, which is
        # why ast.literal_eval is tried first.
        s = re.sub(r"\{\s*'", '{"', s)
        s = re.sub(r"'\s*:", '":', s)
        s = re.sub(r",\s*'", ', "', s)
        s = re.sub(r":\s*'", ': "', s)
        s = re.sub(r"'\s*\}", '"}', s)
        s = re.sub(r"'\s*,", '",', s)
        return s

    try:
        # ast.literal_eval safely handles single-quoted Python-style literals.
        import ast
        records = ast.literal_eval(data_str)
    except Exception as exc:
        log.warning(
            "Failed to parse embedded JSON with ast.literal_eval, falling back to manual: %s",
            exc,
        )
        try:
            records = json.loads(_repair_json(data_str))
        except json.JSONDecodeError as exc2:
            log.error("Failed to parse clubs JSON entirely: %s", exc2)
            return []

    cleaned_records = []
    for rec in records:
        # Robustness fix: skip malformed records rather than crashing the
        # entire index parse on one bad entry.
        try:
            club_id = int(rec["id"])
        except (KeyError, TypeError, ValueError) as exc:
            log.warning("Skipping malformed club record %r: %s", rec, exc)
            continue

        raw_title = rec.get("title", "")

        # Affiliation logos are embedded as marker tokens in the title string.
        has_nar = "NAR-Logo" in raw_title
        has_tripoli = "Tripoli-Logo" in raw_title

        # Clean title (markers removed).
        title = raw_title.replace("NAR-Logo", "").replace("Tripoli-Logo", "").strip()

        # The site dynamically constructs the redirect URL:
        # index.php?autoredir={encoded_title}&action=displayclub&clubid={id}
        url = f"{BASE_URL}/index.php?autoredir={quote(title)}&action=displayclub&clubid={club_id}"

        cleaned_records.append({
            "id": club_id,
            "name": title,
            "city": _normalize_string(rec.get("city")),
            "state": _normalize_string(rec.get("state")),
            "country": _normalize_string(rec.get("country")),
            "has_nar": has_nar,
            "has_tripoli": has_tripoli,
            # Redirect URL; detail scraping resolves the canonical URL.
            "url": url,
        })

    return cleaned_records
177
+
178
+
179
def _parse_detail(html: str, index_rec: dict, resolved_url: str) -> dict:
    """Combine index-level fields with fields parsed from a club detail page."""
    soup = BeautifulSoup(html, "lxml")

    data = dict(index_rec)
    data.update({
        "url": resolved_url,
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "nar_section": None,
        "tripoli_prefecture": None,
        "website": None,
        "location": None,
        "description": None,
    })

    # The <h2> carries the clean club title (external-link image stripped).
    heading = soup.find('h2')
    if heading:
        data["name"] = _normalize_string(heading.get_text(strip=True))

    # Key/value facts live in the <ul class="mt-1"> list as "Key: value" items.
    info_list = soup.find('ul', class_='mt-1')
    if info_list:
        for item in info_list.find_all('li'):
            line = item.get_text(separator=' ', strip=True)
            if ':' not in line:
                continue
            left, _, right = line.partition(':')
            key_lower = left.strip().lower()
            val = right.strip()
            if "nar" in key_lower:
                try:
                    data["nar_section"] = int(re.sub(r"[^\d]", "", val))
                except ValueError:
                    pass
            elif "tripoli" in key_lower:
                try:
                    data["tripoli_prefecture"] = int(re.sub(r"[^\d]", "", val))
                except ValueError:
                    pass
            elif "web site" in key_lower or "website" in key_lower:
                data["website"] = val if val.startswith("http") else f"http://{val}"
            elif "location" in key_lower:
                data["location"] = val

    # Descriptive text: the first <p> mentioning the club name that is not
    # the generic "What You Can Do" boilerplate.
    club_name = data.get('name')
    if club_name:
        for para in soup.find_all('p'):
            text = para.get_text(separator=' ', strip=True)
            if club_name in text and 'What You Can Do' not in text:
                data['description'] = text
                break

    return data
230
+
231
+
232
+ # ---------------------------------------------------------------------------
233
+ # Fetch helpers
234
+ # ---------------------------------------------------------------------------
235
+
236
+
237
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the clubs index page and return its cleaned records."""
    log.info("Fetching clubs index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    parsed = _parse_index(response.text)
    log.info("Index returned %d records.", len(parsed))
    return parsed
244
+
245
+
246
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one club detail page.

    Returns the parsed record, or None when the page is already saved on
    disk (and force is False) or the HTTP request fails.
    """
    club_id = index_rec["id"]
    target = DETAIL_DIR / f"{club_id:06d}.json"

    if target.exists() and not force:
        log.debug("Already scraped %s, skipping.", club_id)
        return None

    rate.wait()

    try:
        # Following the index.php?autoredir= redirect lands on the clean
        # SEO URL (e.g. /4-corners-rocketry-association.html).
        response = session.get(index_rec["url"], timeout=30)
        response.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch club %s: %s", club_id, exc)
        return None

    return _parse_detail(response.text, index_rec, response.url)
272
+
273
+
274
+ # ---------------------------------------------------------------------------
275
+ # Main
276
+ # ---------------------------------------------------------------------------
277
+
278
+
279
def main() -> None:
    """CLI entry point: write source/clubs/index.jsonl, then scrape each detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com rocketry clubs.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape clubs that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index and write index.jsonl
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            # ensure_ascii=False keeps index.jsonl consistent with the
            # UTF-8 detail files written below.
            f.write(json.dumps({**rec, "scraped_at": scraped_at}, ensure_ascii=False) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # Bug fix: test `is not None` rather than truthiness so `--limit 0`
    # scrapes nothing instead of silently scraping everything.
    if args.limit is not None:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        if result is None:
            # Either already on disk, or the fetch failed (logged inside).
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{rec['id']:06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()
scripts/designs/01_scrape.py ADDED
@@ -0,0 +1,422 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_designs.py — Fetch the RocketReviews.com OpenRocket and RockSim indexes
4
+ and scrape each detail page, downloading the binary design file and saving
5
+ structured JSON metadata to source/designs/.
6
+
7
+ Output
8
+ ------
9
+ source/designs/index.jsonl one record per design (raw index fields)
10
+ source/designs/detail/{id}.json full parsed metadata per design
11
+ source/designs/files/ork/{id}.ork downloaded OpenRocket files
12
+ source/designs/files/rkt/{id}.rkt downloaded RockSim files
13
+
14
+ Usage
15
+ -----
16
+ python scripts/designs/01_scrape.py
17
+ python scripts/designs/01_scrape.py --delay 1.0 --limit 10
18
+ python scripts/designs/01_scrape.py --force # re-scrape existing files
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ import argparse
24
+ import json
25
+ import logging
26
+ import re
27
+ import sys
28
+ import time
29
+ from datetime import datetime, timezone
30
+ from pathlib import Path
31
+ from typing import Optional, Tuple
32
+
33
+ import requests
34
+ from bs4 import BeautifulSoup
35
+ from requests.adapters import HTTPAdapter
36
+ from urllib3.util.retry import Retry
37
+
38
+ # ---------------------------------------------------------------------------
39
+ # Config
40
+ # ---------------------------------------------------------------------------
41
+
42
+ BASE_URL = "https://www.rocketreviews.com"
43
+ ENDPOINTS = [
44
+ {
45
+ "format": "openrocket",
46
+ "url": f"{BASE_URL}/data/openrocket/openrocket.php?search=&optimized=&type=",
47
+ "ext": "ork"
48
+ },
49
+ {
50
+ "format": "rocksim",
51
+ "url": f"{BASE_URL}/data/rocksim/rocksim.php?search=&optimized=&type=",
52
+ "ext": "rkt"
53
+ }
54
+ ]
55
+
56
+ USER_AGENT = "RocketReviews-Dataset/1.0"
57
+ DEFAULT_DELAY = 1.0
58
+
59
+ ROOT = Path(__file__).parent.parent.parent
60
+ SOURCE_DIR = ROOT / "source" / "designs"
61
+ DETAIL_DIR = SOURCE_DIR / "detail"
62
+ FILES_DIR = SOURCE_DIR / "files"
63
+
64
+ # ---------------------------------------------------------------------------
65
+ # Logging
66
+ # ---------------------------------------------------------------------------
67
+
68
+ logging.basicConfig(
69
+ level=logging.INFO,
70
+ format="%(asctime)s %(levelname)s %(message)s",
71
+ handlers=[logging.StreamHandler(sys.stdout)],
72
+ )
73
+ log = logging.getLogger(__name__)
74
+
75
+ # ---------------------------------------------------------------------------
76
+ # HTTP session
77
+ # ---------------------------------------------------------------------------
78
+
79
+
80
def _build_session() -> requests.Session:
    """Build a requests.Session with our User-Agent and a retry/backoff policy for GETs.

    Transient errors (429, 500, 502, 503, 504) are retried up to 3 times
    with exponential backoff on both http and https.
    """
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    session.mount("https://", HTTPAdapter(max_retries=retry_policy))
    session.mount("http://", HTTPAdapter(max_retries=retry_policy))
    return session
92
+
93
+
94
class RateLimiter:
    """Monotonic-clock throttle: at most one wait() completion per `delay` seconds."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep off whatever remains of the delay since the previous call."""
        since_last = time.monotonic() - self._last
        sleep_for = self.delay - since_last
        if sleep_for > 0:
            time.sleep(sleep_for)
        self._last = time.monotonic()
104
+
105
+
106
+ # ---------------------------------------------------------------------------
107
+ # Parsing helpers
108
+ # ---------------------------------------------------------------------------
109
+
110
+ def _normalize_string(val: Optional[str]) -> Optional[str]:
111
+ if not val:
112
+ return None
113
+ val = val.strip()
114
+ if val in ("-Unknown-", "-", "", "Unknown"):
115
+ return None
116
+ return val
117
+
118
+ def _parse_measure(text: Optional[str]) -> Tuple[Optional[float], Optional[str]]:
119
+ """Parse '28.6470 inches from front' -> (28.647, 'Front')"""
120
+ if not text:
121
+ return None, None
122
+ m = re.search(r"([\d\.]+)\s+inches\s+from\s+(front|rear)", text, re.I)
123
+ if m:
124
+ return float(m.group(1)), m.group(2).title()
125
+ return None, None
126
+
127
+ def _parse_margin(text: Optional[str]) -> Tuple[Optional[float], Optional[str]]:
128
+ """Parse '4.03 Overstable' -> (4.03, 'Overstable')"""
129
+ if not text:
130
+ return None, None
131
+ m = re.search(r"([\d\.]+)\s+(\w+)", text)
132
+ if m:
133
+ return float(m.group(1)), m.group(2).title()
134
+ return None, None
135
+
136
def _extract_person(soup: BeautifulSoup, label: str) -> Optional[dict]:
    """
    Extract a person (Contributor or Designer) and their profile URL.

    Handles both '<strong>Label:</strong> Name' markup and inline text such
    as 'Contributed by <a href="...">Name</a>'.  Returns
    {"name": ..., "url": ...} or None when nothing matches.
    """
    def _absolute(href: str) -> str:
        # Site hrefs may be relative; prefix the site root when needed.
        return href if href.startswith("http") else f"{BASE_URL}{href}"

    # Case 1: <strong>Label:</strong> followed by a link or bare text node.
    strong = soup.find("strong", string=re.compile(rf"{label}:?", re.I))
    if strong:
        link = strong.find_next_sibling("a")
        if link and link.get("href"):
            return {
                "name": _normalize_string(link.get_text(strip=True)),
                "url": _absolute(link["href"]),
            }
        sibling = strong.next_sibling
        if sibling:
            if hasattr(sibling, "get_text"):
                name = sibling.get_text(strip=True)
            else:
                name = str(sibling).strip()
            return {"name": _normalize_string(name), "url": None}

    # Case 2: inline text like 'Contributed by Name' inside a <p> or <div>.
    text_node = soup.find(string=re.compile(rf"{label}", re.I))
    if text_node and text_node.parent.name in ("p", "div"):
        container = text_node.parent
        link = container.find("a")
        if link and link.get("href"):
            return {
                "name": _normalize_string(link.get_text(strip=True)),
                "url": _absolute(link["href"]),
            }

        raw = container.get_text(strip=True)
        name = re.sub(rf"{label}(?: by)?:?\s*", "", raw, flags=re.I).strip()
        return {"name": _normalize_string(name), "url": None}

    return None
169
+
170
def _parse_detail(html: str, index_rec: dict, format_ext: str) -> dict:
    """
    Parse a design detail page and merge it with its index record.

    Returns a flat dict with file/download URLs, contributor/designer info,
    CG/CP/margin metrics and the local path the design file will be saved to.
    The repeated <b>/<strong> label-value extraction (previously copy-pasted
    four times for CG/CP/Margin/Comments) is factored into a local helper.
    """
    soup = BeautifulSoup(html, "lxml")

    def _label_value(tag, label: str) -> str:
        # Value following a <b>/<strong> label: prefer the raw text node
        # immediately after it; otherwise strip the label from the parent
        # element's text.
        if tag.next_sibling and isinstance(tag.next_sibling, str):
            return tag.next_sibling.strip()
        return tag.parent.get_text(strip=True).replace(label, "").strip()

    # Download URL: the site-internal '/file-...' link.
    dl_link = soup.find('a', href=lambda h: h and ('/file-' in h))
    file_url = None
    if dl_link:
        href = dl_link['href']
        file_url = href if href.startswith("http") else f"{BASE_URL}{href}"

    # External URL (e.g. Rocketry Forum): a download icon linking off-site.
    external_url = None
    for img in soup.find_all('img', src=lambda s: s and 'download.gif' in s):
        a = img.find_parent('a')
        if a and a.get('href') and '/file-' not in a['href']:
            external_url = a['href']
            break

    if not external_url:
        ext_link = soup.find('a', href=lambda h: h and 'rocketryforum.com' in h.lower())
        external_url = ext_link['href'] if ext_link else None

    # People
    contributor = _extract_person(soup, "Contributed")
    designer = _extract_person(soup, "Designer")

    # Metrics: labelled values following <b>/<strong> tags.
    cg_raw = cp_raw = margin_raw = comments = None
    for bold in soup.find_all(['b', 'strong']):
        text = bold.get_text(strip=True)
        if "CG:" in text:
            cg_raw = _label_value(bold, "CG:")
        elif "CP:" in text:
            cp_raw = _label_value(bold, "CP:")
        elif "Margin:" in text:
            margin_raw = _label_value(bold, "Margin:")
        elif "Comments:" in text:
            comments = _label_value(bold, "Comments:")

    cg_loc, cg_from = _parse_measure(cg_raw)
    cp_loc, cp_from = _parse_measure(cp_raw)
    margin_val, margin_status = _parse_margin(margin_raw)

    design_id = int(index_rec["id"])
    local_path = f"files/{format_ext}/{design_id:06d}.{format_ext}"

    return {
        "id": design_id,
        "format": index_rec["format"],
        "name": _normalize_string(index_rec.get("name")),
        "title": _normalize_string(index_rec.get("title")),
        "type": _normalize_string(index_rec.get("type")),
        "optimized": _normalize_string(index_rec.get("optimized")),
        "manufacturer": _normalize_string(index_rec.get("manufacturer")),
        "added_date": index_rec.get("added"),
        "url": index_rec.get("url"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "contributor": contributor,
        "designer": designer,
        "comments": _normalize_string(comments),
        "cg": {"location_in": cg_loc, "location_from": cg_from} if cg_loc is not None else None,
        "cp": {"location_in": cp_loc, "location_from": cp_from} if cp_loc is not None else None,
        "margin": margin_val,
        "margin_status": margin_status,
        "file_url": file_url,
        "external_url": external_url,
        "local_path": local_path if file_url else None,
    }
238
+
239
+
240
+ # ---------------------------------------------------------------------------
241
+ # Fetch helpers
242
+ # ---------------------------------------------------------------------------
243
+
244
def fetch_index(session: requests.Session, endpoint: dict) -> list[dict]:
    """Fetch one design index (OpenRocket or RockSim) and normalize its records.

    Each record gains a `format` field, an absolute `url`, and an internal
    `_ext` field carrying the file extension used for downloads.
    """
    log.info("Fetching %s index from %s", endpoint["format"], endpoint["url"])
    response = session.get(endpoint["url"], timeout=30)
    response.raise_for_status()
    raw_records = response.json().get("records", [])

    normalized = []
    for rec in raw_records:
        path = rec.get("url", "")
        full_url = f"{BASE_URL}{path}" if path.startswith("/") else path
        entry = dict(rec)
        entry["format"] = endpoint["format"]
        entry["url"] = full_url
        entry["_ext"] = endpoint["ext"]  # internal use
        normalized.append(entry)

    log.info("Index returned %d records for %s.", len(normalized), endpoint["format"])
    return normalized
264
+
265
def download_file(session: requests.Session, url: str, dest: Path) -> bool:
    """Stream *url* to *dest* on disk. Returns True if successful.

    On any request failure the partially written file is removed and
    False is returned.
    """
    try:
        response = session.get(url, stream=True, timeout=30)
        with response:
            response.raise_for_status()
            with dest.open("wb") as out:
                for piece in response.iter_content(chunk_size=8192):
                    out.write(piece)
        return True
    except requests.RequestException as exc:
        log.warning("Failed to download file %s: %s", url, exc)
        # Clean up partial file so a half-written download is never kept.
        if dest.exists():
            dest.unlink()
        return False
280
+
281
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """Scrape one design's detail page and download its design file.

    Returns the parsed metadata dict, or None when the design is already
    cached, the page fetch fails, or the binary file download fails.
    """
    design_id = int(index_rec["id"])
    dest_json = DETAIL_DIR / f"{design_id:06d}.json"
    format_ext = index_rec["_ext"]
    dest_file = FILES_DIR / format_ext / f"{design_id:06d}.{design_id and format_ext}"
    dest_file = FILES_DIR / format_ext / f"{design_id:06d}.{format_ext}"

    # Skip designs already scraped — unless the cached metadata promised a
    # local file that is missing on disk, in which case re-scrape to recover.
    if dest_json.exists() and not force:
        try:
            with dest_json.open("r", encoding="utf-8") as f:
                cached = json.load(f)
            if cached.get("local_path") is None or dest_file.exists():
                log.debug("Already scraped %s, skipping.", design_id)
                return None
        except (OSError, json.JSONDecodeError):
            pass  # unreadable cache — fall through and re-scrape

    url = index_rec["url"]
    rate.wait()

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch design %s: %s", design_id, exc)
        return None

    # Parse metadata
    metadata = _parse_detail(resp.text, index_rec, format_ext)

    # Download file if URL found
    file_url = metadata.get("file_url")
    if file_url:
        rate.wait()
        success = download_file(session, file_url, dest_file)
        if not success:
            # Without the binary the metadata's local_path would be a lie.
            log.warning("Could not download binary file for %s, skipping metadata.", design_id)
            return None
    elif metadata.get("external_url"):
        log.info("No direct file for %s, but external link found. Saving metadata.", design_id)
    else:
        log.warning("No file download link or external link found for %s on page %s", design_id, url)

    return metadata
329
+
330
+
331
+ # ---------------------------------------------------------------------------
332
+ # Main
333
+ # ---------------------------------------------------------------------------
334
+
335
def main() -> None:
    """Fetch design indexes, then scrape each detail page and its file."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com designs (OpenRocket & RockSim).")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape designs that already have saved files",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)
    (FILES_DIR / "ork").mkdir(parents=True, exist_ok=True)
    (FILES_DIR / "rkt").mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch indexes and write index.jsonl
    # ------------------------------------------------------------------
    all_records = []
    for endpoint in ENDPOINTS:
        records = fetch_index(session, endpoint)
        all_records.extend(records)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in all_records:
            # Strip out internal use `_ext` before writing to index.jsonl
            out_rec = {k: v for k, v in rec.items() if k != "_ext"}
            f.write(json.dumps({**out_rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d total index records to %s", len(all_records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page & download file
    # ------------------------------------------------------------------
    if args.limit:
        all_records = all_records[: args.limit]

    ok = skipped = failed = 0
    total = len(all_records)

    for i, rec in enumerate(all_records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None means cached already, or an unrecoverable fetch/download error.
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{int(rec['id']):06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved metadata %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Periodic progress heartbeat.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
419
+
420
+
421
+ if __name__ == "__main__":
422
+ main()
scripts/flights/01_scrape.py ADDED
@@ -0,0 +1,397 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_flights.py — Fetch all flight logs from RocketReviews.com and scrape
4
+ each detail page, saving structured JSON to source/flights/.
5
+
6
+ The index is fetched from a single date-range endpoint that returns all
7
+ flights at once. Detail pages are scraped only for flights that have an
8
+ image/detail URL. Flights without a detail page are saved with index fields
9
+ only and detail fields set to null.
10
+
11
+ Output
12
+ ------
13
+ source/flights/index.jsonl one record per flight (raw index fields)
14
+ source/flights/detail/{id}.json full parsed detail per flight
15
+
16
+ Usage
17
+ -----
18
+ python scripts/flights/01_scrape.py
19
+ python scripts/flights/01_scrape.py --delay 1.0 --limit 10
20
+ python scripts/flights/01_scrape.py --force # re-scrape existing files
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ import argparse
26
+ import json
27
+ import logging
28
+ import re
29
+ import sys
30
+ import time
31
+ from datetime import datetime, timezone
32
+ from pathlib import Path
33
+ from typing import Optional
34
+
35
+ import requests
36
+ from bs4 import BeautifulSoup, Tag
37
+ from requests.adapters import HTTPAdapter
38
+ from urllib3.util.retry import Retry
39
+
40
+ # ---------------------------------------------------------------------------
41
+ # Config
42
+ # ---------------------------------------------------------------------------
43
+
44
+ BASE_URL = "https://www.rocketreviews.com"
45
+ USER_AGENT = "RocketReviews-Dataset/1.0"
46
+ DEFAULT_DELAY = 1.0
47
+
48
+ # Fetch all flights from the earliest recorded date to today
49
+ _TODAY = datetime.now().strftime("%Y-%m-%d")
50
+ INDEX_URL = f"{BASE_URL}/data/flightlog/flightdates.php?search=2000-01-01:{_TODAY}"
51
+
52
+ ROOT = Path(__file__).parent.parent.parent
53
+ SOURCE_DIR = ROOT / "source" / "flights"
54
+ DETAIL_DIR = SOURCE_DIR / "detail"
55
+
56
+ # ---------------------------------------------------------------------------
57
+ # Logging
58
+ # ---------------------------------------------------------------------------
59
+
60
+ logging.basicConfig(
61
+ level=logging.INFO,
62
+ format="%(asctime)s %(levelname)s %(message)s",
63
+ handlers=[logging.StreamHandler(sys.stdout)],
64
+ )
65
+ log = logging.getLogger(__name__)
66
+
67
+ # ---------------------------------------------------------------------------
68
+ # HTTP session
69
+ # ---------------------------------------------------------------------------
70
+
71
+
72
def _build_session() -> requests.Session:
    """Create an HTTP session with our User-Agent and retry-on-error policy."""
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    for scheme in ("https://", "http://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session
84
+
85
+
86
class RateLimiter:
    """Enforce a minimum delay between successive ``wait()`` calls."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0  # monotonic timestamp of the previous wait()

    def wait(self) -> None:
        """Sleep just long enough to honor the configured delay."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
96
+
97
+
98
+ # ---------------------------------------------------------------------------
99
+ # Parsing helpers
100
+ # ---------------------------------------------------------------------------
101
+
102
+
103
+ def _re_first(pattern: str, text: str, group: int = 1) -> Optional[str]:
104
+ m = re.search(pattern, text)
105
+ return m.group(group) if m else None
106
+
107
+
108
def _after_strong(soup: BeautifulSoup, label: str) -> Optional[str]:
    """
    Find '<strong>{label}:</strong>' and return the text immediately after it,
    stopping at the next <strong> tag.

    Returns None when the label is absent or the trailing text is empty.
    """
    # The label may or may not carry a trailing colon; match case-insensitively.
    strong = soup.find("strong", string=re.compile(rf"^{label}:?$", re.I))
    if not strong:
        return None
    text = ""
    for sib in strong.next_siblings:
        # The next <strong> starts the following labeled field — stop there.
        if isinstance(sib, Tag) and sib.name == "strong":
            break
        # Accumulate both tag text and bare string nodes.
        text += sib.get_text() if isinstance(sib, Tag) else str(sib)
    return text.strip() or None
122
+
123
+
124
+ def _parse_altitude(raw: Optional[str]) -> Optional[float]:
125
+ """
126
+ Parse altitude from the raw index string. Returns feet as a float or None.
127
+
128
+ Handles:
129
+ "16,504 Feet" -> 16504.0
130
+ "600-700 Feet" -> 650.0 (midpoint of range)
131
+ "-" -> None
132
+ """
133
+ if not raw or raw.strip() == "-":
134
+ return None
135
+ cleaned = raw.replace(",", "").lower()
136
+ # Range: "600-700 feet" -> midpoint
137
+ m = re.search(r"(\d+(?:\.\d+)?)\s*-\s*(\d+(?:\.\d+)?)", cleaned)
138
+ if m:
139
+ return (float(m.group(1)) + float(m.group(2))) / 2
140
+ # Single value
141
+ m = re.search(r"(\d+(?:\.\d+)?)", cleaned)
142
+ return float(m.group(1)) if m else None
143
+
144
+
145
def _parse_conditions(soup: BeautifulSoup) -> dict:
    """Extract weather conditions from labeled strong fields.

    Returns a dict with keys wind_speed_mph, wind_direction, temperature_f;
    each value is None when its field is absent or unparseable.
    """
    wind_speed_raw = _after_strong(soup, "Wind Speed")
    wind_speed = None
    if wind_speed_raw:
        # First numeric token in the raw text.
        m = re.search(r"(\d+(?:\.\d+)?)", wind_speed_raw)
        wind_speed = float(m.group(1)) if m else None

    wind_dir_raw = _after_strong(soup, "Wind Direction")
    wind_dir = None
    if wind_dir_raw:
        # "From the WSW" -> "WSW"; otherwise keep the raw text as-is.
        m = re.search(r"from\s+the\s+(\w+)", wind_dir_raw, re.I)
        wind_dir = m.group(1).upper() if m else wind_dir_raw.strip()

    temp_raw = _after_strong(soup, "Temperature")
    temp = None
    if temp_raw:
        m = re.search(r"(\d+(?:\.\d+)?)", temp_raw)
        temp = float(m.group(1)) if m else None

    return {
        "wind_speed_mph": wind_speed,
        "wind_direction": wind_dir,
        "temperature_f": temp,
    }
171
+
172
+
173
def _parse_notes(soup: BeautifulSoup) -> Optional[str]:
    """
    Extract the narrative flight description. Notes appear as plain <p> tags
    in the main content area with no section heading.

    NOTE(review): this keeps every <p> longer than 20 characters anywhere on
    the page, so unrelated boilerplate paragraphs may be swept in — confirm
    against representative pages.
    """
    paragraphs = [
        p.get_text(strip=True)
        for p in soup.find_all("p")
        if len(p.get_text(strip=True)) > 20  # skip short/navigation fragments
    ]
    return " ".join(paragraphs) if paragraphs else None
184
+
185
+
186
def _parse_rocket_url(html: str) -> Optional[str]:
    """
    Extract the URL for the flyer's specific rocket instance.
    These use a 12-digit timestamp-based suffix, e.g.:
      /aerospace-specialty-products-sky-ferry-250708140332.html
    """
    match = re.search(r'href="(/[a-z][a-z0-9-]+-\d{10,}\.html)"', html)
    if not match:
        return None
    return f"{BASE_URL}{match.group(1)}"
194
+
195
+
196
def _parse_motor_url(html: str, motors: str) -> Optional[str]:
    """
    Extract the motor product page URL using the first motor designation
    as a search anchor. Motor URLs embed the designation slug, e.g.:
      /estes-d12-5112.html for motor "D12-5"
    """
    if not motors:
        return None

    # Multi-motor configs are joined with '+'; anchor on the first motor.
    primary = motors.split("+")[0].strip()
    slug = re.sub(r"[^a-z0-9]+", "-", primary.lower()).strip("-")

    # Match a link href containing the motor slug prefix.
    pattern = rf'href="(/[a-z0-9-]*{re.escape(slug[:6])}[a-z0-9-]*\.html)"'
    match = re.search(pattern, html)
    if not match:
        return None
    return f"{BASE_URL}{match.group(1)}"
213
+
214
+
215
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Parse a flight detail page into a structured record.

    Index fields (date, flyer, rocket, kit, motors, altitude) are carried
    through; the HTML supplies launch site, conditions, notes, and links.
    """
    soup = BeautifulSoup(html, "lxml")
    image_path = index_rec.get("image", "")

    return {
        "id": int(index_rec["id"]),
        "url": f"{BASE_URL}{image_path}" if image_path else None,
        "date": index_rec.get("date"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "flyer": index_rec.get("flyer"),
        "rocket": index_rec.get("rocket"),
        "rocket_url": _parse_rocket_url(html),
        "kit": index_rec.get("kit") or None,        # empty string -> None
        "motors": index_rec.get("motors") or None,  # empty string -> None
        "motor_url": _parse_motor_url(html, index_rec.get("motors", "")),
        "altitude_raw": index_rec.get("altitude") or None,
        "altitude_ft": _parse_altitude(index_rec.get("altitude")),
        "launch_site": _after_strong(soup, "Launch Site"),
        "conditions": _parse_conditions(soup),
        "notes": _parse_notes(soup),
    }
237
+
238
+
239
def _index_only_record(index_rec: dict) -> dict:
    """
    Build a detail record from index fields only, for flights that have no
    detail page. Fields that only the detail page provides are set to None.
    """
    record = {
        "id": int(index_rec["id"]),
        "url": None,
        "date": index_rec.get("date"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "flyer": index_rec.get("flyer"),
        "rocket": index_rec.get("rocket"),
        "rocket_url": None,
        "kit": index_rec.get("kit") or None,
        "motors": index_rec.get("motors") or None,
        "motor_url": None,
        "altitude_raw": index_rec.get("altitude") or None,
        "altitude_ft": _parse_altitude(index_rec.get("altitude")),
        "launch_site": None,
        # Conditions come only from the detail page, so all are unknown here.
        "conditions": dict.fromkeys(
            ("wind_speed_mph", "wind_direction", "temperature_f")
        ),
        "notes": None,
    }
    return record
265
+
266
+
267
+ # ---------------------------------------------------------------------------
268
+ # Fetch helpers
269
+ # ---------------------------------------------------------------------------
270
+
271
+
272
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the complete flight-log index as a list of raw records."""
    log.info("Fetching flight index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    payload = response.json()
    records = payload.get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
279
+
280
+
281
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """Scrape one flight's detail page into a structured record.

    Returns None when the flight is already cached (and *force* is not set).
    Flights with no detail page, and flights whose page fetch fails, fall
    back to an index-only record so no flight is lost.

    Fix: the return annotation was ``dict`` but the cached-skip path returns
    None — corrected to ``Optional[dict]`` to match the sibling scrapers.
    """
    flight_id = index_rec["id"]
    dest = DETAIL_DIR / f"{int(flight_id):06d}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", flight_id)
        return None

    image_path = index_rec.get("image", "")
    if not image_path:
        # No detail page — save index-only record without making a request
        return _index_only_record(index_rec)

    url = f"{BASE_URL}{image_path}"
    rate.wait()

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch flight %s: %s", flight_id, exc)
        return _index_only_record(index_rec)

    return _parse_detail(resp.text, index_rec)
310
+
311
+
312
+ # ---------------------------------------------------------------------------
313
+ # Main
314
+ # ---------------------------------------------------------------------------
315
+
316
+
317
def main() -> None:
    """Fetch the full flight index, then scrape every flight's detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com flight logs.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many records (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape flights that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch and write the full index
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None means "already cached" — nothing to write.
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{int(rec['id']):06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Periodic progress heartbeat.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
394
+
395
+
396
+ if __name__ == "__main__":
397
+ main()
scripts/glossary/01_scrape.py ADDED
@@ -0,0 +1,301 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_glossary.py — Fetch the RocketReviews.com glossary index and scrape each
4
+ detail page, saving structured JSON to source/glossary/.
5
+
6
+ Output
7
+ ------
8
+ source/glossary/index.jsonl one record per term (raw index fields)
9
+ source/glossary/detail/{slug}.json full parsed detail per term
10
+
11
+ Usage
12
+ -----
13
+ python scripts/glossary/01_scrape.py
14
+ python scripts/glossary/01_scrape.py --delay 1.0 --limit 10
15
+ python scripts/glossary/01_scrape.py --force # re-scrape existing files
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import argparse
21
+ import json
22
+ import logging
23
+ import re
24
+ import sys
25
+ import time
26
+ from datetime import datetime, timezone
27
+ from pathlib import Path
28
+ from typing import Optional
29
+
30
+ import requests
31
+ from bs4 import BeautifulSoup, Tag
32
+ from requests.adapters import HTTPAdapter
33
+ from urllib3.util.retry import Retry
34
+
35
+ # ---------------------------------------------------------------------------
36
+ # Config
37
+ # ---------------------------------------------------------------------------
38
+
39
+ BASE_URL = "https://www.rocketreviews.com"
40
+ INDEX_URL = f"{BASE_URL}/glossary.html"
41
+ USER_AGENT = "RocketReviews-Dataset/1.0"
42
+ DEFAULT_DELAY = 1.0
43
+
44
+ ROOT = Path(__file__).parent.parent.parent
45
+ SOURCE_DIR = ROOT / "source" / "glossary"
46
+ DETAIL_DIR = SOURCE_DIR / "detail"
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # Logging
50
+ # ---------------------------------------------------------------------------
51
+
52
+ logging.basicConfig(
53
+ level=logging.INFO,
54
+ format="%(asctime)s %(levelname)s %(message)s",
55
+ handlers=[logging.StreamHandler(sys.stdout)],
56
+ )
57
+ log = logging.getLogger(__name__)
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # HTTP session
61
+ # ---------------------------------------------------------------------------
62
+
63
+
64
def _build_session() -> requests.Session:
    """Create an HTTP session with our User-Agent and retry-on-error policy."""
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    for scheme in ("https://", "http://"):
        session.mount(scheme, HTTPAdapter(max_retries=retry_policy))
    return session
76
+
77
+
78
class RateLimiter:
    """Enforce a minimum delay between successive ``wait()`` calls."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0  # monotonic timestamp of the previous wait()

    def wait(self) -> None:
        """Sleep just long enough to honor the configured delay."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
88
+
89
+
90
+ # ---------------------------------------------------------------------------
91
+ # Index parsing
92
+ # ---------------------------------------------------------------------------
93
+
94
+
95
+ def _slug_from_path(path: str) -> str:
96
+ """
97
+ Extract the slug from a url path.
98
+ e.g. '/1010-extrusion-230702231652.html' -> '1010-extrusion'
99
+ """
100
+ name = path.lstrip("/").removesuffix(".html")
101
+ name = re.sub(r"-\d{10,}$", "", name)
102
+ return name
103
+
104
+
105
def _parse_index(html: str) -> list[dict]:
    """
    Parse the glossary.html static page and return one record
    per term with name and url.

    Each record carries: slug, absolute url, term text, and an optional
    short description pulled from the blockquote following the entry link.
    """
    soup = BeautifulSoup(html, "lxml")
    records = []

    # Glossary terms are usually in <a class="entry">
    for link in soup.find_all("a", class_="entry"):
        path = link.get("href")
        if not path or not path.endswith(".html"):
            continue

        name = link.get_text(strip=True)
        if not name:
            continue

        slug = _slug_from_path(path)
        url = f"{BASE_URL}{path}" if path.startswith("/") else path

        # Find the short description in the following blockquote
        description = None
        parent_p = link.find_parent("p")
        if parent_p:
            blockquote = parent_p.find_next_sibling("blockquote")
            if blockquote:
                # Remove the [Read More] link text
                read_more = blockquote.find("a", string=re.compile(r"Read More", re.I))
                if read_more:
                    # Also try to remove the surrounding brackets if they exist as text nodes
                    # (edit the tree in place before extracting the link itself).
                    for sib in read_more.previous_siblings:
                        if isinstance(sib, str) and "[" in sib:
                            sib.replace_with(sib.replace("[", ""))
                    for sib in read_more.next_siblings:
                        if isinstance(sib, str) and "]" in sib:
                            sib.replace_with(sib.replace("]", ""))
                    read_more.extract()
                description = blockquote.get_text(separator=" ", strip=True) or None

        records.append({
            "slug": slug,
            "url": url,
            "term": name,
            "short_description": description,
        })

    return records
153
+
154
+
155
+ # ---------------------------------------------------------------------------
156
+ # Detail page parsing
157
+ # ---------------------------------------------------------------------------
158
+
159
+
160
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Merge the index record with the full description from the detail page."""
    soup = BeautifulSoup(html, "lxml")

    article = soup.find("div", class_="article")
    description = article.get_text(separator="\n", strip=True) if article else None

    record = dict(index_rec)
    record["scraped_at"] = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    record["description"] = description
    return record
174
+
175
+
176
+ # ---------------------------------------------------------------------------
177
+ # Fetch helpers
178
+ # ---------------------------------------------------------------------------
179
+
180
+
181
def fetch_index(session: requests.Session) -> list[dict]:
    """Fetch and parse the static glossary index page."""
    log.info("Fetching glossary index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    entries = _parse_index(response.text)
    log.info("Index returned %d records.", len(entries))
    return entries
188
+
189
+
190
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """Fetch and parse one glossary term's detail page.

    Returns None when the term is already cached (unless *force* is set)
    or when the HTTP request fails.
    """
    slug = index_rec["slug"]
    dest = DETAIL_DIR / f"{slug}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", slug)
        return None

    rate.wait()
    try:
        resp = session.get(index_rec["url"], timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch glossary term %s: %s", slug, exc)
        return None

    return _parse_detail(resp.text, index_rec)
214
+
215
+
216
+ # ---------------------------------------------------------------------------
217
+ # Main
218
+ # ---------------------------------------------------------------------------
219
+
220
+
221
def main() -> None:
    """Fetch the glossary index, then scrape every term's detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com glossary.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape terms that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index and write index.jsonl
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None means the term was already cached or its fetch failed.
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{rec['slug']}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Periodic progress heartbeat.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
298
+
299
+
300
+ if __name__ == "__main__":
301
+ main()
scripts/manufacturers/01_scrape.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_manufacturers.py — Scrape the RocketReviews.com manufacturers list and
4
+ each detail page, saving structured JSON to source/manufacturers/.
5
+
6
+ The index is a single static HTML page (/manufacturers-by-name.html) with
7
+ alphabetical sections. Detail pages add AKA names, operational dates, and
8
+ product listings.
9
+
10
+ Output
11
+ ------
12
+ source/manufacturers/index.jsonl one record per manufacturer
13
+ source/manufacturers/detail/{slug}.json full parsed detail per manufacturer
14
+
15
+ Usage
16
+ -----
17
+ python scripts/manufacturers/01_scrape.py
18
+ python scripts/manufacturers/01_scrape.py --delay 1.0 --limit 10
19
+ python scripts/manufacturers/01_scrape.py --force # re-scrape existing files
20
+ """
21
+
22
+ from __future__ import annotations
23
+
24
+ import argparse
25
+ import json
26
+ import logging
27
+ import re
28
+ import sys
29
+ import time
30
+ from datetime import datetime, timezone
31
+ from pathlib import Path
32
+ from typing import Optional
33
+
34
+ import requests
35
+ from bs4 import BeautifulSoup, NavigableString, Tag
36
+ from requests.adapters import HTTPAdapter
37
+ from urllib3.util.retry import Retry
38
+
39
+ # ---------------------------------------------------------------------------
40
+ # Config
41
+ # ---------------------------------------------------------------------------
42
+
43
+ BASE_URL = "https://www.rocketreviews.com"
44
+ INDEX_PATH = "/manufacturers-by-name.html"
45
+ USER_AGENT = "RocketReviews-Dataset/1.0"
46
+ DEFAULT_DELAY = 1.0
47
+
48
+ ROOT = Path(__file__).parent.parent.parent
49
+ SOURCE_DIR = ROOT / "source" / "manufacturers"
50
+ DETAIL_DIR = SOURCE_DIR / "detail"
51
+
52
+ # ---------------------------------------------------------------------------
53
+ # Logging
54
+ # ---------------------------------------------------------------------------
55
+
56
+ logging.basicConfig(
57
+ level=logging.INFO,
58
+ format="%(asctime)s %(levelname)s %(message)s",
59
+ handlers=[logging.StreamHandler(sys.stdout)],
60
+ )
61
+ log = logging.getLogger(__name__)
62
+
63
+ # ---------------------------------------------------------------------------
64
+ # HTTP session
65
+ # ---------------------------------------------------------------------------
66
+
67
+
68
def _build_session() -> requests.Session:
    """Create a requests Session with a custom User-Agent and GET retries."""
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    retry_policy = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    # Mount the same retrying adapter for both schemes.
    for scheme in ("https://", "http://"):
        session.mount(scheme, adapter)
    return session
80
+
81
+
82
class RateLimiter:
    """Enforce a minimum interval between successive calls to wait()."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        # Monotonic timestamp of the previous wait(); 0.0 means "never".
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep just long enough so calls are at least `delay` seconds apart."""
        remaining = self.delay - (time.monotonic() - self._last)
        if remaining > 0:
            time.sleep(remaining)
        self._last = time.monotonic()
92
+
93
+
94
+ # ---------------------------------------------------------------------------
95
+ # Index parsing
96
+ # ---------------------------------------------------------------------------
97
+
98
+
99
+ def _slug_from_path(path: str) -> str:
100
+ """
101
+ Extract the slug from a manufacturer URL path.
102
+ e.g. '/estes-230703173737.html' -> 'estes'
103
+ '/aardvark-rockets-230703173737.html' -> 'aardvark-rockets'
104
+ """
105
+ # Strip leading slash and .html
106
+ name = path.lstrip("/").removesuffix(".html")
107
+ # Remove trailing -NNNNNNNNNNNN timestamp (12-digit numeric suffix)
108
+ name = re.sub(r"-\d{10,}$", "", name)
109
+ return name
110
+
111
+
112
def _parse_index(html: str) -> list[dict]:
    """
    Parse the manufacturers-by-name.html static page and return one record
    per manufacturer with name, url, description, and external website.
    """
    soup = BeautifulSoup(html, "lxml")
    records = []

    # Internal detail links look like '/<slug>-<10+ digit timestamp>.html'.
    for link in soup.find_all("a", href=re.compile(r"^/[a-z][a-z0-9-]+-\d{10,}\.html$")):
        path = link["href"]
        name = link.get_text(strip=True)
        if not name:
            continue

        slug = _slug_from_path(path)
        url = f"{BASE_URL}{path}"

        # Description: text node(s) immediately after the link, before <br> or next <a>
        description_parts = []
        for sib in link.next_siblings:
            if isinstance(sib, Tag):
                if sib.name in ("a", "br"):
                    break
                text = sib.get_text(strip=True)
                if text:
                    description_parts.append(text)
            elif isinstance(sib, NavigableString):
                # Strip separator punctuation (em/en dashes) around loose text.
                text = str(sib).strip(" \t\r\n—–-").strip()
                if text:
                    description_parts.append(text)

        description = " ".join(description_parts).strip() or None

        # External website link following the manufacturer link.
        # NOTE(review): only the first following <a> is inspected; if it is
        # not an external non-rocketreviews link, website stays None —
        # confirm this matches the page layout.
        website = None
        for sib in link.next_siblings:
            if isinstance(sib, Tag) and sib.name == "a":
                href = sib.get("href", "")
                if href.startswith("http") and "rocketreviews.com" not in href:
                    website = href
                break

        records.append({
            "slug": slug,
            "url": url,
            "name": name,
            "description": description,
            "website": website,
        })

    return records
163
+
164
+
165
+ # ---------------------------------------------------------------------------
166
+ # Detail page parsing
167
+ # ---------------------------------------------------------------------------
168
+
169
+
170
def _after_strong(soup: BeautifulSoup, label: str) -> Optional[str]:
    """
    Find '<strong>{label}:</strong>' and return the text immediately after it,
    stopping at the next tag boundary.

    Returns None when the label is absent or the trailing text is empty.
    """
    # Match the label with or without a trailing colon, case-insensitively.
    strong = soup.find("strong", string=re.compile(rf"^{re.escape(label)}:?$", re.I))
    if not strong:
        return None
    text = ""
    # Accumulate sibling text until a structural tag ends the value.
    for sib in strong.next_siblings:
        if isinstance(sib, Tag) and sib.name in ("strong", "h4", "p", "br"):
            break
        text += sib.get_text() if isinstance(sib, Tag) else str(sib)
    return text.strip() or None
184
+
185
+
186
def _parse_h4_list(soup: BeautifulSoup, heading: str) -> list[str]:
    """
    Find '<h4>{heading}:</h4>' (colon optional, case-insensitive) and return
    the non-empty stripped text of each <li> in the following <ul>.
    """
    header = soup.find("h4", string=re.compile(rf"^{re.escape(heading)}:?$", re.I))
    if header is None:
        return []
    items = header.find_next_sibling("ul")
    if items is None:
        return []
    texts = (li.get_text(strip=True) for li in items.find_all("li"))
    return [t for t in texts if t]
197
+
198
+
199
def _parse_h4_link_list(soup: BeautifulSoup, heading: str) -> list[dict]:
    """
    Find '<h4>{heading}:</h4>' and return {name, url} dicts for each linked
    <li> in the following <ul>. Relative hrefs are made absolute.
    """
    header = soup.find("h4", string=re.compile(rf"^{re.escape(heading)}:?$", re.I))
    if header is None:
        return []
    items = header.find_next_sibling("ul")
    if items is None:
        return []
    links = []
    for item in items.find_all("li"):
        anchor = item.find("a")
        if not (anchor and anchor.get("href")):
            continue
        href = anchor["href"]
        if not href.startswith("http"):
            href = f"{BASE_URL}{href}"
        links.append({
            "name": anchor.get_text(strip=True) or None,
            "url": href,
        })
    return links
220
+
221
+
222
def _parse_description(soup: BeautifulSoup) -> Optional[str]:
    """
    Join the text of every <p> longer than 30 characters into one string.

    NOTE(review): despite the original intent ("excluding boilerplate"),
    the only filter applied is the 30-character length threshold, so
    navigation/header paragraphs may still be included. This helper is
    also currently unused by _parse_detail — confirm before relying on it.
    """
    paragraphs = [
        p.get_text(separator=" ", strip=True)
        for p in soup.find_all("p")
        if len(p.get_text(strip=True)) > 30
    ]
    return " ".join(paragraphs) if paragraphs else None
233
+
234
+
235
def _parse_detail(html: str, index_rec: dict) -> dict:
    """
    Merge index-level fields with detail page data.

    Adds AKA names, operational dates, product links, and a UTC
    scraped_at timestamp on top of the index record.
    """
    soup = BeautifulSoup(html, "lxml")

    began = _after_strong(soup, "Began Operations")
    ceased = _after_strong(soup, "Ceased Operations")

    # Normalize "?" / "-" / empty placeholder values to None
    if began and began.strip() in ("?", "-", ""):
        began = None
    if ceased and ceased.strip() in ("?", "-", ""):
        ceased = None

    aka = _parse_h4_list(soup, "AKA")
    products = _parse_h4_link_list(soup, "Products")

    return {
        **index_rec,
        # Per-detail timestamp (UTC, second resolution, ISO-8601 'Z' suffix)
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "aka": aka,
        "began_operations": began,
        "ceased_operations": ceased,
        "products": products,
    }
259
+
260
+
261
+ # ---------------------------------------------------------------------------
262
+ # Fetch helpers
263
+ # ---------------------------------------------------------------------------
264
+
265
+
266
def fetch_index(session: requests.Session) -> list[dict]:
    """
    Download the static index page and return parsed manufacturer records.

    Raises requests.HTTPError on a non-2xx response (after the session's
    built-in retries are exhausted).
    """
    url = f"{BASE_URL}{INDEX_PATH}"
    log.info("Fetching manufacturer index from %s", url)
    resp = session.get(url, timeout=30)
    resp.raise_for_status()
    records = _parse_index(resp.text)
    log.info("Index returned %d records.", len(records))
    return records
274
+
275
+
276
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one manufacturer detail page.

    Returns the merged detail dict, or None when the page was already
    scraped (and force is False) or the HTTP fetch failed — the caller
    counts both cases as "skipped".
    """
    slug = index_rec["slug"]
    dest = DETAIL_DIR / f"{slug}.json"

    # Skip work if a detail file already exists, unless --force was given.
    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", slug)
        return None

    url = index_rec["url"]
    rate.wait()  # honor the configured per-request delay

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        # Best-effort: log and move on rather than aborting the whole run.
        log.warning("Failed to fetch manufacturer %s: %s", slug, exc)
        return None

    return _parse_detail(resp.text, index_rec)
300
+
301
+
302
+ # ---------------------------------------------------------------------------
303
+ # Main
304
+ # ---------------------------------------------------------------------------
305
+
306
+
307
def main() -> None:
    """CLI entry point: write index.jsonl, then scrape each detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com manufacturers.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape manufacturers that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index and write index.jsonl
    # ------------------------------------------------------------------
    records = fetch_index(session)

    # One shared timestamp for all index records of this run.
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # NOTE: --limit truncates only this detail pass; the full index has
    # already been written above.
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None covers both "already on disk" and "fetch failed".
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{rec['slug']}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Periodic progress line every 25 items and at the end.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()
scripts/motors/01_scrape.py ADDED
@@ -0,0 +1,399 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_motors.py — Fetch the RocketReviews.com motor index and scrape each
4
+ detail page, saving structured JSON to source/motors/.
5
+
6
+ The index API already contains ~30 fields per motor. Detail pages add thrust
7
+ curve coordinates and propellant/formula/casing data.
8
+
9
+ Output
10
+ ------
11
+ source/motors/index.jsonl one record per motor (raw index fields)
12
+ source/motors/detail/{id}.json full parsed detail per motor
13
+
14
+ Usage
15
+ -----
16
+ python scripts/motors/01_scrape.py
17
+ python scripts/motors/01_scrape.py --delay 1.0 --limit 10
18
+ python scripts/motors/01_scrape.py --force # re-scrape existing files
19
+ """
20
+
21
+ from __future__ import annotations
22
+
23
+ import argparse
24
+ import json
25
+ import logging
26
+ import re
27
+ import sys
28
+ import time
29
+ from datetime import datetime, timezone
30
+ from pathlib import Path
31
+ from typing import Optional
32
+ from urllib.parse import quote
33
+
34
+ import requests
35
+ from bs4 import BeautifulSoup, Tag
36
+ from requests.adapters import HTTPAdapter
37
+ from urllib3.util.retry import Retry
38
+
39
+ # ---------------------------------------------------------------------------
40
+ # Config
41
+ # ---------------------------------------------------------------------------
42
+
43
+ BASE_URL = "https://www.rocketreviews.com"
44
+ INDEX_URL = f"{BASE_URL}/data/motors/motors.php?search=&status="
45
+ USER_AGENT = "RocketReviews-Dataset/1.0"
46
+ DEFAULT_DELAY = 1.0
47
+
48
+ ROOT = Path(__file__).parent.parent.parent
49
+ SOURCE_DIR = ROOT / "source" / "motors"
50
+ DETAIL_DIR = SOURCE_DIR / "detail"
51
+
52
+ # Fields where "0.0000" means no data and should be stored as null
53
+ _ZERO_AS_NULL = {"MassFraction", "SpecificImpulse", "ThroatDiameter", "ExitDiameter"}
54
+
55
+ # ---------------------------------------------------------------------------
56
+ # Logging
57
+ # ---------------------------------------------------------------------------
58
+
59
+ logging.basicConfig(
60
+ level=logging.INFO,
61
+ format="%(asctime)s %(levelname)s %(message)s",
62
+ handlers=[logging.StreamHandler(sys.stdout)],
63
+ )
64
+ log = logging.getLogger(__name__)
65
+
66
+ # ---------------------------------------------------------------------------
67
+ # HTTP session
68
+ # ---------------------------------------------------------------------------
69
+
70
+
71
def _build_session() -> requests.Session:
    """Create a requests Session with a custom User-Agent and GET retries."""
    s = requests.Session()
    s.headers["User-Agent"] = USER_AGENT
    # Retry transient server/rate-limit errors with exponential backoff.
    retry = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    s.mount("https://", HTTPAdapter(max_retries=retry))
    s.mount("http://", HTTPAdapter(max_retries=retry))
    return s
83
+
84
+
85
class RateLimiter:
    """Enforce a minimum interval between successive calls to wait()."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        # Monotonic timestamp of the previous wait(); 0.0 means "never".
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep just long enough so calls are at least `delay` seconds apart."""
        elapsed = time.monotonic() - self._last
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self._last = time.monotonic()
95
+
96
+
97
+ # ---------------------------------------------------------------------------
98
+ # Index field parsers
99
+ # ---------------------------------------------------------------------------
100
+
101
+
102
+ def _strip_units(val: str) -> Optional[float]:
103
+ """Extract the leading float from a string like '18.0 mm' or '10.75 N'."""
104
+ if not val or not val.strip():
105
+ return None
106
+ m = re.match(r"\s*([0-9]+(?:\.[0-9]+)?)", val.strip())
107
+ return float(m.group(1)) if m else None
108
+
109
+
110
def _zero_as_null(val: str) -> Optional[float]:
    """Parse a float, treating zero (the site's missing-data sentinel) as None."""
    parsed = _strip_units(val)
    if not parsed:  # None or 0.0 both mean "no data"
        return None
    return parsed
114
+
115
+
116
+ def _yn_bool(val: str) -> Optional[bool]:
117
+ """Convert 'Y'/'N'/'' to True/False/None."""
118
+ if val == "Y":
119
+ return True
120
+ if val == "N":
121
+ return False
122
+ return None
123
+
124
+
125
+ def _parse_delays(val: str) -> list[str]:
126
+ """Split delay string into a list, handling plugged/variable formats."""
127
+ if not val or not val.strip():
128
+ return []
129
+ return [d.strip() for d in val.split(",") if d.strip()]
130
+
131
+
132
def _motor_url(designation: str, motor_id: str) -> str:
    """Build the motor detail-page URL for a designation / Motor_ID pair."""
    query = f"autoredir={quote(designation)}&action=displaymotor&motorid={motor_id}"
    return f"{BASE_URL}/index.php?{query}"
137
+
138
+
139
def _parse_index_record(rec: dict) -> dict:
    """
    Transform a raw API record into a cleaned index-level document.

    Empty strings become None, 'Y'/'N' flags become booleans, and numeric
    fields are parsed with units stripped; fields where the API uses
    "0.0000" as a missing-data sentinel go through _zero_as_null.
    """
    motor_id = int(rec["Motor_ID"])
    designation = rec.get("Designation", "").strip()
    kit_id_raw = rec.get("Kit_ID", "0").strip()
    # Kit_ID "0" is the API's "no associated kit" sentinel.
    kit_url = (
        f"{BASE_URL}/product-{kit_id_raw}.html"
        if kit_id_raw and kit_id_raw != "0"
        else None
    )

    return {
        "id": motor_id,
        "url": _motor_url(designation, rec["Motor_ID"]),
        "kit_url": kit_url,
        "title": rec.get("Title", "").strip() or None,
        "designation": designation or None,
        "short_name": rec.get("ShortName", "").strip() or None,
        "letter": rec.get("Letter", "").strip() or None,
        "equiv": rec.get("Equiv", "").strip() or None,
        "manufacturer": rec.get("Manufacturer", "").strip() or None,
        "motor_type": rec.get("MotorType", "").strip() or None,
        "in_production": _yn_bool(rec.get("InProduction", "")),
        "custom": _yn_bool(rec.get("Custom", "")),
        "last_updated": rec.get("LastUpdated", "").strip() or None,
        # A Thrustcurve_ID of 0 means "no thrustcurve.org entry".
        "thrustcurve_id": int(rec.get("Thrustcurve_ID", 0)) or None,
        "delays": _parse_delays(rec.get("Delays", "")),
        "variable_delay": _yn_bool(rec.get("VariableDelay", "")),
        "description": rec.get("Description", "").strip() or None,
        "physical": {
            "diameter_mm": _strip_units(rec.get("Diameter", "")),
            "length_mm": _zero_as_null(rec.get("Length", "")),
            "propellant_weight_g": _zero_as_null(rec.get("PropellantWeight", "")),
            "total_weight_g": _zero_as_null(rec.get("TotalWeight", "")),
            "throat_diameter_mm": _zero_as_null(rec.get("ThroatDiameter", "")),
            "exit_diameter_mm": _zero_as_null(rec.get("ExitDiameter", "")),
        },
        "performance": {
            "average_thrust_n": _strip_units(rec.get("AverageThrust", "")),
            "peak_thrust_n": _strip_units(rec.get("PeakThrust", "")),
            "total_impulse_ns": _strip_units(rec.get("TotalImpulse", "")),
            "thrust_duration_s": _strip_units(rec.get("ThrustDuration", "")),
            "mass_fraction": _zero_as_null(rec.get("MassFraction", "")),
            "specific_impulse": _zero_as_null(rec.get("SpecificImpulse", "")),
        },
    }
185
+
186
+
187
+ # ---------------------------------------------------------------------------
188
+ # Detail page parsers
189
+ # ---------------------------------------------------------------------------
190
+
191
+
192
+ def _parse_thrust_curve(html: str) -> list[dict]:
193
+ """
194
+ Extract thrust curve coordinate pairs from the Google Charts
195
+ arrayToDataTable call embedded in the page scripts.
196
+
197
+ Format in source:
198
+ var data = google.visualization.arrayToDataTable([
199
+ ['Time (seconds)', 'Thrust (N)'],
200
+ [0.0, 0.0],
201
+ [0.13, 20.61],
202
+ ...
203
+ ]);
204
+ """
205
+ # Extract the full arrayToDataTable block
206
+ m = re.search(
207
+ r"arrayToDataTable\(\s*\[(.*?)\]\s*\)",
208
+ html,
209
+ re.DOTALL,
210
+ )
211
+ if not m:
212
+ return []
213
+
214
+ content = m.group(1)
215
+ # Find all [number, number] pairs, skipping the string header row
216
+ pairs = re.findall(
217
+ r"\[\s*(-?\d+(?:\.\d+)?)\s*,\s*(-?\d+(?:\.\d+)?)\s*\]",
218
+ content,
219
+ )
220
+ return [{"time_s": float(t), "thrust_n": float(n)} for t, n in pairs]
221
+
222
+
223
+ def _parse_linked_field(html: str, pattern: str) -> Optional[dict]:
224
+ """
225
+ Extract a name+url pair from a href matching the given pattern.
226
+ e.g. pattern r'/propellant([^.]+)\.html' → {name, url}
227
+ """
228
+ m = re.search(pattern, html)
229
+ if not m:
230
+ return None
231
+ path = m.group(0) if m.lastindex == 0 else None
232
+ # Re-search for the full href
233
+ href_m = re.search(rf'href="({re.escape(m.group(0)) if m.lastindex == 0 else m.group(1)})"', html)
234
+ return None # fallback — use _parse_linked_href instead
235
+
236
+
237
def _parse_linked_href(
    soup: BeautifulSoup, href_pattern: str
) -> Optional[dict]:
    """
    Locate the first <a> whose href matches href_pattern and return
    {"name": link text or None, "url": absolute URL}; None when absent.
    """
    anchor = soup.find("a", href=re.compile(href_pattern))
    if anchor is None:
        return None
    label = anchor.get_text(strip=True)
    return {
        "name": label if label else None,
        "url": f"{BASE_URL}{anchor['href']}",
    }
251
+
252
+
253
def _parse_propellant(soup: BeautifulSoup) -> dict:
    """Collect propellant type, formula, and casing links as {name, url} objects."""
    patterns = {
        "type": r"^/propellant",
        "formula": r"^/formula",
        "casing": r"^/casing",
    }
    return {key: _parse_linked_href(soup, pat) for key, pat in patterns.items()}
260
+
261
+
262
def _parse_detail(html: str, index_doc: dict) -> dict:
    """
    Merge index-level fields with detail page data (propellant links and
    thrust-curve coordinates), plus a UTC scraped_at timestamp.
    """
    soup = BeautifulSoup(html, "lxml")
    return {
        **index_doc,
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "propellant": _parse_propellant(soup),
        # The thrust curve lives in inline JS, so it is parsed from the
        # raw HTML string rather than the soup.
        "thrust_curve": _parse_thrust_curve(html),
    }
271
+
272
+
273
+ # ---------------------------------------------------------------------------
274
+ # Fetch helpers
275
+ # ---------------------------------------------------------------------------
276
+
277
+
278
def fetch_index(session: requests.Session) -> list[dict]:
    """
    Fetch the motors JSON index and return its raw 'records' list.

    Raises requests.HTTPError on a non-2xx response (after retries).
    """
    log.info("Fetching motor index from %s", INDEX_URL)
    resp = session.get(INDEX_URL, timeout=30)
    resp.raise_for_status()
    # The API wraps the list in a top-level "records" key.
    records = resp.json().get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
285
+
286
+
287
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_doc: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one motor detail page.

    Returns the merged detail dict, or None when the page was already
    scraped (and force is False) or the HTTP fetch failed — the caller
    counts both cases as "skipped".
    """
    motor_id = index_doc["id"]
    # Zero-padded filename keeps detail files lexically sorted by id.
    dest = DETAIL_DIR / f"{motor_id:06d}.json"

    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", motor_id)
        return None

    url = index_doc["url"]
    rate.wait()  # honor the configured per-request delay

    try:
        resp = session.get(url, timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        # Best-effort: log and move on rather than aborting the whole run.
        log.warning("Failed to fetch motor %s: %s", motor_id, exc)
        return None

    return _parse_detail(resp.text, index_doc)
311
+
312
+
313
+ # ---------------------------------------------------------------------------
314
+ # Main
315
+ # ---------------------------------------------------------------------------
316
+
317
+
318
def main() -> None:
    """CLI entry point: clean + write index.jsonl, then scrape each detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com motors.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape motors that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index, clean records, and write index.jsonl
    # ------------------------------------------------------------------
    raw_records = fetch_index(session)
    index_docs = [_parse_index_record(r) for r in raw_records]

    # One shared timestamp for all index records of this run.
    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for doc in index_docs:
            f.write(json.dumps({**doc, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(index_docs), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # NOTE: --limit truncates only this detail pass; the full index has
    # already been written above.
    if args.limit:
        index_docs = index_docs[: args.limit]

    ok = skipped = failed = 0
    total = len(index_docs)

    for i, doc in enumerate(index_docs, 1):
        result = scrape_detail(session, rate, doc, force=args.force)

        # None covers both "already on disk" and "fetch failed".
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{doc['id']:06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Periodic progress line every 25 items and at the end.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)


if __name__ == "__main__":
    main()
scripts/plans/01_scrape.py ADDED
@@ -0,0 +1,324 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_plans.py — Fetch the RocketReviews.com plans index and scrape each
4
+ detail page, saving structured JSON to source/plans/.
5
+
6
+ Output
7
+ ------
8
+ source/plans/index.jsonl one record per plan (raw index fields)
9
+ source/plans/detail/{slug}.json full parsed detail per plan
10
+
11
+ Usage
12
+ -----
13
+ python scripts/plans/01_scrape.py
14
+ python scripts/plans/01_scrape.py --delay 1.0 --limit 10
15
+ python scripts/plans/01_scrape.py --force # re-scrape existing files
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import argparse
21
+ import json
22
+ import logging
23
+ import re
24
+ import sys
25
+ import time
26
+ from datetime import datetime, timezone
27
+ from pathlib import Path
28
+ from typing import Optional
29
+
30
+ import requests
31
+ from bs4 import BeautifulSoup
32
+ from requests.adapters import HTTPAdapter
33
+ from urllib3.util.retry import Retry
34
+
35
+ # ---------------------------------------------------------------------------
36
+ # Config
37
+ # ---------------------------------------------------------------------------
38
+
39
+ BASE_URL = "https://www.rocketreviews.com"
40
+ INDEX_URL = f"{BASE_URL}/rocketry-plans.html"
41
+ USER_AGENT = "RocketReviews-Dataset/1.0"
42
+ DEFAULT_DELAY = 1.0
43
+
44
+ ROOT = Path(__file__).parent.parent.parent
45
+ SOURCE_DIR = ROOT / "source" / "plans"
46
+ DETAIL_DIR = SOURCE_DIR / "detail"
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # Logging
50
+ # ---------------------------------------------------------------------------
51
+
52
+ logging.basicConfig(
53
+ level=logging.INFO,
54
+ format="%(asctime)s %(levelname)s %(message)s",
55
+ handlers=[logging.StreamHandler(sys.stdout)],
56
+ )
57
+ log = logging.getLogger(__name__)
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # HTTP session
61
+ # ---------------------------------------------------------------------------
62
+
63
+
64
def _build_session() -> requests.Session:
    """Create a requests Session with a custom User-Agent and GET retries."""
    s = requests.Session()
    s.headers["User-Agent"] = USER_AGENT
    # Retry transient server/rate-limit errors with exponential backoff.
    retry = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    s.mount("https://", HTTPAdapter(max_retries=retry))
    s.mount("http://", HTTPAdapter(max_retries=retry))
    return s
76
+
77
+
78
class RateLimiter:
    """Enforce a minimum interval between successive calls to wait()."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        # Monotonic timestamp of the previous wait(); 0.0 means "never".
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep just long enough so calls are at least `delay` seconds apart."""
        elapsed = time.monotonic() - self._last
        if elapsed < self.delay:
            time.sleep(self.delay - elapsed)
        self._last = time.monotonic()
88
+
89
+
90
+ # ---------------------------------------------------------------------------
91
+ # Parsing helpers
92
+ # ---------------------------------------------------------------------------
93
+
94
+
95
+ def _slug_from_path(path: str) -> str:
96
+ """
97
+ Extract the slug from a url path.
98
+ e.g. '/1940-exploratory-planetary-cargo-ferry-180703114912.html' ->
99
+ '1940-exploratory-planetary-cargo-ferry'
100
+ """
101
+ name = path.lstrip("/").removesuffix(".html")
102
+ # Remove trailing -NNNNNNNNNNNN timestamp (12-digit numeric suffix)
103
+ name = re.sub(r"-\d{10,}$", "", name)
104
+ return name
105
+
106
+
107
def _parse_index(html: str) -> list[dict]:
    """
    Parse the rocketry-plans.html static page and return one record
    per plan from the main data table.
    """
    soup = BeautifulSoup(html, "lxml")
    records = []

    # The plans table is typically the last large table on the page.
    # NOTE(review): positional heuristic — breaks if the page layout adds
    # a trailing table; verify against the live page.
    tables = soup.find_all("table")
    table = tables[-1] if tables else None

    if not table:
        log.warning("Could not find the plans table on the index page.")
        return records

    # Skip the header row
    for row in table.find_all("tr")[1:]:
        cells = row.find_all(["td", "th"])
        if len(cells) < 5:
            continue

        # Column layout (cells[0] is intentionally skipped — presumably a
        # row-number/thumbnail column, TODO confirm):
        # 1: Source, 2: Title (link to detail), 3: Style, 4: Site (external link)
        source = cells[1].get_text(strip=True) or None

        title_a = cells[2].find("a")
        title_trunc = title_a.get_text(strip=True) if title_a else cells[2].get_text(strip=True)

        detail_path = title_a["href"] if title_a and title_a.has_attr("href") else None

        # Skip if there's no detail page link to use as an identifier
        if not detail_path:
            continue

        detail_url = detail_path if detail_path.startswith("http") else f"{BASE_URL}{detail_path}"
        slug = _slug_from_path(detail_path)

        style = cells[3].get_text(strip=True) or None

        site_a = cells[4].find("a")
        site_name = site_a.get_text(strip=True) if site_a else cells[4].get_text(strip=True)
        external_url = site_a["href"] if site_a and site_a.has_attr("href") else None

        # In case it's just plain text without a link, normalize empty string to None
        if not site_name:
            site_name = None

        records.append({
            "slug": slug,
            "title_truncated": title_trunc,
            "source": source,
            "style": style,
            "site": {
                "name": site_name,
                "url": external_url
            },
            "url": detail_url
        })

    return records
+
168
+
169
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Merge index-level fields with the full title from the detail page."""
    soup = BeautifulSoup(html, "lxml")

    # The h1 looks like:
    # "Rocketry Plans/Instructions - Model Rocket News 1940 Exploratory ..."
    full_title = None
    heading = soup.find("h1")
    if heading:
        full_title = re.sub(
            r"^Rocketry Plans/Instructions\s*-\s*",
            "",
            heading.get_text(strip=True),
            flags=re.I,
        ).strip()

    # Replace the truncated index title with the full one; fall back to the
    # truncated title when the h1 is missing or empty.
    merged = {k: v for k, v in index_rec.items() if k != "title_truncated"}
    merged["title"] = full_title or index_rec.get("title_truncated")
    merged["scraped_at"] = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    return merged
193
+
194
+
195
+ # ---------------------------------------------------------------------------
196
+ # Fetch helpers
197
+ # ---------------------------------------------------------------------------
198
+
199
+
200
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the plans index page and parse it into records."""
    log.info("Fetching plans index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    parsed = _parse_index(response.text)
    log.info("Index returned %d records.", len(parsed))
    return parsed
207
+
208
+
209
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one plan detail page.

    Returns None when the page is already on disk (and ``force`` is False)
    or when the HTTP request fails.
    """
    slug = index_rec["slug"]
    if (DETAIL_DIR / f"{slug}.json").exists() and not force:
        log.debug("Already scraped %s, skipping.", slug)
        return None

    rate.wait()
    try:
        resp = session.get(index_rec["url"], timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch plan %s: %s", slug, exc)
        return None

    return _parse_detail(resp.text, index_rec)
233
+
234
+
235
+ # ---------------------------------------------------------------------------
236
+ # Main
237
+ # ---------------------------------------------------------------------------
238
+
239
+
240
def main() -> None:
    """Fetch the plans index, write index.jsonl, then scrape each detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com plans.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape plans that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch index and write index.jsonl
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            # We want to output full 'title' to index.jsonl if possible,
            # but since we only have truncated title right now, we'll write that as 'title'
            out_rec = {k: v for k, v in rec.items() if k != "title_truncated"}
            out_rec["title"] = rec["title_truncated"]
            f.write(json.dumps({**out_rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # NOTE(review): a limit of 0 is falsy and therefore means "no limit" here.
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None covers both "already on disk" and "fetch failed"; both are
        # counted in `skipped` (failed only counts local write errors).
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{rec['slug']}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Progress heartbeat every 25 records and at the very end.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
321
+
322
+
323
+ if __name__ == "__main__":
324
+ main()
scripts/products/01_scrape.py ADDED
@@ -0,0 +1,409 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_products.py — Fetch the RocketReviews.com product index and scrape each
4
+ detail page, saving structured JSON to source/products/.
5
+
6
+ Output
7
+ ------
8
+ source/products/index.jsonl one record per product (raw index fields)
9
+ source/products/detail/{id}.json full parsed detail per product
10
+
11
+ Usage
12
+ -----
13
+ python scripts/products/01_scrape.py
14
+ python scripts/products/01_scrape.py --delay 2.0 --limit 10
15
+ python scripts/products/01_scrape.py --force # re-scrape existing files
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import argparse
21
+ import json
22
+ import logging
23
+ import re
24
+ import sys
25
+ import time
26
+ from datetime import datetime, timezone
27
+ from pathlib import Path
28
+ from typing import Optional
29
+
30
+ import requests
31
+ from bs4 import BeautifulSoup, NavigableString, Tag
32
+ from requests.adapters import HTTPAdapter
33
+ from urllib3.util.retry import Retry
34
+
35
+ # ---------------------------------------------------------------------------
36
+ # Config
37
+ # ---------------------------------------------------------------------------
38
+
39
+ BASE_URL = "https://www.rocketreviews.com"
40
+ INDEX_URL = f"{BASE_URL}/data/products/products.php?search=&type="
41
+ USER_AGENT = "RocketReviews-Dataset/1.0"
42
+ DEFAULT_DELAY = 1.0 # seconds between requests
43
+
44
+ ROOT = Path(__file__).parent.parent.parent
45
+ SOURCE_DIR = ROOT / "source" / "products"
46
+ DETAIL_DIR = SOURCE_DIR / "detail"
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # Logging
50
+ # ---------------------------------------------------------------------------
51
+
52
+ logging.basicConfig(
53
+ level=logging.INFO,
54
+ format="%(asctime)s %(levelname)s %(message)s",
55
+ handlers=[logging.StreamHandler(sys.stdout)],
56
+ )
57
+ log = logging.getLogger(__name__)
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # HTTP session
61
+ # ---------------------------------------------------------------------------
62
+
63
+
64
def _build_session() -> requests.Session:
    """Create an HTTP session with retries on transient server/rate errors."""
    session = requests.Session()
    session.headers["User-Agent"] = USER_AGENT
    adapter = HTTPAdapter(
        max_retries=Retry(
            total=3,
            backoff_factor=2.0,
            status_forcelist=[429, 500, 502, 503, 504],
            allowed_methods=["GET"],
        )
    )
    session.mount("https://", adapter)
    session.mount("http://", adapter)
    return session
76
+
77
+
78
class RateLimiter:
    """Space out requests by at least ``delay`` seconds."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        """Block until ``delay`` seconds have passed since the previous call."""
        now = time.monotonic()
        shortfall = self.delay - (now - self._last)
        if shortfall > 0:
            time.sleep(shortfall)
        self._last = time.monotonic()
88
+
89
+
90
+ # ---------------------------------------------------------------------------
91
+ # Parsing helpers
92
+ # ---------------------------------------------------------------------------
93
+
94
+
95
+ def _re_first(pattern: str, text: str, group: int = 1) -> Optional[str]:
96
+ m = re.search(pattern, text)
97
+ return m.group(group) if m else None
98
+
99
+
100
+ def _slug_to_label(slug: str) -> str:
101
+ """Convert a hyphenated slug to a title-cased label."""
102
+ return slug.replace("-", " ").title()
103
+
104
+
105
+ def _kit_index_int(field: str, html: str) -> Optional[int]:
106
+ """
107
+ Extract an integer value from a /kit-index-{field}-{value}.html href.
108
+ Used for fields stored as integers (e.g. skill-level).
109
+ """
110
+ val = _re_first(rf"/kit-index-{field}-(\d+)\.html", html)
111
+ return int(val) if val else None
112
+
113
+
114
+ def _kit_index_scaled(field: str, html: str, scale: int = 10000) -> Optional[float]:
115
+ """
116
+ Extract a scaled integer from a /kit-index-{field}-{value}.html href
117
+ and divide by scale. Used for diameter, length, weight (4 decimal places).
118
+ """
119
+ val = _re_first(rf"/kit-index-{field}-(\d+)\.html", html)
120
+ return int(val) / scale if val else None
121
+
122
+
123
+ def _kit_index_slug(field: str, html: str) -> Optional[str]:
124
+ """
125
+ Extract and humanize a slug value from a /kit-index-{field}-{slug}.html href.
126
+ Used for power class, recovery type, status.
127
+ """
128
+ val = _re_first(rf"/kit-index-{field}-([a-z][a-z-]*)\.html", html)
129
+ return _slug_to_label(val) if val else None
130
+
131
+
132
def _parse_manufacturer(html: str, soup: BeautifulSoup) -> dict:
    """Extract the manufacturer name plus its canonical and alias URLs."""
    # Canonical manufacturer page, e.g. /estes-1093.html
    canonical = _re_first(r'href="(/[a-z][a-z0-9-]+-\d+\.html)"', html)
    # Alias: the kit-index filter page, e.g. /kit-index-manufacturer-estes.html
    alias = _re_first(r"(/kit-index-manufacturer-[^\"']+\.html)", html)

    link = soup.find("a", href=re.compile(r"/kit-index-manufacturer-"))
    return {
        "name": link.get_text(strip=True) if link else None,
        "url": f"{BASE_URL}{canonical}" if canonical else None,
        "url_alias": f"{BASE_URL}{alias}" if alias else None,
    }
149
+
150
+
151
def _parse_designer(html: str) -> Optional[dict]:
    """
    Extract the designer name/URL from /kit-index-designer-{name}-{id}.html.
    Returns None when the page has no designer link.
    """
    match = re.search(r"(/kit-index-designer-([a-z][a-z-]*)-\d+\.html)", html)
    if match is None:
        return None
    path, slug = match.group(1), match.group(2)
    return {"name": _slug_to_label(slug), "url": f"{BASE_URL}{path}"}
163
+
164
+
165
+ def _parse_styles(html: str) -> list[str]:
166
+ """Extract all style labels from /kit-index-style-{name}-{id}.html hrefs."""
167
+ return [
168
+ _slug_to_label(m)
169
+ for m in re.findall(r"/kit-index-style-([a-z][a-z-]*)-\d+\.html", html)
170
+ ]
171
+
172
+
173
def _parse_recommended_motors(soup: BeautifulSoup) -> list[str]:
    """
    Locate the 'Recommended Motors' label and split the text that follows it
    (up to the next <strong>) into individual motor designations.
    """
    label = soup.find("strong", string=re.compile(r"Recommended Motors?", re.I))
    if label is None:
        return []

    # Gather raw text from following siblings until the next <strong>.
    chunks: list[str] = []
    for sibling in label.next_siblings:
        if isinstance(sibling, Tag) and sibling.name == "strong":
            break
        chunks.append(sibling.get_text() if isinstance(sibling, Tag) else str(sibling))

    raw = "".join(chunks).strip()
    return [token.strip() for token in re.split(r"[,\s]+", raw) if token.strip()]
189
+
190
+
191
+ def _parse_cp(html: str) -> Optional[dict]:
192
+ """
193
+ Extract CP location and method if present on the product page.
194
+ CP location href: /kit-index-cp-{value}.html (value = inches * 10000)
195
+ CP method href: /kit-index-cpmethod-{slug}.html
196
+ Direction (Front/Rear) extracted from surrounding text.
197
+ """
198
+ location_raw = _re_first(r"/kit-index-cp-(\d+)\.html", html)
199
+ if not location_raw:
200
+ return None
201
+
202
+ location_in = int(location_raw) / 10000
203
+
204
+ # Determine direction from surrounding text
205
+ direction = "Front"
206
+ m = re.search(r"(\d+\.\d+)\s+inches\s+from\s+(Front|Rear)", html, re.I)
207
+ if m:
208
+ direction = m.group(2).title()
209
+
210
+ method_slug = _re_first(r"/kit-index-cpmethod-([a-z][a-z/]*[a-z-]*)\.html", html)
211
+ method = _slug_to_label(method_slug) if method_slug else None
212
+
213
+ return {
214
+ "location_in": location_in,
215
+ "location_from": direction,
216
+ "method": method,
217
+ }
218
+
219
+
220
def _parse_specs(html: str, soup: BeautifulSoup) -> Optional[dict]:
    """
    Parse kit-specific structured specs out of /kit-index-{field}-{value}.html
    link patterns. Returns None for non-kit product types without specs.
    """
    status_slug = _re_first(r"/kit-index-status-([a-z][a-z-]*)\.html", html)

    specs = {
        "diameter_in": _kit_index_scaled("diameter", html),
        "length_in": _kit_index_scaled("length", html),
        "weight_oz": _kit_index_scaled("weight", html),
        "motor_size_mm": _kit_index_int("motor-size", html),
        "power_class": _kit_index_slug("power", html),
        "skill_level": _kit_index_int("skill-level", html),
        "style": _parse_styles(html),
        "recovery": _kit_index_slug("recovery", html),
        "status": _slug_to_label(status_slug) if status_slug else None,
        "recommended_motors": _parse_recommended_motors(soup),
    }

    # A page with none of the core kit fields is not a kit-type product.
    core_fields = (
        specs["diameter_in"],
        specs["length_in"],
        specs["weight_oz"],
        specs["motor_size_mm"],
        specs["skill_level"],
        specs["power_class"],
    )
    if not any(core_fields):
        return None
    return specs
253
+
254
+
255
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Combine index fields with everything parsed from a product detail page."""
    soup = BeautifulSoup(html, "lxml")
    product_id = int(index_rec["id"])

    # Alias URL comes from the index; the canonical one is derived directly
    # from the numeric product ID.
    alias_path = index_rec.get("url", "")

    return {
        "id": product_id,
        "url": f"{BASE_URL}/product-{product_id}.html",
        "url_alias": f"{BASE_URL}{alias_path}" if alias_path else None,
        "name": index_rec.get("name"),
        "title": index_rec.get("title"),
        "type": index_rec.get("type"),
        "model": index_rec.get("model") or None,
        "years": index_rec.get("years") or None,
        "manufacturer": _parse_manufacturer(html, soup),
        "designer": _parse_designer(html),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "specs": _parse_specs(html, soup),
        "cp": _parse_cp(html),
    }
281
+
282
+
283
+ # ---------------------------------------------------------------------------
284
+ # Fetch helpers
285
+ # ---------------------------------------------------------------------------
286
+
287
+
288
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the JSON product index and return its records list."""
    log.info("Fetching product index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    records = response.json().get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
295
+
296
+
297
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """
    Fetch and parse one product detail page.

    Returns None when the file already exists (and ``force`` is False) or
    when the HTTP request fails.
    """
    product_id = index_rec["id"]
    dest = DETAIL_DIR / f"{int(product_id):06d}.json"
    if dest.exists() and not force:
        log.debug("Already scraped %s, skipping.", product_id)
        return None

    # The canonical URL is constructable directly from the ID.
    rate.wait()
    try:
        resp = session.get(f"{BASE_URL}/product-{product_id}.html", timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch product %s: %s", product_id, exc)
        return None

    return _parse_detail(resp.text, index_rec)
322
+
323
+
324
+ # ---------------------------------------------------------------------------
325
+ # Main
326
+ # ---------------------------------------------------------------------------
327
+
328
+
329
def main() -> None:
    """Fetch the product index, write index.jsonl, then scrape each detail page."""
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com products.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape products that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch and write the full index
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # NOTE(review): a limit of 0 is falsy and therefore means "no limit" here.
    if args.limit:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        result = scrape_detail(session, rate, rec, force=args.force)

        # None covers both "already on disk" and "fetch failed"; both are
        # counted in `skipped` (failed only counts local write errors).
        if result is None:
            skipped += 1
            continue

        dest = DETAIL_DIR / f"{int(rec['id']):06d}.json"
        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        # Progress heartbeat every 25 records and at the very end.
        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
406
+
407
+
408
+ if __name__ == "__main__":
409
+ main()
scripts/reviews/01_scrape.py ADDED
@@ -0,0 +1,391 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ scrape_reviews.py — Fetch the RocketReviews.com review index and scrape each
4
+ detail page, saving structured JSON to source/reviews/.
5
+
6
+ Output
7
+ ------
8
+ source/reviews/index.jsonl one record per review (raw index fields)
9
+ source/reviews/detail/{id}.json full parsed detail per review
10
+
11
+ Usage
12
+ -----
13
+ python scripts/scrape_reviews.py
14
+ python scripts/scrape_reviews.py --delay 2.0 --limit 10
15
+ python scripts/scrape_reviews.py --force # re-scrape existing files
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ import argparse
21
+ import json
22
+ import logging
23
+ import re
24
+ import sys
25
+ import time
26
+ from datetime import datetime, timezone
27
+ from pathlib import Path
28
+ from typing import Optional
29
+
30
+ import requests
31
+ from bs4 import BeautifulSoup, NavigableString, Tag
32
+ from requests.adapters import HTTPAdapter
33
+ from urllib3.util.retry import Retry
34
+
35
+ # ---------------------------------------------------------------------------
36
+ # Config
37
+ # ---------------------------------------------------------------------------
38
+
39
+ BASE_URL = "https://www.rocketreviews.com"
40
+ INDEX_URL = f"{BASE_URL}/data/reviews/reviews.php"
41
+ USER_AGENT = "RocketReviews-Dataset/1.0"
42
+ DEFAULT_DELAY = 1.0 # seconds between requests
43
+
44
+ ROOT = Path(__file__).parent.parent.parent
45
+ SOURCE_DIR = ROOT / "source" / "reviews"
46
+ DETAIL_DIR = SOURCE_DIR / "detail"
47
+
48
+ # ---------------------------------------------------------------------------
49
+ # Logging
50
+ # ---------------------------------------------------------------------------
51
+
52
+ logging.basicConfig(
53
+ level=logging.INFO,
54
+ format="%(asctime)s %(levelname)s %(message)s",
55
+ handlers=[logging.StreamHandler(sys.stdout)],
56
+ )
57
+ log = logging.getLogger(__name__)
58
+
59
+ # ---------------------------------------------------------------------------
60
+ # HTTP session
61
+ # ---------------------------------------------------------------------------
62
+
63
+
64
def _build_session() -> requests.Session:
    """Build an HTTP session with a custom UA and exponential-backoff retries."""
    retries = Retry(
        total=3,
        backoff_factor=2.0,
        status_forcelist=[429, 500, 502, 503, 504],
        allowed_methods=["GET"],
    )
    sess = requests.Session()
    sess.headers["User-Agent"] = USER_AGENT
    sess.mount("https://", HTTPAdapter(max_retries=retries))
    sess.mount("http://", HTTPAdapter(max_retries=retries))
    return sess
76
+
77
+
78
class RateLimiter:
    """Monotonic-clock rate limiter for polite scraping."""

    def __init__(self, delay: float) -> None:
        self.delay = delay
        self._last: float = 0.0

    def wait(self) -> None:
        """Sleep off any remaining portion of the inter-request delay."""
        since_last = time.monotonic() - self._last
        if since_last < self.delay:
            time.sleep(self.delay - since_last)
        self._last = time.monotonic()
88
+
89
+
90
+ # ---------------------------------------------------------------------------
91
+ # Parsing helpers
92
+ # ---------------------------------------------------------------------------
93
+
94
+
95
+ def _re_first(pattern: str, text: str, group: int = 1) -> Optional[str]:
96
+ m = re.search(pattern, text)
97
+ return m.group(group) if m else None
98
+
99
+
100
def _parse_rating_block(soup: BeautifulSoup, label: str) -> Optional[int]:
    """
    Find '<strong>{label} Rating:</strong>' and count the filled Material
    Icons stars ('star') that follow it, ignoring 'star_border' (empty).
    """
    anchor = soup.find("strong", string=re.compile(rf"^{label} Rating:?$", re.I))
    if anchor is None:
        return None

    # Gather the raw text between this label and the next <strong>.
    pieces: list[str] = []
    for sibling in anchor.next_siblings:
        if isinstance(sibling, Tag) and sibling.name == "strong":
            break
        pieces.append(sibling.get_text() if isinstance(sibling, Tag) else str(sibling))
    blob = "".join(pieces)

    filled = len(re.findall(r"\bstar\b(?!_border)", blob))
    # Zero filled stars is reported the same as "no rating block found".
    return filled or None
119
+
120
+
121
def _parse_ratings(soup: BeautifulSoup) -> dict:
    """Collect the three star ratings shown on a review page."""
    return {
        name: _parse_rating_block(soup, name.capitalize())
        for name in ("construction", "flight", "overall")
    }
127
+
128
+
129
def _parse_product(soup: BeautifulSoup) -> dict:
    """Pull structured kit specs out of the kit-index links on a review page."""
    html = str(soup)

    def scaled(field: str) -> Optional[float]:
        # Dimensions are encoded as integer links of inches * 10000,
        # e.g. /kit-index-diameter-13260.html -> 1.3260 inches.
        raw = _re_first(rf"/kit-index-{field}-(\d+)\.html", html)
        return int(raw) / 10000 if raw else None

    # Skill level: /kit-index-skilllevel-1.html -> 1
    skill_raw = _re_first(r"/kit-index-skilllevel-(\d+)\.html", html)

    # Style links: /kit-index-style-{name}-{id}.html (zero or more).
    style_labels = [
        slug.replace("-", " ").title()
        for slug in re.findall(r"/kit-index-style-([a-z][a-z-]*)-\d+\.html", html)
    ]

    # Price: the first dollar amount anywhere in the visible page text.
    price_raw = _re_first(r"\$\s*(\d+(?:\.\d{1,2})?)", soup.get_text())

    return {
        "diameter_in": scaled("diameter"),
        "length_in": scaled("length"),
        "skill_level": int(skill_raw) if skill_raw else None,
        "style": style_labels,
        "price_usd": float(price_raw) if price_raw else None,
    }
161
+
162
+
163
+ # Section headings that contain grids/tables rather than review text
164
+ _SKIP_SECTION_RE = re.compile(
165
+ r"^\s*(flights?|.*reviews?|what you can do|sign in|create account)\s*$",
166
+ re.I,
167
+ )
168
+
169
+
170
def _parse_sections(soup: BeautifulSoup) -> dict[str, str]:
    """
    Walk every h2/h4 heading and collect the plain text that follows it up to
    the next heading of the same or higher level. Headings that front data
    grids (flight logs, related reviews, account UI) are skipped via
    _SKIP_SECTION_RE.
    """
    out: dict[str, str] = {}

    for head in soup.find_all(["h2", "h4"]):
        name = head.get_text(strip=True)
        if not name or _SKIP_SECTION_RE.match(name):
            continue

        chunks: list[str] = []
        for node in head.next_siblings:
            if isinstance(node, Tag):
                if node.name in ("h2", "h4"):
                    break
                piece = node.get_text(separator=" ", strip=True)
            elif isinstance(node, NavigableString):
                piece = str(node).strip()
            else:
                continue
            if piece:
                chunks.append(piece)

        body = " ".join(chunks).strip()
        if body:
            out[name] = body

    return out
201
+
202
+
203
def _parse_detail(html: str, index_rec: dict) -> dict:
    """Combine index fields with URLs, ratings, specs, and text sections
    parsed from a review detail page."""
    soup = BeautifulSoup(html, "lxml")

    # Canonical URL from any /review-{N}.html href in the page
    canonical_path = _re_first(r'href=["\']?(/review-\d+\.html)', html)
    canonical_url = f"{BASE_URL}{canonical_path}" if canonical_path else None

    # Alias URL — slug URL from the index
    alias_path = index_rec.get("url", "")
    url_alias = f"{BASE_URL}{alias_path}" if alias_path else None

    # Kit URL constructed from the Kit_ID in the JS data source
    # e.g. data/flightlog/flights.php?column=Kit_ID&value=7163
    # Non-positive IDs are treated as "no associated kit".
    kit_id_raw = _re_first(r"Kit_ID&value=(-?\d+)", html)
    kit_id_int = int(kit_id_raw) if kit_id_raw else None
    kit_url = f"{BASE_URL}/product-{kit_id_int}.html" if kit_id_int and kit_id_int > 0 else None

    # Manufacturer canonical URL: e.g. /estes-1093.html
    # NOTE(review): this takes the FIRST href of shape /{slug}-{digits}.html
    # anywhere in the page; assumes the manufacturer link appears before other
    # links of that shape — TODO confirm against live pages.
    mfr_canonical = _re_first(r'href="(/[a-z][a-z0-9-]+-\d+\.html)"', html)
    manufacturer_url = f"{BASE_URL}{mfr_canonical}" if mfr_canonical else None

    # Manufacturer alias URL from the kit-index filter link
    # e.g. /kit-index-manufacturer-estes.html
    mfr_alias = _re_first(r"(/kit-index-manufacturer-[^\"']+\.html)", html)
    manufacturer_url_alias = f"{BASE_URL}{mfr_alias}" if mfr_alias else None

    # Manufacturer name from the kit-index link (more reliable than the index field)
    manufacturer = index_rec.get("manufacturer") or None
    mfr_link = soup.find("a", href=re.compile(r"/kit-index-manufacturer-"))
    if mfr_link:
        manufacturer = mfr_link.get_text(strip=True) or manufacturer

    # Contributor URL from profile link with 4+ digit numeric suffix
    # e.g. /darrell-ritchies-darrell-8979.html
    # NOTE(review): this pattern can also match review/manufacturer hrefs with
    # a 4+ digit suffix; first match wins — verify it reliably hits the
    # contributor profile link.
    contributor_path = _re_first(r"(/[a-z][a-z0-9-]+-\d{4,}\.html)", html)
    contributor_url = f"{BASE_URL}{contributor_path}" if contributor_path else None

    return {
        "id": int(index_rec["id"]),
        "url": canonical_url,
        "url_alias": url_alias,
        "date": index_rec.get("date"),
        "contributor": index_rec.get("contributor"),
        "contributor_url": contributor_url,
        "kit": index_rec.get("kit"),
        "kit_url": kit_url,
        "manufacturer": manufacturer,
        "manufacturer_url": manufacturer_url,
        "manufacturer_url_alias": manufacturer_url_alias,
        "type": index_rec.get("type"),
        "scraped_at": datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ"),
        "ratings": _parse_ratings(soup),
        "product": _parse_product(soup),
        "sections": _parse_sections(soup),
    }
258
+
259
+
260
+ # ---------------------------------------------------------------------------
261
+ # Fetch helpers
262
+ # ---------------------------------------------------------------------------
263
+
264
+
265
def fetch_index(session: requests.Session) -> list[dict]:
    """Download the full review index and return its ``records`` list.

    Issues a single GET against ``INDEX_URL``; raises for any HTTP error
    status. A response without a ``records`` key yields an empty list.
    """
    log.info("Fetching review index from %s", INDEX_URL)
    response = session.get(INDEX_URL, timeout=30)
    response.raise_for_status()
    payload = response.json()
    records = payload.get("records", [])
    log.info("Index returned %d records.", len(records))
    return records
272
+
273
+
274
def scrape_detail(
    session: requests.Session,
    rate: RateLimiter,
    index_rec: dict,
    force: bool = False,
) -> Optional[dict]:
    """Fetch and parse one review detail page.

    Returns the parsed detail dict, or ``None`` when the review was
    already scraped (and ``force`` is unset), the index record has no
    URL, or the HTTP request fails.
    """
    review_id = index_rec["id"]
    out_file = DETAIL_DIR / f"{int(review_id):06d}.json"

    # Guard: don't redo work that has already been persisted to disk.
    if not force and out_file.exists():
        log.debug("Already scraped %s, skipping.", review_id)
        return None

    alias_path = index_rec.get("url", "")
    if not alias_path:
        log.warning("No URL for review %s, skipping.", review_id)
        return None

    # Honor the politeness delay before hitting the site.
    rate.wait()
    try:
        resp = session.get(f"{BASE_URL}{alias_path}", timeout=30)
        resp.raise_for_status()
    except requests.RequestException as exc:
        log.warning("Failed to fetch review %s: %s", review_id, exc)
        return None

    return _parse_detail(resp.text, index_rec)
303
+
304
+
305
+ # ---------------------------------------------------------------------------
306
+ # Main
307
+ # ---------------------------------------------------------------------------
308
+
309
+
310
def main() -> None:
    """Scrape the RocketReviews.com review index and all detail pages.

    Steps:
      1. Download the JSON index and write it to ``SOURCE_DIR/index.jsonl``,
         stamping each record with a UTC ``scraped_at`` timestamp.
      2. Fetch each review's detail page (rate-limited), parse it, and
         save the result as a zero-padded ``{id:06d}.json`` file in
         ``DETAIL_DIR``.

    Command-line flags: ``--delay`` (seconds between requests),
    ``--limit`` (cap on detail pages, for testing), ``--force``
    (re-scrape reviews that already have a saved file).
    """
    parser = argparse.ArgumentParser(description="Scrape RocketReviews.com reviews.")
    parser.add_argument(
        "--delay",
        type=float,
        default=DEFAULT_DELAY,
        help=f"Seconds between requests (default: {DEFAULT_DELAY})",
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="Stop after scraping this many detail pages (useful for testing)",
    )
    parser.add_argument(
        "--force",
        action="store_true",
        help="Re-scrape reviews that already have a saved detail file",
    )
    args = parser.parse_args()

    SOURCE_DIR.mkdir(parents=True, exist_ok=True)
    DETAIL_DIR.mkdir(parents=True, exist_ok=True)

    session = _build_session()
    rate = RateLimiter(args.delay)

    # ------------------------------------------------------------------
    # Step 1: fetch and write the full index
    # ------------------------------------------------------------------
    records = fetch_index(session)

    scraped_at = datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
    index_path = SOURCE_DIR / "index.jsonl"
    with index_path.open("w", encoding="utf-8") as f:
        for rec in records:
            f.write(json.dumps({**rec, "scraped_at": scraped_at}) + "\n")
    log.info("Wrote %d index records to %s", len(records), index_path)

    # ------------------------------------------------------------------
    # Step 2: scrape each detail page
    # ------------------------------------------------------------------
    # Explicit None check so `--limit 0` means "scrape nothing" rather
    # than silently scraping everything (0 is falsy).
    if args.limit is not None:
        records = records[: args.limit]

    ok = skipped = failed = 0
    total = len(records)

    for i, rec in enumerate(records, 1):
        dest = DETAIL_DIR / f"{int(rec['id']):06d}.json"
        # Record whether this is a benign skip BEFORE calling, because
        # scrape_detail returns None both for "already saved" and for
        # real failures (missing URL, HTTP error).
        already_done = dest.exists() and not args.force

        result = scrape_detail(session, rate, rec, force=args.force)

        if result is None:
            # Count already-saved reviews as skipped; everything else
            # that yielded no result is a genuine failure, so the final
            # summary no longer under-reports failures.
            if already_done:
                skipped += 1
            else:
                failed += 1
            continue

        try:
            dest.write_text(
                json.dumps(result, indent=2, ensure_ascii=False), encoding="utf-8"
            )
            ok += 1
            log.debug("Saved %s", dest.name)
        except OSError as exc:
            log.warning("Could not write %s: %s", dest, exc)
            failed += 1

        if i % 25 == 0 or i == total:
            log.info(
                "Progress: %d/%d — ok=%d skipped=%d failed=%d",
                i,
                total,
                ok,
                skipped,
                failed,
            )

    log.info("Done — ok=%d skipped=%d failed=%d", ok, skipped, failed)
388
+
389
+
390
+ if __name__ == "__main__":
391
+ main()
source/clubs/detail/000001.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ab8ccda78681439010d00f9d96f344b16413669e8931bb0f89b5206c2c164fa2
3
+ size 622
source/clubs/detail/000002.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6bc2d949d5aa311358d57e02717062d28d98a7bc30d8e4a6a5bfb6feb55d9458
3
+ size 728
source/clubs/detail/000003.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1fb747987674a89991b757697c87e878c805f049b4504496d8aa7f9105bf5a51
3
+ size 542
source/clubs/detail/000021.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2bb5c9e9221781a3d0552853137e015233387f260f503c1e06e7a6b7f3895c22
3
+ size 631
source/clubs/detail/000022.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89f6a99a963bfe59caf5de40d89ba0ecedb2915e991f7add14e0ddeaaacb5168
3
+ size 662
source/clubs/detail/000023.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f88e768160a6a023b9ef7520c0e6ba434a617d6f32285a227f0f0096a5b60247
3
+ size 684
source/clubs/detail/000026.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:35b3b49c42cee70dd75e5e96d2f341ac92999cf638e756b27f291b858bde74b5
3
+ size 548
source/clubs/detail/000027.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:33b1ecd6978b594020b690a05cfebf24eefdf33e902b6291baa3a8d980659aeb
3
+ size 621
source/clubs/detail/000028.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:012a858e5149e8207b45039d92b73c741ba44909449ad7d59fe5fd1395408c0e
3
+ size 551
source/clubs/detail/000029.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2dc1ebc91c28c5b0b798207637729ae3ffba2157c0000fa22cc8243a70609e62
3
+ size 526
source/clubs/detail/000030.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b09ebfed2cba3d2125df752f4613a96e6b5f96b6e8da815f0bb989389d677bdd
3
+ size 589
source/clubs/detail/000031.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee57cd5a22781b856fd0d21d45de2338b683b7e009cd6a4e175ce7421ca8918a
3
+ size 694
source/clubs/detail/000032.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1b2e942b6da6657c288c70be99e65096866b33f8588b3c33f72b75b87b5de4be
3
+ size 693
source/clubs/detail/000033.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94be22603c093330f5909122e4f2ead6183a2f2453c049b9fb50be86d88cdf32
3
+ size 719
source/clubs/detail/000034.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f2a51f1eeec7ff8b724d545ee6af0949af71500f2706b95d22698370192e1402
3
+ size 533
source/clubs/detail/000035.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fdc0f1a8b6ae9b348726ccb2c415a15e2c4bc22918224b44f96dd38f66ff40d4
3
+ size 543
source/clubs/detail/000036.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0e908b7ed7d7530694fa75bab78d8f1b3b28298c9392b02be55a1bed2618f194
3
+ size 529
source/clubs/detail/000037.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:64e29a13e57a02eefd8ce2df504c101c98fb033c93bcb49a3caeffb8da2800b9
3
+ size 533
source/clubs/detail/000038.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fd0f55dc7d448035559c57d64e9d31b41f3c0c6fe2cefbc786254c850653d95f
3
+ size 532
source/clubs/detail/000039.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a80e08cd6605ead107de16ceacf8daff5df6cae425e517feaa2716fbc2983f22
3
+ size 641
source/clubs/detail/000040.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a90c52d0542b7bd5fe9fb356fbf64ae30a15ce9e061f02f14166c60cb2514b7e
3
+ size 635
source/clubs/detail/000041.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0074407481c9e36145b07ee763613254aabcf193c4a6df5f5f15dca9ba329d7b
3
+ size 569
source/clubs/detail/000042.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7d6d264e2c3c30b967d8bd0247c50a12c5cc8be6ed7a5612a231e28325a26cc5
3
+ size 504
source/clubs/detail/000043.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0783822b4163e611adc29ad1b3fc9a20dae5c66ae00ee51e38e05c8a672cac71
3
+ size 642
source/clubs/detail/000044.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e4e501bc9d4674cd7714da687c7baafcbef00bab182fa215449d344db630ab6
3
+ size 676
source/clubs/detail/000045.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ab5df77f1bb8e51908650d87c70ef7a60156fcf1214a9be24f6506d08da4d15
3
+ size 655
source/clubs/detail/000046.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:71087d4d5b5ba527209adaa1ef2ccb6fc7a4092c6f6b991afcbe30343041fb1c
3
+ size 605
source/clubs/detail/000047.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fad1419bcaed03e37e936a78c641eeb613c335cfdde17a3acb95c43511b97441
3
+ size 578
source/clubs/detail/000048.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4f957796491dd81dbac9077006056f310c0789483275c30c2f7bea37d6a1ae84
3
+ size 564
source/clubs/detail/000049.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dd923602711c777071a82d31369383939ae80b71ac727be207ac0df37119a71c
3
+ size 601
source/clubs/detail/000050.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d74d4d93e4e24744dd6420c428cba08c27fd45fd1d9cf956cdd40804a533df3e
3
+ size 579
source/clubs/detail/000051.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:244b17bd29bdf36014f4e44bd7b00b636ec0dbdc43f43b22660c8c4f5f9fd99c
3
+ size 590
source/clubs/detail/000052.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c311cb80cf42b5c298e132a1dba105fc725a9d159eb69b09e7b1204be65748b5
3
+ size 631
source/clubs/detail/000053.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6fb4869ff527f96c6d5cf0202c70bb0e21bddd00246309b8e7597786fa940ca4
3
+ size 693
source/clubs/detail/000054.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e4dc299c05c68bf32e1175e3635ceeee5fbe7d460e89cb4bc117b03e00eb5cd9
3
+ size 607
source/clubs/detail/000055.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8507d367335f2734c48a91923420358cb9e7e960b3b6c14417ae975c92652a5c
3
+ size 505
source/clubs/detail/000056.json ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9c1a91af05a6803553dc8f14dc8de99ef03b16235e731ab165d57f945484dd1f
3
+ size 501