Datasets:
Adds data for remaining sections.
Browse files- README.md +32 -0
- data/clubs.jsonl +3 -0
- data/designs.jsonl +3 -0
- data/glossary.jsonl +3 -0
- data/motors.jsonl +3 -0
- data/parts.jsonl +3 -0
- data/plans.jsonl +3 -0
- data/products.jsonl +3 -0
- scripts/clubs/02_build_data.py +96 -0
- scripts/designs/02_build_data.py +122 -0
- scripts/glossary/02_build_data.py +76 -0
- scripts/motors/02_build_data.py +101 -0
- scripts/parts/02_build_data.py +104 -0
- scripts/plans/02_build_data.py +87 -0
- scripts/products/02_build_data.py +97 -0
README.md
CHANGED
|
@@ -8,6 +8,10 @@ tags:
|
|
| 8 |
- aerospace
|
| 9 |
- simulation
|
| 10 |
configs:
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
- config_name: reviews
|
| 12 |
data_files:
|
| 13 |
- split: train
|
|
@@ -16,6 +20,34 @@ configs:
|
|
| 16 |
data_files:
|
| 17 |
- split: train
|
| 18 |
path: data/flights.jsonl
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
---
|
| 20 |
|
| 21 |
# RocketReviews Dataset
|
|
|
|
| 8 |
- aerospace
|
| 9 |
- simulation
|
| 10 |
configs:
|
| 11 |
+
- config_name: default
|
| 12 |
+
data_files:
|
| 13 |
+
- split: train
|
| 14 |
+
path: data/reviews.jsonl
|
| 15 |
- config_name: reviews
|
| 16 |
data_files:
|
| 17 |
- split: train
|
|
|
|
| 20 |
data_files:
|
| 21 |
- split: train
|
| 22 |
path: data/flights.jsonl
|
| 23 |
+
- config_name: products
|
| 24 |
+
data_files:
|
| 25 |
+
- split: train
|
| 26 |
+
path: data/products.jsonl
|
| 27 |
+
- config_name: motors
|
| 28 |
+
data_files:
|
| 29 |
+
- split: train
|
| 30 |
+
path: data/motors.jsonl
|
| 31 |
+
- config_name: parts
|
| 32 |
+
data_files:
|
| 33 |
+
- split: train
|
| 34 |
+
path: data/parts.jsonl
|
| 35 |
+
- config_name: designs
|
| 36 |
+
data_files:
|
| 37 |
+
- split: train
|
| 38 |
+
path: data/designs.jsonl
|
| 39 |
+
- config_name: clubs
|
| 40 |
+
data_files:
|
| 41 |
+
- split: train
|
| 42 |
+
path: data/clubs.jsonl
|
| 43 |
+
- config_name: glossary
|
| 44 |
+
data_files:
|
| 45 |
+
- split: train
|
| 46 |
+
path: data/glossary.jsonl
|
| 47 |
+
- config_name: plans
|
| 48 |
+
data_files:
|
| 49 |
+
- split: train
|
| 50 |
+
path: data/plans.jsonl
|
| 51 |
---
|
| 52 |
|
| 53 |
# RocketReviews Dataset
|
data/clubs.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:819e3864780fb899fbcc884120f88563de03a61920b90f1f04c452df133b17aa
|
| 3 |
+
size 125866
|
data/designs.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eafc945a60b85c2bdfffa6c4db34856be31e7c4d55db4059b479225fd572e498
|
| 3 |
+
size 3910828
|
data/glossary.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0c10d357cd7180de37ed0c802ce6d73db3a69be366252db8128374fde355f49e
|
| 3 |
+
size 299270
|
data/motors.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:5fd7db19d6d7777a0a1d34af235cd2739cccc6381548c61df8ffec548f3e2a6a
|
| 3 |
+
size 471885
|
data/parts.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ca1ad0958eaedc898b216089c4c66d7b5875178fd0504967dccbcd13f48869bb
|
| 3 |
+
size 5125543
|
data/plans.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f48f34e3f8bb446140518b1af1a999b180168e8c4b5c7cfb590ece486c2bdcd3
|
| 3 |
+
size 73385
|
data/products.jsonl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7c6995ca4f63a6e495a02608140121a334d9c2a5d9f61fc5ce929bc16006045b
|
| 3 |
+
size 1663265
|
scripts/clubs/02_build_data.py
ADDED
|
@@ -0,0 +1,96 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/clubs.jsonl from scraped club detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "clubs" / "detail"
OUTPUT_FILE = ROOT / "data" / "clubs.jsonl"
PREFIX = "club"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_club(data: dict) -> dict:
    """Flatten club JSON into a ChromaDB-ready format.

    Returns a dict with ``id`` (stable ``club:NNNNNN`` key), ``document``
    (searchable text) and ``metadata`` (flat, None-free filter fields).
    """
    chroma_id = f"{PREFIX}:{int(data['id']):06d}"

    # Build searchable document text.
    parts = []
    name = data.get("name")
    # FIX: the old fallback f"{city}, {state}" produced the literal string
    # "None, None" when both fields were missing (and it is truthy, so it
    # leaked into the document). Join only the pieces actually present.
    location = data.get("location")
    if not location:
        location = ", ".join(p for p in (data.get("city"), data.get("state")) if p)

    summary = f"Rocketry Club: {name}"
    if location:
        summary += f" located in {location}"
    summary += "."
    parts.append(summary)

    affiliations = []
    if data.get("has_nar"):
        affiliations.append(f"NAR Section {data.get('nar_section') or ''}")
    if data.get("has_tripoli"):
        affiliations.append(f"Tripoli Prefecture {data.get('tripoli_prefecture') or ''}")
    if affiliations:
        parts.append("Affiliations: " + ", ".join(affiliations).strip() + ".")

    if data.get("description"):
        parts.append(data["description"])

    document = " ".join(parts)

    # Flatten metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "id": data["id"],
        "name": name,
        "city": data.get("city"),
        "state": data.get("state"),
        "country": data.get("country"),
        "has_nar": data.get("has_nar"),
        "has_tripoli": data.get("has_tripoli"),
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per club."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_club(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue so one bad file does not
                    # abort the whole build.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/designs/02_build_data.py
ADDED
|
@@ -0,0 +1,122 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/designs.jsonl from scraped design detail JSON (plus optional deep-parsed files)."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "designs" / "detail"
DEEP_PARSED_DIR = ROOT / "source" / "designs" / "files" / "json"
OUTPUT_FILE = ROOT / "data" / "designs.jsonl"
PREFIX = "design"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_design(data: dict, deep_data: dict = None) -> dict:
    """Flatten design JSON into a ChromaDB-ready format.

    ``deep_data`` is the optional deep-parsed design file (supplies
    ``stage_count``). Returns ``{"id", "document", "metadata"}``.
    """
    chroma_id = f"{PREFIX}:{int(data['id']):06d}"

    # Build searchable document text.
    parts = []
    title = data.get("title") or data.get("name")
    mfr = data.get("manufacturer")
    fmt = data.get("format")

    summary = f"Rocket Design: {title}"
    if mfr:
        summary += f" (Manufacturer: {mfr})"
    # FIX: the format clause was appended unconditionally, producing
    # "in None format." for designs with no format field.
    if fmt:
        summary += f" in {fmt} format."
    else:
        summary += "."
    parts.append(summary)

    designer_obj = data.get("designer")
    if isinstance(designer_obj, dict) and designer_obj.get("name"):
        parts.append(f"Designed by {designer_obj['name']}.")

    if data.get("comments"):
        parts.append(f"Comments: {data['comments']}")

    # Stability metrics.
    metrics = []
    cg = data.get("cg")
    if isinstance(cg, dict) and cg.get("location_in"):
        metrics.append(f"CG: {cg['location_in']} in")

    cp = data.get("cp")
    if isinstance(cp, dict) and cp.get("location_in"):
        metrics.append(f"CP: {cp['location_in']} in")

    if data.get("margin"):
        metrics.append(f"Margin: {data['margin']} {data.get('margin_status', '')}")

    if metrics:
        parts.append("Metrics: " + ", ".join(metrics) + ".")

    # Parts list (relationship).
    if data.get("parts"):
        parts.append(f"Includes components: {', '.join(data['parts'])}.")

    document = " ".join(parts)

    # Flatten metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "id": data["id"],
        "format": fmt,
        "manufacturer": mfr,
        "designer_name": designer_obj.get("name") if isinstance(designer_obj, dict) else None,
        "stage_count": deep_data.get("stage_count") if isinstance(deep_data, dict) else None,
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shards, join each design with its deep-parsed file, and write JSONL."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)

                    # Try to find deep parsed data keyed by zero-padded id.
                    deep_data = None
                    deep_path = DEEP_PARSED_DIR / f"{int(raw_data['id']):06d}.json"
                    if deep_path.exists():
                        with deep_path.open("r", encoding="utf-8") as df:
                            deep_data = json.load(df)

                    processed = transform_design(raw_data, deep_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/glossary/02_build_data.py
ADDED
|
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/glossary.jsonl from scraped glossary detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "glossary" / "detail"
OUTPUT_FILE = ROOT / "data" / "glossary.jsonl"
PREFIX = "glossary"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_glossary(data: dict) -> dict:
    """Flatten glossary JSON into a ChromaDB-ready format.

    The document is "<term>: <definition>"; the id is ``glossary:<slug>``.
    """
    chroma_id = f"{PREFIX}:{data['slug']}"

    # Searchable text is the term + full definition (long form preferred).
    term = data.get("term")
    definition = data.get("description") or data.get("short_description")

    # FIX: previously a missing definition produced the literal document
    # "Term: None"; fall back to the bare term instead.
    document = f"{term}: {definition}" if definition else f"{term}"

    # Metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "slug": data["slug"],
        "term": term,
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per term."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_glossary(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/motors/02_build_data.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/motors.jsonl from scraped motor detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "motors" / "detail"
OUTPUT_FILE = ROOT / "data" / "motors.jsonl"
PREFIX = "motor"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_motor(data: dict) -> dict:
    """Flatten motor JSON into a ChromaDB-ready format.

    Returns a dict with ``id`` (``motor:NNNNNN``), ``document`` and
    ``metadata`` (flat, None-free fields).
    """
    chroma_id = f"{PREFIX}:{int(data['id']):06d}"

    # Build searchable document text.
    parts = []
    title = data.get("title") or data.get("designation")
    mfr = data.get("manufacturer")

    summary = f"Rocket Motor: {title}"
    if mfr:
        summary += f" manufactured by {mfr}"
    summary += "."
    parts.append(summary)

    # FIX: dict.get(key, {}) still returns None when the key is present
    # with a null value in the scraped JSON; use "or {}" so the .get()
    # calls below never raise AttributeError.
    perf = data.get("performance") or {}
    phys = data.get("physical") or {}

    stats = []
    if data.get("letter"):
        stats.append(f"Class: {data['letter']}")
    if perf.get("total_impulse_ns"):
        stats.append(f"Total Impulse: {perf['total_impulse_ns']} Ns")
    if perf.get("average_thrust_n"):
        stats.append(f"Average Thrust: {perf['average_thrust_n']} N")
    if phys.get("diameter_mm"):
        stats.append(f"Diameter: {phys['diameter_mm']} mm")
    if stats:
        parts.append("Performance: " + ", ".join(stats) + ".")

    if data.get("description"):
        parts.append(f"Description: {data['description']}")

    document = " ".join(parts)

    # Flatten metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "id": data["id"],
        "designation": data.get("designation"),
        "manufacturer": mfr,
        "letter": data.get("letter"),
        "diameter_mm": phys.get("diameter_mm"),
        "total_impulse_ns": perf.get("total_impulse_ns"),
        "average_thrust_n": perf.get("average_thrust_n"),
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per motor."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_motor(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/parts/02_build_data.py
ADDED
|
@@ -0,0 +1,104 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/parts.jsonl from scraped part detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "parts" / "detail"
OUTPUT_FILE = ROOT / "data" / "parts.jsonl"
PREFIX = "part"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# Keys that are identity/housekeeping fields, not physical specifications.
_NON_SPEC_KEYS = {
    "slug", "category", "title", "manufacturer", "manufacturer_url",
    "material", "related_designs", "scraped_at", "url",
}

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_part(data: dict) -> dict:
    """Flatten part JSON into a ChromaDB-ready format.

    All non-housekeeping keys are treated as physical specs and folded
    into the document text. Returns ``{"id", "document", "metadata"}``.
    """
    chroma_id = f"{PREFIX}:{data['slug']}"

    # Build searchable document text.
    parts = []
    title = data.get("title")
    cat = data.get("category")
    mfr = data.get("manufacturer")
    mat = data.get("material")

    summary = f"Rocket Part: {title}"
    if cat:
        summary += f" ({cat})"
    if mfr:
        summary += f" manufactured by {mfr}"
    summary += "."
    parts.append(summary)

    # Add physical specs dynamically (diameter, length, chords, etc).
    specs = []
    if mat:
        specs.append(f"material: {mat}")

    for key, val in data.items():
        if key in _NON_SPEC_KEYS:
            continue
        # FIX: skip null-valued fields so the document doesn't fill up
        # with "foo: None" entries.
        if val is None:
            continue
        specs.append(f"{key.replace('_', ' ')}: {val}")

    if specs:
        parts.append("Specifications: " + ", ".join(specs) + ".")

    if data.get("related_designs"):
        # FIX: d['name'] raised KeyError for design stubs without a name;
        # use .get() and keep only named designs.
        designs = [d.get("name") for d in data["related_designs"] if d.get("name")]
        if designs:
            parts.append(f"Featured in designs: {', '.join(designs)}.")

    document = " ".join(parts)

    # Flatten metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "slug": data["slug"],
        "category": cat,
        "manufacturer": mfr,
        "material": mat,
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per part."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_part(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/plans/02_build_data.py
ADDED
|
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/plans.jsonl from scraped plan detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "plans" / "detail"
OUTPUT_FILE = ROOT / "data" / "plans.jsonl"
PREFIX = "plan"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_plan(data: dict) -> dict:
    """Flatten plan JSON into a ChromaDB-ready format.

    The document is a one-sentence summary (title, source, style, host
    site). Returns ``{"id", "document", "metadata"}``.
    """
    chroma_id = f"{PREFIX}:{data['slug']}"

    # Searchable text: title, source, and style.
    title = data.get("title")
    source = data.get("source")
    style = data.get("style")
    # FIX: data.get("site", {}) still returns None when the key is
    # present with a null value; "or {}" keeps the .get() calls safe.
    site_obj = data.get("site") or {}
    site = site_obj.get("name")

    summary = f"Rocket Plan: {title}"
    if source:
        summary += f" (Source: {source})"
    summary += f" in {style or 'Standard'} style."
    if site:
        summary += f" Hosted on {site}."

    document = summary

    # Metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "slug": data["slug"],
        "title": title,
        "source": source,
        "style": style,
        "site_name": site,
        "url": data.get("url"),
        "external_url": site_obj.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per plan."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_plan(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|
scripts/products/02_build_data.py
ADDED
|
@@ -0,0 +1,97 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
"""Build data/products.jsonl from scraped product detail JSON files."""
import json
import logging
from pathlib import Path

# ---------------------------------------------------------------------------
# Config
# ---------------------------------------------------------------------------

ROOT = Path(__file__).parent.parent.parent
SOURCE_DIR = ROOT / "source" / "products" / "detail"
OUTPUT_FILE = ROOT / "data" / "products.jsonl"
PREFIX = "product"

logging.basicConfig(level=logging.INFO, format="%(message)s")
log = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Mapper
# ---------------------------------------------------------------------------

def transform_product(data: dict) -> dict:
    """Flatten product JSON into a ChromaDB-ready format.

    Returns a dict with ``id`` (``product:NNNNNN``), ``document`` and
    ``metadata`` (flat, None-free fields).
    """
    chroma_id = f"{PREFIX}:{int(data['id']):06d}"

    # Build searchable document text.
    parts = []
    title = data.get("title") or data.get("name")
    # FIX: data.get("manufacturer", {}) still returns None when the key
    # is present with a null value; "or {}" keeps the .get() call safe.
    mfr = (data.get("manufacturer") or {}).get("name")

    summary = f"Product: {title}"
    if mfr:
        summary += f" manufactured by {mfr}"
    summary += "."
    parts.append(summary)

    specs = data.get("specs")
    if specs:
        spec_parts = []
        if specs.get("diameter_in"):
            spec_parts.append(f"diameter: {specs['diameter_in']} in")
        if specs.get("length_in"):
            spec_parts.append(f"length: {specs['length_in']} in")
        if specs.get("power_class"):
            spec_parts.append(f"power class: {specs['power_class']}")
        if specs.get("skill_level"):
            spec_parts.append(f"skill level: {specs['skill_level']}")
        if specs.get("recovery"):
            spec_parts.append(f"recovery: {specs['recovery']}")
        if spec_parts:
            parts.append("Specifications: " + ", ".join(spec_parts) + ".")

    document = " ".join(parts)

    # Flatten metadata; drop None values so ChromaDB filters stay clean.
    metadata = {
        "id": data["id"],
        "name": data.get("name"),
        "type": data.get("type"),
        "manufacturer_name": mfr,
        "power_class": specs.get("power_class") if specs else None,
        "skill_level": specs.get("skill_level") if specs else None,
        "url": data.get("url"),
    }
    metadata = {k: v for k, v in metadata.items() if v is not None}

    return {
        "id": chroma_id,
        "document": document,
        "metadata": metadata,
    }

# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------

def main():
    """Walk SOURCE_DIR shard directories and write one JSONL record per product."""
    if not SOURCE_DIR.exists():
        log.error(f"Source directory {SOURCE_DIR} not found.")
        return

    OUTPUT_FILE.parent.mkdir(parents=True, exist_ok=True)

    count = 0
    with OUTPUT_FILE.open("w", encoding="utf-8") as out:
        for shard_dir in sorted(SOURCE_DIR.iterdir()):
            if not shard_dir.is_dir():
                continue
            for file_path in sorted(shard_dir.glob("*.json")):
                try:
                    with file_path.open("r", encoding="utf-8") as f:
                        raw_data = json.load(f)
                    processed = transform_product(raw_data)
                    out.write(json.dumps(processed, ensure_ascii=False) + "\n")
                    count += 1
                except Exception as e:
                    # Best-effort: log and continue past malformed files.
                    log.error(f"Error processing {file_path}: {e}")

    log.info(f"Successfully built {count} records in {OUTPUT_FILE}")

if __name__ == "__main__":
    main()
|