# FalseMemBench — scripts/run_bm25_benchmark.py
# Author: codysnider — commit 5ecbe81 ("Clean public release surface")
#!/usr/bin/env python3
import argparse
import json
from pathlib import Path
from rank_bm25 import BM25Okapi
def load_cases(path: Path):
    """Load newline-delimited JSON cases from *path*.

    Blank (whitespace-only) lines are skipped; every other line must be a
    complete JSON document.
    """
    with path.open() as handle:
        return [json.loads(raw) for raw in map(str.strip, handle) if raw]
# Punctuation characters treated as token separators by the BM25 baseline.
_SEPARATOR_TABLE = str.maketrans({"?": " ", ",": " ", ".": " "})

def tokenize(text: str):
    """Lowercase *text* and split it into tokens.

    '?', ',' and '.' are treated as whitespace. A single `str.translate`
    pass replaces the original chain of `str.replace` calls, and the text
    is lowercased once instead of per token; `str.split()` with no
    argument already drops empty tokens, so no extra filter is needed.

    Returns a list of lowercase token strings (empty for empty/blank input).
    """
    return text.lower().translate(_SEPARATOR_TABLE).split()
def run_case(case: dict):
    """Rank one case's entries with BM25 and compute retrieval metrics.

    Builds a BM25 index over the case's entry texts, scores the query,
    and reports the ranked ids plus hit@1, hit@5, and MRR against the
    case's relevant ids.
    """
    entries = case["entries"]
    index = BM25Okapi([tokenize(entry["text"]) for entry in entries])
    query_scores = index.get_scores(tokenize(case["query"]))

    # Descending score; ties broken by entry id for a deterministic order.
    ordering = sorted(zip(query_scores, entries), key=lambda pair: (-pair[0], pair[1]["id"]))
    ranked_ids = [entry["id"] for _, entry in ordering]

    relevant = set(case["relevant_ids"])
    # 1-based rank of the first relevant entry, or None if none appears.
    first_hit_rank = next(
        (pos + 1 for pos, doc_id in enumerate(ranked_ids) if doc_id in relevant),
        None,
    )

    return {
        "id": case["id"],
        "adversary_type": case["adversary_type"],
        "query": case["query"],
        "relevant_ids": case["relevant_ids"],
        "ranked_ids": ranked_ids,
        "hit_at_1": first_hit_rank == 1,
        "hit_at_5": first_hit_rank is not None and first_hit_rank <= 5,
        "mrr": 1.0 / first_hit_rank if first_hit_rank else 0.0,
    }
def summarize(results):
    """Aggregate per-case metrics into an overall benchmark report.

    Returns a dict with the case count, recall@1, recall@5, mean MRR,
    per-adversary-category hit@5 rates, and the raw per-case results.
    All ratios are 0 when *results* is empty.
    """
    total = len(results)
    hits_at_1 = sum(1 for item in results if item["hit_at_1"])
    hits_at_5 = sum(1 for item in results if item["hit_at_5"])
    mrr_sum = sum((item["mrr"] for item in results), 0.0)

    # hit@5 tallies grouped by adversary category.
    categories = {}
    for item in results:
        bucket = categories.setdefault(item["adversary_type"], {"hit": 0, "total": 0})
        bucket["total"] += 1
        if item["hit_at_5"]:
            bucket["hit"] += 1

    return {
        "cases": total,
        "recall_at_1": hits_at_1 / total if total else 0,
        "recall_at_5": hits_at_5 / total if total else 0,
        "mrr": mrr_sum / total if total else 0,
        "per_category": {name: b["hit"] / b["total"] for name, b in categories.items()},
        "results": results,
    }
def main() -> int:
    """CLI entry point: run the BM25 baseline over the benchmark cases.

    Prints headline metrics as JSON to stdout; when --out is given, the
    full report (including per-case results) is also written there.
    Returns 0 on success.
    """
    default_data = Path(__file__).resolve().parents[1] / "data" / "cases.jsonl"
    parser = argparse.ArgumentParser(description="Run a BM25 baseline on FalseMemBench")
    parser.add_argument("--data", default=str(default_data))
    parser.add_argument("--out", default="")
    args = parser.parse_args()

    results = [run_case(case) for case in load_cases(Path(args.data))]
    report = summarize(results)

    # Only the headline numbers go to stdout; per-case detail stays in --out.
    headline = {key: report[key] for key in ("cases", "recall_at_1", "recall_at_5", "mrr")}
    print(json.dumps(headline, indent=2))

    if args.out:
        destination = Path(args.out)
        destination.parent.mkdir(parents=True, exist_ok=True)
        destination.write_text(json.dumps(report, indent=2))
    return 0

if __name__ == "__main__":
    raise SystemExit(main())