# FalseMemBench / scripts/run_dense_benchmark.py
# Author: codysnider — "Clean public release surface" (commit 5ecbe81)
#!/usr/bin/env python3
import argparse
import json
from pathlib import Path
from sentence_transformers import SentenceTransformer
def load_cases(path: Path):
    """Load benchmark cases from a JSONL file.

    Each non-blank line is parsed as one JSON object (one case).
    Blank lines are skipped.

    Args:
        path: Path to the cases JSONL file.

    Returns:
        List of case dicts in file order.
    """
    # Explicit UTF-8: the dataset is JSON, and the platform default
    # encoding is not guaranteed to be UTF-8 everywhere.
    with path.open(encoding="utf-8") as f:
        return [json.loads(line) for line in f if line.strip()]
def cosine_rank(model, query: str, entries: list[dict]):
    """Rank entry ids by cosine similarity of their text to the query.

    The query and all entry texts are encoded in one batch with
    normalized embeddings, so a dot product equals cosine similarity.
    Ties are broken by entry id (ascending) for determinism.

    Args:
        model: A SentenceTransformer-style encoder exposing ``encode``.
        query: The query string.
        entries: Dicts with at least ``"text"`` and ``"id"`` keys.

    Returns:
        Entry ids ordered from most to least similar.
    """
    batch = [query]
    batch.extend(entry["text"] for entry in entries)
    vectors = model.encode(batch, normalize_embeddings=True)
    similarities = (vectors[1:] @ vectors[0]).tolist()
    order = sorted(
        zip(similarities, entries),
        key=lambda pair: (-pair[0], pair[1]["id"]),
    )
    return [entry["id"] for _, entry in order]
def score_case(case: dict, ranked_ids: list[str]):
    """Compute per-case retrieval metrics for a ranked id list.

    Metrics: hit@1, hit@5, and reciprocal rank of the first relevant
    id (0.0 when no relevant id is ranked at all).

    Args:
        case: Case dict with ``id``, ``adversary_type``, ``query`` and
            ``relevant_ids`` keys.
        ranked_ids: Entry ids ordered best-first by the retriever.

    Returns:
        Dict bundling the case fields, the ranking, and the metrics.
    """
    relevant = set(case["relevant_ids"])
    hit_at_1 = len(ranked_ids) > 0 and ranked_ids[0] in relevant
    hit_at_5 = any(rid in relevant for rid in ranked_ids[:5])
    # Reciprocal rank of the first relevant hit; 0.0 if none found.
    mrr = next(
        (1.0 / (pos + 1) for pos, rid in enumerate(ranked_ids) if rid in relevant),
        0.0,
    )
    return {
        "id": case["id"],
        "adversary_type": case["adversary_type"],
        "query": case["query"],
        "relevant_ids": case["relevant_ids"],
        "ranked_ids": ranked_ids,
        "hit_at_1": hit_at_1,
        "hit_at_5": hit_at_5,
        "mrr": mrr,
    }
def summarize(results):
    """Aggregate per-case results into a benchmark report.

    Args:
        results: List of dicts produced by ``score_case``.

    Returns:
        Dict with overall recall@1, recall@5, mean MRR, per-category
        hit@5 rates keyed by adversary type, and the raw results.
    """
    count = len(results)
    hits_at_1 = sum(1 for r in results if r["hit_at_1"])
    hits_at_5 = sum(1 for r in results if r["hit_at_5"])
    mrr_sum = sum(r["mrr"] for r in results)

    # Per-adversary-type hit@5 tallies.
    per_category = {}
    for r in results:
        stats = per_category.setdefault(r["adversary_type"], {"hit": 0, "total": 0})
        stats["total"] += 1
        if r["hit_at_5"]:
            stats["hit"] += 1

    return {
        "cases": count,
        "recall_at_1": hits_at_1 / count if results else 0,
        "recall_at_5": hits_at_5 / count if results else 0,
        "mrr": mrr_sum / count if results else 0,
        "per_category": {name: s["hit"] / s["total"] for name, s in per_category.items()},
        "results": results,
    }
def main() -> int:
    """CLI entry point: run a dense retriever over the benchmark cases.

    Encodes each case's query and candidate entries with the given
    sentence-transformers model, scores the rankings, prints aggregate
    metrics to stdout, and optionally writes the full report to --out.

    Returns:
        Process exit code (0 on success).
    """
    parser = argparse.ArgumentParser(description="Run a dense retriever baseline on FalseMemBench")
    # Default dataset path is resolved relative to this script's repo layout.
    parser.add_argument("--data", default=str(Path(__file__).resolve().parents[1] / "data" / "cases.jsonl"))
    parser.add_argument("--model", required=True)
    parser.add_argument("--out", default="")
    args = parser.parse_args()

    model = SentenceTransformer(args.model)
    cases = load_cases(Path(args.data))
    results = [
        score_case(case, cosine_rank(model, case["query"], case["entries"]))
        for case in cases
    ]
    report = summarize(results)

    # Only aggregate metrics go to stdout; the full report (with
    # per-case rankings) is reserved for the optional --out file.
    print(json.dumps({key: report[key] for key in ["cases", "recall_at_1", "recall_at_5", "mrr"]}, indent=2))
    if args.out:
        out = Path(args.out)
        out.parent.mkdir(parents=True, exist_ok=True)
        # Explicit UTF-8 so the report encoding doesn't depend on locale.
        out.write_text(json.dumps(report, indent=2), encoding="utf-8")
    return 0


if __name__ == "__main__":
    raise SystemExit(main())