Add AuthorityBench dataset
Uploads the AuthorityBench dataset for evaluating LLMs' ability to perceive source authority in RAG systems.
Three sub-datasets:
- DomainAuth: 10K web domains annotated with PageRank-based authority scores.
- EntityAuth: 22K entities (across basketball, movies, and songs) annotated with popularity-based authority (Wikipedia sitelinks).
- RAGAuth: 120 yes/no queries paired with retrieved documents of varying authority, designed to evaluate the practical impact of authority perception on downstream RAG tasks.
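For a quick look at the data, the listwise files can be read line by line as JSONL. The snippet below is a minimal loading sketch, assuming each record carries the `domains`, `pageranks`, and `ground_truth` fields consumed by the judge scripts added in this commit (the exact schema may differ across sub-datasets; the path shown is one of the files added here):

import json

# Minimal loading sketch (assumption: field names are taken from the judge scripts
# in this commit; adjust the path to wherever the dataset is checked out).
LISTWISE_FILE = "AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_no_text.jsonl"

with open(LISTWISE_FILE, "r", encoding="utf-8") as f:
    records = [json.loads(line) for line in f if line.strip()]

first = records[0]
print(len(first["domains"]))   # number of candidate domains in one list
print(first["pageranks"][:3])  # PageRank-based authority scores for the first entries
print(first["ground_truth"])   # reference ranking/labels used by the evaluation scripts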
This view is limited to 50 files because it contains too many changes.
- .gitattributes +6 -0
- AuthorityBench/dataset/DomainAuth/corpus/corpus_10_scale.jsonl +0 -0
- AuthorityBench/dataset/DomainAuth/corpus/corpus_10_scale_with_text.jsonl +3 -0
- AuthorityBench/dataset/DomainAuth/corpus/corpus_5_scale.jsonl +0 -0
- AuthorityBench/dataset/DomainAuth/corpus/corpus_5_scale_with_text.jsonl +3 -0
- AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_no_text.jsonl +0 -0
- AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_with_text.jsonl +3 -0
- AuthorityBench/dataset/DomainAuth/listwise/listwise_5_scale_with_text.jsonl +3 -0
- AuthorityBench/dataset/DomainAuth/pairwise/easy_pairs.jsonl +3 -0
- AuthorityBench/dataset/DomainAuth/pairwise/hard_pairs.jsonl +3 -0
- AuthorityBench/dataset/EntityAuth/corpus/basketball_10_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/corpus/basketball_5_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/corpus/movies_10_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/corpus/movies_5_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/corpus/songs_10_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/corpus/songs_5_scale.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/basketball_list_10.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/basketball_list_5.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/movies_list_10.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/movies_list_5.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/songs_list_10.jsonl +0 -0
- AuthorityBench/dataset/EntityAuth/listwise/songs_list_5.jsonl +0 -0
- AuthorityBench/dataset/RAGAuth/authrag.jsonl +0 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_ListRank.py +66 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_ListRank_with_text.py +138 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_PointScore.py +66 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_PointScore_with_text.py +123 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PairRank.py +163 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PairRank_with_text.py +165 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PointScore.py +92 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PointScore_with_text.py +155 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PointJudge.py +70 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PointJudge_with_text.py +104 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_ba.py +69 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_mo.py +69 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_so.py +69 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_ba.py +78 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_mo.py +78 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_so.py +78 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_ba.py +157 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_mo.py +157 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_so.py +157 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_ba.py +114 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_mo.py +114 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_so.py +114 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_ba.py +69 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_mo.py +69 -0
- AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_so.py +65 -0
- AuthorityBench/judge_and_eval/eval/eval_list.py +129 -0
- AuthorityBench/judge_and_eval/eval/eval_pair.py +51 -0
.gitattributes CHANGED
@@ -58,3 +58,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 # Video files - compressed
 *.mp4 filter=lfs diff=lfs merge=lfs -text
 *.webm filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/corpus/corpus_10_scale_with_text.jsonl filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/corpus/corpus_5_scale_with_text.jsonl filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_with_text.jsonl filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/listwise/listwise_5_scale_with_text.jsonl filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/pairwise/easy_pairs.jsonl filter=lfs diff=lfs merge=lfs -text
+AuthorityBench/dataset/DomainAuth/pairwise/hard_pairs.jsonl filter=lfs diff=lfs merge=lfs -text
AuthorityBench/dataset/DomainAuth/corpus/corpus_10_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/DomainAuth/corpus/corpus_10_scale_with_text.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0ea21b3565c5bfa9d457398ca926608e1deb26953b1b05251787bd06ac7b0154
+size 56849513

AuthorityBench/dataset/DomainAuth/corpus/corpus_5_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/DomainAuth/corpus/corpus_5_scale_with_text.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fe6f019aff74d55b25402bf9ac40a6384a4b68bc506389a093fbcf7aa0c37fba
+size 56969513

AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_no_text.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/DomainAuth/listwise/listwise_10_scale_with_text.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afa05543d0a6cbb09e13250b9b70719eb5d9ed09235df36efd376a29c60354b4
+size 143396221

AuthorityBench/dataset/DomainAuth/listwise/listwise_5_scale_with_text.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4f8cc5b401c1e73e35a8230efa1f22bcfaa914668914fa52a200add2e23760fe
+size 71286311

AuthorityBench/dataset/DomainAuth/pairwise/easy_pairs.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f711d2ec9796fa67e8c8fcc51c4f2beab2d75961502ff26eeecfac6ddef62951
+size 31904944

AuthorityBench/dataset/DomainAuth/pairwise/hard_pairs.jsonl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1074f483806cebd5a878ae327feab05e5a5d638f1791e4e6e0863d68f1cd5940
+size 31671815
AuthorityBench/dataset/EntityAuth/corpus/basketball_10_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/corpus/basketball_5_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/corpus/movies_10_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/corpus/movies_5_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/corpus/songs_10_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/corpus/songs_5_scale.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/basketball_list_10.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/basketball_list_5.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/movies_list_10.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/movies_list_5.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/songs_list_10.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/EntityAuth/listwise/songs_list_5.jsonl ADDED
The diff for this file is too large to render.

AuthorityBench/dataset/RAGAuth/authrag.jsonl ADDED
The diff for this file is too large to render.
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_ListRank.py ADDED
@@ -0,0 +1,66 @@
import json
import random
import re
from typing import List, Dict, Any

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_PAIR_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_prompt(domains: List[str]) -> str:
    return (
        f"You are a web analysis expert. Please score the source authority of each domain "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n"
        f"Consider all domains together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**DOMAINS**:\n"
        + "\n".join([f"[{i}] {domain}" for i, domain in enumerate(domains)])
        + f"\n\nYour output MUST be a single, valid JSON object. The JSON object must map each domain's numerical identifier (as a string) to its integer score (0-9).\n"
        f"Example format: {{\"0\": 7, \"1\": 4, ..., \"9\": 8}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response: str, num_domains: int) -> List[int]:
    text = response.strip()
    # rfind returns -1 when the marker is absent, so check explicitly instead of relying on truthiness.
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            scores = {int(re.sub(r'\D', '', str(k))): float(v) for k, v in json.loads(match.group()).items()}
            if len(scores) == num_domains and set(scores.keys()) == set(range(num_domains)):
                return sorted(scores.keys(), key=lambda x: scores[x], reverse=True)
        except Exception:
            pass
    # Fall back to a random ranking when the model output cannot be parsed.
    ranking = list(range(num_domains))
    random.shuffle(ranking)
    return ranking


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    with open(EVAL_PAIR_FILE, 'r', encoding='utf-8') as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["domains"])}], tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for item, output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                pred = extract_scores(text, len(item["domains"]))
                out_f.write(json.dumps({"domains": item["domains"], "pageranks": item["pageranks"], "ground_truth": item["ground_truth"], "pred": pred, "llm_output": text}, ensure_ascii=False) + '\n')


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_ListRank_with_text.py ADDED
@@ -0,0 +1,138 @@
import json
import os
import re
import random
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_PAIR_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_DIR"
BATCH_SIZE = 8


def build_ranking_prompt(domains, doc_texts):
    truncated_texts = [
        text[:500] + "..." if len(text) > 500 else text
        for text in doc_texts
    ]

    prompt = (
        f"You are a web analysis expert that can rank domains based on their authority.\n\n"
        f"I will provide you with 10 domains and their web contents, each indicated by a numerical identifier [].\n"
        f"Rank the domains based on their source authority (credibility, professionalism, official certification, or public recognition).\n"
    )

    for i in range(10):
        prompt += f"[{i}] {domains[i]}: {truncated_texts[i]}\n"

    prompt += (
        f"\nRank the 10 domains above based on their authority (highest to lowest).\n"
        f"All the domains should be included and listed using identifiers, in descending order of authority.\n"
        f"The output format should be [] > [], e.g., [4] > [0].\n"
        f"Only respond with the ranking results, do not say any word or explain.\n"
        f"Your answer: "
    )

    return prompt


def extract_ranking_answer(response_raw: str, num_domains: int = 10):
    response = response_raw.strip()

    bracket_matches = re.findall(r'\[(\d+)\]', response)
    if bracket_matches:
        try:
            ranking = [int(x) for x in bracket_matches]
            if (len(ranking) == num_domains and
                    all(0 <= idx < num_domains for idx in ranking) and
                    set(ranking) == set(range(num_domains))):
                return ranking
        except ValueError:
            pass

    all_numbers = re.findall(r'\b\d+\b', response)
    if len(all_numbers) >= num_domains:
        try:
            ranking = [int(x) for x in all_numbers[:num_domains]]
            if (len(ranking) == num_domains and
                    all(0 <= idx < num_domains for idx in ranking) and
                    set(ranking) == set(range(num_domains))):
                return ranking
        except ValueError:
            pass

    lines = response.split('\n')
    for line in reversed(lines):
        line = line.strip()
        if line:
            line_numbers = re.findall(r'\b\d+\b', line)
            if len(line_numbers) >= num_domains:
                try:
                    ranking = [int(x) for x in line_numbers[:num_domains]]
                    if (len(ranking) == num_domains and
                            all(0 <= idx < num_domains for idx in ranking) and
                            set(ranking) == set(range(num_domains))):
                        return ranking
                except ValueError:
                    pass

    random_ranking = list(range(num_domains))
    random.shuffle(random_ranking)
    return random_ranking


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

    sampling_params = SamplingParams(
        temperature=0.0,
        top_p=1.0,
        max_tokens=256,
    )

    with open(EVAL_PAIR_FILE, 'r', encoding='utf-8') as f:
        listwise_items = [json.loads(line) for line in f]

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    results = []
    for i in range(0, len(listwise_items), BATCH_SIZE):
        batch = listwise_items[i:i + BATCH_SIZE]

        prompts = []
        for item in batch:
            prompt = build_ranking_prompt(item["domains"], item["doc_texts"])
            messages = [{"role": "user", "content": prompt}]
            chat_prompt = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True,
                enable_thinking=False
            )
            prompts.append(chat_prompt)

        outputs = llm.generate(prompts, sampling_params)

        for item, output in zip(batch, outputs):
            response_raw = output.outputs[0].text if output.outputs else ""
            pred = extract_ranking_answer(response_raw, len(item["domains"]))

            results.append({
                "domains": item["domains"],
                "pageranks": item["pageranks"],
                "ground_truth": item["ground_truth"],
                "pred": pred,
                "llm_output": response_raw
            })

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as f:
        for result in results:
            f.write(json.dumps(result, ensure_ascii=False) + '\n')

    print(f"Results saved to {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_PointScore.py ADDED
@@ -0,0 +1,66 @@
import json
import random
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_PAIR_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 8


def build_prompt(domains):
    return (
        f"You are a web analysis expert. Please score the source authority of each domain "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n"
        f"Consider all domains together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**DOMAINS**:\n"
        + "\n".join([f"[{i}] {domain}" for i, domain in enumerate(domains)])
        + f"\n\nYour output MUST be a single, valid JSON object. The JSON object must map each domain's numerical identifier (as a string) to its integer score (0-9).\n"
        f"Example format: {{\"0\": 7, \"1\": 4, ..., \"9\": 8}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response, num_domains):
    text = response.strip()
    # rfind returns -1 when the marker is absent, so check explicitly.
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            scores = {int(re.sub(r'\D', '', str(k))): float(v) for k, v in json.loads(match.group()).items()}
            if len(scores) == num_domains and set(scores.keys()) == set(range(num_domains)):
                return sorted(scores.keys(), key=lambda x: scores[x], reverse=True)
        except Exception:
            pass
    # Fall back to a random ranking when the model output cannot be parsed.
    ranking = list(range(num_domains))
    random.shuffle(ranking)
    return ranking


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    with open(EVAL_PAIR_FILE, 'r', encoding='utf-8') as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["domains"])}], tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for item, output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                pred = extract_scores(text, len(item["domains"]))
                out_f.write(json.dumps({"domains": item["domains"], "pageranks": item["pageranks"], "ground_truth": item["ground_truth"], "pred": pred, "llm_output": text}, ensure_ascii=False) + '\n')


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/ListJudge_PointScore_with_text.py ADDED
@@ -0,0 +1,123 @@
import json
import os
import random
import re
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_PAIR_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_DIR"
BATCH_SIZE = 8


# Extract scoring answer from model response
def extract_scoring_answer(response_raw: str, num_domains: int = 10):
    """
    Extract JSON scoring from model output and return ranked indices.
    Returns (ranking_list, failed_flag).
    """
    response = response_raw.strip()

    marker = "Your answer:"
    marker_pos = response.rfind(marker)
    if marker_pos != -1:
        response = response[marker_pos + len(marker):].strip()

    json_match = re.search(r'\{[^{}]*\}', response, re.DOTALL)
    if json_match:
        try:
            score_dict = json.loads(json_match.group())
            scores = {
                int(re.sub(r'\D', '', str(k))): float(v)
                for k, v in score_dict.items()
            }
            if len(scores) == num_domains and set(scores.keys()) == set(range(num_domains)):
                # Sort by score descending and return original indices
                return sorted(scores.keys(), key=lambda x: scores[x], reverse=True), False
        except (json.JSONDecodeError, ValueError, KeyError):
            pass

    random_ranking = list(range(num_domains))
    random.shuffle(random_ranking)
    return random_ranking, True


def build_scoring_prompt(domains, doc_texts):
    """Build prompt for listwise domain authority scoring."""
    truncated_texts = [
        text[:500] + "..." if len(text) > 500 else text
        for text in doc_texts
    ]

    prompt = (
        f"You are a web analysis expert. Please score the source authority of each domain "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself and its content.\n"
        f"Consider all domains and their contents together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**DOMAINS & CONTENTS**:\n"
    )
    for i, domain in enumerate(domains):
        prompt += f"[{i}] {domain}: {truncated_texts[i]}\n"

    prompt += (
        f"\n"
        f"Your output MUST be a single, valid JSON object. The JSON object must map each domain's numerical identifier (as a string) to its integer score (0-9).\n"
        f"Example format: {{\"0\": 7, \"1\": 4, ..., \"9\": 8}}. Do not add any explanation.\n"
        f"Your answer: "
    )
    return prompt


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

    sampling_params = SamplingParams(
        temperature=0.0,
        top_p=1.0,
        max_tokens=256,
    )

    with open(EVAL_PAIR_FILE, 'r', encoding='utf-8') as f:
        listwise_items = [json.loads(line) for line in f]

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as out_f:
        for i in range(0, len(listwise_items), BATCH_SIZE):
            batch = listwise_items[i:i + BATCH_SIZE]

            prompts = []
            for item in batch:
                prompt = build_scoring_prompt(item["domains"], item["doc_texts"])
                messages = [{"role": "user", "content": prompt}]
                chat_prompt = tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True,
                    enable_thinking=False
                )
                prompts.append(chat_prompt)

            outputs = llm.generate(prompts, sampling_params)

            for item, output in zip(batch, outputs):
                response_raw = output.outputs[0].text if output.outputs else ""
                pred, failed = extract_scoring_answer(response_raw, len(item["domains"]))

                result = {
                    "domains": item["domains"],
                    "pageranks": item["pageranks"],
                    "ground_truth": item["ground_truth"],
                    "pred": pred,
                    "extraction_failed": failed,
                    "llm_output": response_raw
                }

                out_f.write(json.dumps(result, ensure_ascii=False) + '\n')
                out_f.flush()

    print(f"Results saved to {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PairRank.py ADDED
@@ -0,0 +1,163 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_prompt(domain1, domain2):
    return (
        f"You are a web analysis expert. Please determine which of the two given domains "
        f"is more authoritative as an information source based on the following criteria.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n"
        f"Consider both domains together when making the comparison.\n\n"
        f"**DOMAINS**:\n"
        f"Domain A: {domain1}\n"
        f"Domain B: {domain2}\n\n"
        f"Which domain is more authoritative? Only return 'A' or 'B'. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_winner(response):
    if not response:
        return None
    text = response.strip().upper()
    first = text[0] if text else ""
    if first in ("A", "B"):
        return first
    if match := re.search(r'\b([AB])\b', text):
        return match.group(1)
    return None


def compare(i, j, anchor_set, anchor_wins, non_anchor_wins, direct):
    i_is_anchor = i in anchor_set
    j_is_anchor = j in anchor_set

    if not i_is_anchor and not j_is_anchor:
        return non_anchor_wins.get(i, 0) > non_anchor_wins.get(j, 0)
    elif i_is_anchor and j_is_anchor:
        return anchor_wins.get(i, 0) > anchor_wins.get(j, 0)
    else:
        if (i, j) in direct:
            return direct[(i, j)]
        elif (j, i) in direct:
            return not direct[(j, i)]
        else:
            score_i = anchor_wins.get(i, non_anchor_wins.get(i, 0))
            score_j = anchor_wins.get(j, non_anchor_wins.get(j, 0))
            return score_i > score_j


def bubble_sort(indices, anchor_set, anchor_wins, non_anchor_wins, direct):
    arr = list(indices)
    n = len(arr)
    for i in range(n):
        for j in range(n - i - 1):
            if not compare(arr[j], arr[j + 1], anchor_set, anchor_wins, non_anchor_wins, direct):
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, tensor_parallel_size=4, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=16)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    # Resume support: skip lists whose results already appear in the output file.
    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, 'r', encoding='utf-8') as f:
            for line in f:
                if line.strip():
                    try:
                        if "list_id" in (obj := json.loads(line)):
                            completed.add(obj["list_id"])
                    except Exception:
                        pass

    with open(EVAL_FILE, 'r', encoding='utf-8') as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, 'a', encoding='utf-8') as out_f:
        for idx, item in enumerate(tqdm(items)):
            list_id = item.get("list_id", idx)
            if list_id in completed:
                continue

            domains = item["domains"]
            n = len(domains)
            anchors = random.sample(range(n), NUM_ANCHORS)
            anchor_set = set(anchors)
            non_anchors = [i for i in range(n) if i not in anchor_set]

            tasks = [{"idx_a": ni, "idx_b": ai, "domain_a": domains[ni], "domain_b": domains[ai], "type": "na_vs_a"}
                     for ni in non_anchors for ai in anchors]
            tasks += [{"idx_a": ai, "idx_b": aj, "domain_a": domains[ai], "domain_b": domains[aj], "type": "a_vs_a"}
                      for ai, aj in itertools.combinations(anchors, 2)]

            results = []
            for i in range(0, len(tasks), BATCH_SIZE):
                batch = tasks[i:i + BATCH_SIZE]
                prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(t["domain_a"], t["domain_b"])}],
                                                          tokenize=False, add_generation_prompt=True, enable_thinking=False) for t in batch]
                outputs = llm.generate(prompts, sampling_params)
                for t, out in zip(batch, outputs):
                    raw = out.outputs[0].text if out.outputs else ""
                    winner = extract_winner(raw)
                    results.append((t, winner, raw))

            direct = {}
            anchor_wins = defaultdict(int)
            non_anchor_wins = defaultdict(int)
            records = []

            for task, winner, raw in results:
                idx_a, idx_b = task["idx_a"], task["idx_b"]
                if winner:
                    i_wins = (winner == "A")
                    direct[(idx_a, idx_b)] = i_wins
                    direct[(idx_b, idx_a)] = not i_wins

                    if task["type"] == "na_vs_a":
                        non_anchor_wins[idx_a] += 1 if i_wins else 0
                    elif task["type"] == "a_vs_a":
                        anchor_wins[idx_a] += 1 if i_wins else 0
                        anchor_wins[idx_b] += 1 if not i_wins else 0

                records.append({"domain_a": task["domain_a"], "idx_a": idx_a, "domain_b": task["domain_b"],
                                "idx_b": idx_b, "winner": winner, "llm_output": raw, "type": task["type"]})

            for ni in non_anchors:
                if ni not in non_anchor_wins:
                    non_anchor_wins[ni] = 0
            for ai in anchors:
                if ai not in anchor_wins:
                    anchor_wins[ai] = 0

            pred = bubble_sort(range(n), anchor_set, anchor_wins, non_anchor_wins, direct)

            out_f.write(json.dumps({
                "list_id": list_id, "domains": domains, "pageranks": item["pageranks"],
                "ground_truth": item["ground_truth"], "anchor_indices": anchors,
                "anchor_wins_vs_anchors": {str(k): v for k, v in anchor_wins.items()},
                "non_anchor_wins_vs_anchors": {str(k): v for k, v in non_anchor_wins.items()},
                "pred": pred, "comparisons": records
            }, ensure_ascii=False) + '\n')
            out_f.flush()


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PairRank_with_text.py ADDED
@@ -0,0 +1,165 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5
MAX_TEXT_LENGTH = 500


def build_pairwise_prompt(domain1, text1, domain2, text2):
    t1 = (text1[:MAX_TEXT_LENGTH] + "...") if text1 and len(text1) > MAX_TEXT_LENGTH else (text1 or "[No content available]")
    t2 = (text2[:MAX_TEXT_LENGTH] + "...") if text2 and len(text2) > MAX_TEXT_LENGTH else (text2 or "[No content available]")
    return (
        f"You are a web analysis expert. Please determine which of the two given domains "
        f"is more authoritative as an information source based on the following criteria.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself and its content.\n"
        f"Consider both domains and their contents together when making the comparison.\n\n"
        f"**DOMAINS & CONTENTS**:\n"
        f"Domain A: {domain1}\n"
        f"Content A: {t1}\n\n"
        f"Domain B: {domain2}\n"
        f"Content B: {t2}\n\n"
        f"Which domain is more authoritative? Only return 'A' or 'B'. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_ab_answer(response):
    if not response:
        return None
    text = response.strip()
    first = text[0].upper() if text else ""
    if first in ("A", "B"):
        return first
    if m := re.search(r"\b([AB])\b", text.upper()):
        return m.group(1)
    return None


def run_batch_inference(task_list, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(task_list), BATCH_SIZE):
        batch = task_list[i:i + BATCH_SIZE]
        prompts = [tokenizer.apply_chat_template(
            [{"role": "user", "content": build_pairwise_prompt(task["domain_a"], task["text_a"], task["domain_b"], task["text_b"])}],
            tokenize=False, add_generation_prompt=True, enable_thinking=False
        ) for task in batch]
        for task, out in zip(batch, llm.generate(prompts, sampling_params)):
            raw = out.outputs[0].text if out.outputs else ""
            winner = extract_ab_answer(raw)
            results.append((winner, raw, winner is None))
    return results


def anchor_compare(i, j, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    if i not in anchor_set and j not in anchor_set:
        return non_anchor_wins.get(i, 0) > non_anchor_wins.get(j, 0)
    if i in anchor_set and j in anchor_set:
        return anchor_wins.get(i, 0) > anchor_wins.get(j, 0)
    if (i, j) in direct_results:
        return direct_results[(i, j)]
    if (j, i) in direct_results:
        return not direct_results[(j, i)]
    return anchor_wins.get(i, non_anchor_wins.get(i, 0)) > anchor_wins.get(j, non_anchor_wins.get(j, 0))


def bubble_sort_by_anchor(indices, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    arr = list(indices)
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if not anchor_compare(arr[j], arr[j + 1], anchor_set, anchor_wins, non_anchor_wins, direct_results):
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed_ids = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    try:
                        if "list_id" in (obj := json.loads(line)):
                            completed_ids.add(obj["list_id"])
                    except json.JSONDecodeError:
                        pass

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        for item_idx, item in enumerate(tqdm(items)):
            list_id = item.get("list_id", item_idx)
            if list_id in completed_ids:
                continue

            domains = item["domains"]
            doc_texts = item.get("doc_texts", [""] * len(domains))
            n = len(domains)
            anchor_indices = random.sample(range(n), NUM_ANCHORS)
            anchor_set = set(anchor_indices)
            non_anchor_indices = [i for i in range(n) if i not in anchor_set]

            na_vs_a_tasks = [{"idx_a": ni, "domain_a": domains[ni], "text_a": doc_texts[ni],
                              "idx_b": ai, "domain_b": domains[ai], "text_b": doc_texts[ai], "type": "non_anchor_vs_anchor"}
                             for ni in non_anchor_indices for ai in anchor_indices]
            a_vs_a_tasks = [{"idx_a": ai, "domain_a": domains[ai], "text_a": doc_texts[ai],
                             "idx_b": aj, "domain_b": domains[aj], "text_b": doc_texts[aj], "type": "anchor_vs_anchor"}
                            for ai, aj in itertools.combinations(anchor_indices, 2)]

            infer_results = run_batch_inference(na_vs_a_tasks + a_vs_a_tasks, llm, tokenizer, sampling_params)

            direct_results, non_anchor_wins, anchor_wins, comparison_records = {}, defaultdict(int), defaultdict(int), []
            for task, (winner, raw, failed) in zip(na_vs_a_tasks + a_vs_a_tasks, infer_results):
                idx_a, idx_b = task["idx_a"], task["idx_b"]
                if not failed:
                    i_wins = winner == "A"
                    direct_results[(idx_a, idx_b)] = i_wins
                    direct_results[(idx_b, idx_a)] = not i_wins
                    if task["type"] == "non_anchor_vs_anchor" and i_wins:
                        non_anchor_wins[idx_a] += 1
                    elif task["type"] == "anchor_vs_anchor":
                        if i_wins:
                            anchor_wins[idx_a] += 1
                        else:
                            anchor_wins[idx_b] += 1

                comparison_records.append({
                    "domain_a": task["domain_a"], "idx_a": idx_a,
                    "domain_b": task["domain_b"], "idx_b": idx_b,
                    "winner": winner, "extraction_failed": failed, "llm_output": raw, "type": task["type"]
                })

            pred = bubble_sort_by_anchor(range(n), anchor_set, anchor_wins, non_anchor_wins, direct_results)

            out_f.write(json.dumps({
                "list_id": list_id, "domains": domains, "pageranks": item["pageranks"],
                "ground_truth": item["ground_truth"], "anchor_indices": anchor_indices,
                "anchor_wins_vs_anchors": {str(k): v for k, v in anchor_wins.items()},
                "non_anchor_wins_vs_anchors": {str(k): v for k, v in non_anchor_wins.items()},
                "pred": pred, "comparisons": comparison_records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PointScore.py ADDED
@@ -0,0 +1,92 @@
import json
import random
import itertools
import re
from collections import defaultdict

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "PAIRWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_prompt(domain1, domain2):
    return (
        f"You are a web analysis expert. Please score the source authority of each of the two given domains "
        f"based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n"
        f"Consider both domains together when assigning scores to ensure consistency.\n\n"
        f"**DOMAINS**:\n"
        f"Domain A: {domain1}\n"
        f"Domain B: {domain2}\n\n"
        f"Your output MUST be a single, valid JSON object in the format: {{\"A\": X, \"B\": X}}, where each X is an integer score from 0 to 9. "
        f"Example format: {{\"A\": 7, \"B\": 4}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response):
    text = response.strip()
    # rfind returns -1 when the marker is absent, so check explicitly.
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            d = json.loads(match.group())
            return float(d.get("A", d.get("a"))), float(d.get("B", d.get("b")))
        except Exception:
            pass
    return None, None


def run_inference(tasks, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(tasks), BATCH_SIZE):
        batch = tasks[i:i + BATCH_SIZE]
        prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(t["domain_a"], t["domain_b"])}], tokenize=False, add_generation_prompt=True, enable_thinking=False) for t in batch]
        outputs = llm.generate(prompts, sampling_params)
        for t, out in zip(batch, outputs):
            raw = out.outputs[0].text if out.outputs else ""
            sa, sb = extract_scores(raw)
            results.append((t["idx_a"], t["idx_b"], sa, sb, raw))
    return results


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, tensor_parallel_size=4, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=64)

    with open(EVAL_FILE, 'r', encoding='utf-8') as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as out_f:
        for item in tqdm(items):
            domains = item["domains"]
            n = len(domains)
            anchors = random.sample(range(n), NUM_ANCHORS)
            non_anchors = [i for i in range(n) if i not in anchors]

            tasks = [{"idx_a": ni, "idx_b": ai, "domain_a": domains[ni], "domain_b": domains[ai]} for ni in non_anchors for ai in anchors]
            tasks += [{"idx_a": ai, "idx_b": aj, "domain_a": domains[ai], "domain_b": domains[aj]} for ai, aj in itertools.combinations(anchors, 2)]

            scores = defaultdict(list)
            records = []
            for idx_a, idx_b, sa, sb, raw in run_inference(tasks, llm, tokenizer, sampling_params):
                if sa is not None and sb is not None:
                    scores[idx_a].append(sa)
                    scores[idx_b].append(sb)
                records.append({"domain_a": domains[idx_a], "idx_a": idx_a, "domain_b": domains[idx_b], "idx_b": idx_b, "score_a": sa, "score_b": sb, "llm_output": raw})

            avg = {i: float(np.mean(scores[i])) if scores[i] else 0.0 for i in range(n)}
            pred = sorted(range(n), key=lambda x: avg[x], reverse=True)

            out_f.write(json.dumps({"domains": domains, "pageranks": item["pageranks"], "ground_truth": item["ground_truth"], "anchor_indices": anchors, "avg_scores": avg, "pred": pred, "comparisons": records}, ensure_ascii=False) + '\n')


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PairJudge_PointScore_with_text.py ADDED
@@ -0,0 +1,155 @@
import json
import os
import re
import itertools
from collections import defaultdict
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "PAIRWISE_FILE"
OUTPUT_FILE = "OUTPUT_DIR"
BATCH_SIZE = 8
MAX_TEXT_LENGTH = 500


def build_pairwise_prompt(domain1: str, text1: str, domain2: str, text2: str) -> str:
    t1 = (text1[:MAX_TEXT_LENGTH] + "...") if text1 and len(text1) > MAX_TEXT_LENGTH else (text1 or "[No content available]")
    t2 = (text2[:MAX_TEXT_LENGTH] + "...") if text2 and len(text2) > MAX_TEXT_LENGTH else (text2 or "[No content available]")

    prompt = (
        f"You are a web analysis expert. Please score the source authority of each of the two given domains "
        f"based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself and its content.\n"
        f"Consider both domains and their contents together when assigning scores to ensure consistency.\n\n"
        f"**DOMAINS**:\n"
        f"Domain A: {domain1}\n"
        f"Content A: {t1}\n\n"
        f"Domain B: {domain2}\n"
        f"Content B: {t2}\n\n"
        f"Your output MUST be a single, valid JSON object in the format: {{\"A\": X, \"B\": X}}, where each X is an integer score from 0 to 9. "
        f"Example format: {{\"A\": 7, \"B\": 4}}. Do not add any explanation.\n"
        f"Your answer: "
    )
    return prompt


def extract_pairwise_scores(response_raw: str):
    response = response_raw.strip()

    marker = "Your answer: "
    pos = response.rfind(marker)
    if pos != -1:
        response = response[pos + len(marker):].strip()

    json_match = re.search(r'\{[^{}]*\}', response, re.DOTALL)
    if json_match:
        try:
            d = json.loads(json_match.group())
            sa = float(d.get("A", d.get("a", None)))
            sb = float(d.get("B", d.get("b", None)))
            return sa, sb
        except (json.JSONDecodeError, ValueError, TypeError):
            pass

    return None, None


def run_batch_inference(task_list, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(task_list), BATCH_SIZE):
        batch = task_list[i:i + BATCH_SIZE]
        prompts = []

        for task in batch:
            p = build_pairwise_prompt(task["domain_a"], task["text_a"], task["domain_b"], task["text_b"])
            msg = [{"role": "user", "content": p}]
            chat_p = tokenizer.apply_chat_template(
                msg, tokenize=False, add_generation_prompt=True, enable_thinking=False
            )
            prompts.append(chat_p)

        outputs = llm.generate(prompts, sampling_params)

        for task, out in zip(batch, outputs):
            raw = out.outputs[0].text if out.outputs else ""
            sa, sb = extract_pairwise_scores(raw)
            failed = (sa is None or sb is None)
            results.append((sa, sb, raw, failed))

    return results


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

    sampling_params = SamplingParams(
        temperature=0.0,
        top_p=1.0,
        max_tokens=256,
    )

    with open(EVAL_FILE, 'r', encoding='utf-8') as f:
        listwise_items = [json.loads(line) for line in f]

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(OUTPUT_FILE, 'w', encoding='utf-8') as out_f:
        for item in listwise_items:
            domains = item["domains"]
            doc_texts = item.get("doc_texts", [""] * len(domains))
            pageranks = item["pageranks"]
            ground_truth = item["ground_truth"]
            n = len(domains)

            all_tasks = []
            for i, j in itertools.combinations(range(n), 2):
                all_tasks.append({
                    "idx_a": i, "domain_a": domains[i], "text_a": doc_texts[i],
                    "idx_b": j, "domain_b": domains[j], "text_b": doc_texts[j]
                })

            infer_results = run_batch_inference(all_tasks, llm, tokenizer, sampling_params)

            score_lists = defaultdict(list)
            comparison_records = []

            for task, (sa, sb, raw, failed) in zip(all_tasks, infer_results):
                if not failed:
                    score_lists[task["idx_a"]].append(sa)
                    score_lists[task["idx_b"]].append(sb)

                comparison_records.append({
                    "domain_a": task["domain_a"], "idx_a": task["idx_a"],
                    "domain_b": task["domain_b"], "idx_b": task["idx_b"],
                    "score_a": sa, "score_b": sb,
                    "extraction_failed": failed,
                    "llm_output": raw
                })

            # Average over every index so the sort below never hits a missing key.
            avg_scores = {
                idx: float(sum(score_lists[idx]) / len(score_lists[idx])) if score_lists[idx] else 0.0
                for idx in range(n)
            }

            pred = sorted(range(n), key=lambda x: avg_scores[x], reverse=True)

            result = {
                "domains": domains,
                "pageranks": pageranks,
                "ground_truth": ground_truth,
                "avg_scores": avg_scores,
                "pred": pred,
                "comparisons": comparison_records,
            }

            out_f.write(json.dumps(result, ensure_ascii=False) + '\n')
            out_f.flush()

    print(f"Results saved to {OUTPUT_FILE}")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PointJudge.py
ADDED
|
@@ -0,0 +1,70 @@
import json
import re
from typing import List, Dict, Any

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
INPUT_FILE = "CORPUS_FILE"
OUTPUT_FILE = "OUTPUT_DIR"
BATCH_SIZE = 8


def load_data(file_path: str) -> List[Dict[str, Any]]:
    with open(file_path, "r", encoding="utf-8") as f:
        return [json.loads(line.strip()) for line in f if line.strip()]


def build_prompt(domain: str) -> str:
    return (
        f"You are a web analysis expert. Please score the source authority of the given domain based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n\n"
        f"**DOMAIN**: {domain}\n"
        f"Based on the above criteria, rate the source authority of this DOMAIN (0-9 points).\n"
        f"Output only: {{\"score\": X}} where X is your evaluated score (0, 1, 2, 3, 4, 5, 6, 7, 8, or 9). Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_score(text: str) -> int:
    text = text.strip()
    try:
        if match := re.search(r'\{[^}]*"score"\s*:\s*(\d+)[^}]*\}', text):
            score = int(match.group(1))
            if 0 <= score <= 9:
                return score
    except Exception:
        pass
    if numbers := re.findall(r'\b([0-9])\b', text):
        return int(numbers[0])
    return -1


def main():
    items = load_data(INPUT_FILE)
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=128)

    results = []
    for i in tqdm(range(0, len(items), BATCH_SIZE)):
        batch = items[i : i + BATCH_SIZE]
        messages = [[{"role": "user", "content": build_prompt(item["domain"])}] for item in batch]
        prompts = [tokenizer.apply_chat_template(m, tokenize=False, add_generation_prompt=True, enable_thinking=False) for m in messages]
        outputs = llm.generate(prompts, sampling_params)

        for item, output in zip(batch, outputs):
            text = output.outputs[0].text if output.outputs else ""
            pred = extract_score(text)
            results.append({"domain": item["domain"], "true_score": item["pagerank"], "pred_score": pred, "llm_output": text if pred != -1 else ""})

    with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
        for r in results:
            f.write(json.dumps(r, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()

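Note: a minimal sketch of how these pointwise outputs could be scored afterwards; the result path and the choice of Spearman correlation are assumptions for illustration, not part of the uploaded scripts.

# Hypothetical evaluation sketch (illustration only): correlate the predicted
# 0-9 authority scores with the PageRank-derived labels in the output JSONL.
import json
from scipy.stats import spearmanr  # assumes scipy is installed

true_scores, pred_scores = [], []
with open("pointjudge_results.jsonl", "r", encoding="utf-8") as f:  # placeholder path
    for line in f:
        record = json.loads(line)
        if record["pred_score"] != -1:  # -1 marks a failed score extraction
            true_scores.append(record["true_score"])
            pred_scores.append(record["pred_score"])

rho, p_value = spearmanr(true_scores, pred_scores)
print(f"Spearman rho = {rho:.3f} (p = {p_value:.3g}) over {len(pred_scores)} domains")
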
AuthorityBench/judge_and_eval/Judges/Judges_on_DomainAuth/PointJudge_with_text.py
ADDED
|
@@ -0,0 +1,104 @@
import json
import os
import re
from vllm import LLM, SamplingParams
from transformers import AutoTokenizer

MODEL_DIR = "YOUR_MODEL_DIR"
INPUT_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_DIR"
BATCH_SIZE = 8


def build_prompt(domain: str, doc_text: str) -> str:
    truncated_text = doc_text[:500] if len(doc_text) > 500 else doc_text

    prompt = (
        f"You are a web analysis expert. Please score the source authority of the given domain based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Source authority refers to the credibility, professionalism, official certification, or public recognition of the domain itself.\n"
        f"**DOMAIN**: {domain}\n"
        f"**Webpage Content**: {truncated_text}\n"
        f"Based on the domain name, their web content and the above criteria, rate the source authority of this DOMAIN (0-9 points).\n"
        f"Output only: {{\"score\": X}} where X is your evaluated score (0, 1, 2, 3, 4, 5, 6, 7, 8, or 9). Do not add any explanation.\n"
        f"Your answer: "
    )
    return prompt


def extract_score(text: str) -> int:
    text = text.strip()

    # Try to extract JSON format: {"score": X}
    json_match = re.search(r'\{[^}]*"score"\s*:\s*(\d+)[^}]*\}', text)
    if json_match:
        score = int(json_match.group(1))
        if 0 <= score <= 9:
            return score

    numbers = re.findall(r'\b([0-9])\b', text)
    if numbers:
        return int(numbers[0])

    return -1


def main():
    pointwise_items = []
    with open(INPUT_FILE, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if line:
                pointwise_items.append(json.loads(line))

    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)

    sampling_params = SamplingParams(
        temperature=0.0,
        top_p=1.0,
        max_tokens=256
    )

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(OUTPUT_FILE, "w", encoding="utf-8") as f:
        for i in range(0, len(pointwise_items), BATCH_SIZE):
            batch = pointwise_items[i:i + BATCH_SIZE]

            raw_prompts = [
                build_prompt(item.get("domain", ""), item.get("doc_text", ""))
                for item in batch
            ]

            messages_batch = [[{"role": "user", "content": prompt}] for prompt in raw_prompts]
            prompts = [
                tokenizer.apply_chat_template(
                    messages,
                    tokenize=False,
                    add_generation_prompt=True,
                    enable_thinking=False
                )
                for messages in messages_batch
            ]

            outputs = llm.generate(prompts, sampling_params)

            for item, output in zip(batch, outputs):
                response_raw = output.outputs[0].text if output.outputs else ""
                pred = extract_score(response_raw)

                result = {
                    "domain": item.get("domain", ""),
                    "url": item.get("url", ""),
                    "docid": item.get("docid", ""),
                    "true_score": item["pagerank"],
                    "pred_score": pred,
                    "llm_output": response_raw
                }

                f.write(json.dumps(result, ensure_ascii=False) + "\n")
                f.flush()


if __name__ == "__main__":
    main()

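Note: for reference, a hypothetical input record in the shape this script reads (field names follow the item.get(...) calls above; the values are illustrative, not taken from the corpus).

# Illustrative only -- not a real DomainAuth corpus record.
example_item = {
    "domain": "example.org",                       # domain to be scored
    "url": "https://example.org/index.html",
    "docid": "doc_000001",
    "doc_text": "Example.org is reserved for use in documentation ...",  # truncated to 500 chars by build_prompt
    "pagerank": 7,                                  # PageRank-based label, echoed as true_score
}
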
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_ba.py
ADDED
|
@@ -0,0 +1,69 @@
import json
import os
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_ranking_prompt(question_entity):
    return (
        f"You are a Wikipedia notability expert and a pop-culture sports expert that can rank entities based on their authority.\n\n"
        f"I will provide you with 10 basketball entities, each indicated by a numerical identifier [].\n"
        f"Rank the basketball entities based on their authority (the public recognition, fame, historical significance, or widespread knowledge of the entity).\n"
        f"Context: These entities are the subjects of questions like 'Where is the birthplace of the basketball player...'\n\n"
        + "\n".join([f"[{i}] \"{question_entity[i]}\"" for i in range(10)])
        + f"\n\nRank the 10 basketball entities above based on their authority (highest to lowest).\n"
        f"All the basketball entities should be included and listed using identifiers, in descending order of authority.\n"
        f"The output format should be [] > [], e.g., [4] > [0].\n"
        f"Only respond with the ranking results, do not say any word or explain.\n"
        f"Your answer: "
    )


def extract_ranking_answer(response, num_entities=10):
    text = response.strip()
    if match := re.search(r"Your answer:\s*(.*)", text, re.IGNORECASE | re.DOTALL):
        text = match.group(1).strip()
    for extractor in [re.compile(r"\[(\d+)\]"), re.compile(r"\b\d+\b")]:
        if nums := [int(x) for x in extractor.findall(text)]:
            if len(nums) >= num_entities:
                ranking = nums[:num_entities]
                if len(ranking) == num_entities and set(ranking) == set(range(num_entities)):
                    return ranking
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_ranking_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            for item, output in zip(batch, llm.generate(prompts, sampling_params)):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred = extract_ranking_answer(text, n)
                out_f.write(json.dumps({
                    "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "llm_output": text
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()

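Note: one possible way to score these listwise outputs, assuming ground_truth is, like pred, a list of entity indices in descending authority order; the metric (Kendall's tau) and the file path are assumptions for illustration.

# Hypothetical scoring sketch for ListJudge_ListRank outputs (illustration only).
import json
from scipy.stats import kendalltau  # assumes scipy is installed

taus = []
with open("listrank_basketball_results.jsonl", "r", encoding="utf-8") as f:  # placeholder path
    for line in f:
        record = json.loads(line)
        if record["pred"] == -1:  # ranking could not be parsed from the model output
            continue
        n = len(record["pred"])
        # position of each entity index in the predicted and reference orderings
        pred_pos = [record["pred"].index(i) for i in range(n)]
        gold_pos = [record["ground_truth"].index(i) for i in range(n)]
        tau, _ = kendalltau(pred_pos, gold_pos)
        taus.append(tau)

print(f"Mean Kendall tau over {len(taus)} lists: {sum(taus) / len(taus):.3f}")
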
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_mo.py
ADDED
|
@@ -0,0 +1,69 @@
import json
import os
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_ranking_prompt(question_entity):
    return (
        f"You are a Wikipedia notability expert and a film expert that can rank entities based on their authority.\n\n"
        f"I will provide you with 10 movie entities, each indicated by a numerical identifier [].\n"
        f"Rank the movie entities based on their authority (the public recognition, fame, historical significance, or widespread knowledge of the entity).\n"
        f"Context: These entities are the subjects of questions like 'Who is the director of the movie...'\n\n"
        + "\n".join([f"[{i}] \"{question_entity[i]}\"" for i in range(10)])
        + f"\n\nRank the 10 movie entities above based on their authority (highest to lowest).\n"
        f"All the movie entities should be included and listed using identifiers, in descending order of authority.\n"
        f"The output format should be [] > [], e.g., [4] > [0].\n"
        f"Only respond with the ranking results, do not say any word or explain.\n"
        f"Your answer: "
    )


def extract_ranking_answer(response, num_entities=10):
    text = response.strip()
    if match := re.search(r"Your answer:\s*(.*)", text, re.IGNORECASE | re.DOTALL):
        text = match.group(1).strip()
    for extractor in [re.compile(r"\[(\d+)\]"), re.compile(r"\b\d+\b")]:
        if nums := [int(x) for x in extractor.findall(text)]:
            if len(nums) >= num_entities:
                ranking = nums[:num_entities]
                if len(ranking) == num_entities and set(ranking) == set(range(num_entities)):
                    return ranking
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_ranking_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            for item, output in zip(batch, llm.generate(prompts, sampling_params)):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred = extract_ranking_answer(text, n)
                out_f.write(json.dumps({
                    "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "llm_output": text
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_ListRank_so.py
ADDED
|
@@ -0,0 +1,69 @@
import json
import os
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_ranking_prompt(question_entity):
    return (
        f"You are a Wikipedia notability expert and a music industry expert that can rank entities based on their authority.\n\n"
        f"I will provide you with 10 song entities, each indicated by a numerical identifier [].\n"
        f"Rank the song entities based on their authority (the public recognition, fame, historical significance, or widespread knowledge of the entity).\n"
        f"Context: These entities are the subjects of questions like 'Who is the performer of the song ...'\n\n"
        + "\n".join([f"[{i}] \"{question_entity[i]}\"" for i in range(10)])
        + f"\n\nRank the 10 song entities above based on their authority (highest to lowest).\n"
        f"All the song entities should be included and listed using identifiers, in descending order of authority.\n"
        f"The output format should be [] > [], e.g., [4] > [0].\n"
        f"Only respond with the ranking results, do not say any word or explain.\n"
        f"Your answer: "
    )


def extract_ranking_answer(response, num_entities=10):
    text = response.strip()
    if match := re.search(r"Your answer:\s*(.*)", text, re.IGNORECASE | re.DOTALL):
        text = match.group(1).strip()
    for extractor in [re.compile(r"\[(\d+)\]"), re.compile(r"\b\d+\b")]:
        if nums := [int(x) for x in extractor.findall(text)]:
            if len(nums) >= num_entities:
                ranking = nums[:num_entities]
                if len(ranking) == num_entities and set(ranking) == set(range(num_entities)):
                    return ranking
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_ranking_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            for item, output in zip(batch, llm.generate(prompts, sampling_params)):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred = extract_ranking_answer(text, n)
                out_f.write(json.dumps({
                    "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "llm_output": text
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_ba.py
ADDED
|
@@ -0,0 +1,78 @@
import json
import os

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_prompt(entities):
    return (
        f"You are a Wikipedia notability expert and a pop-culture sports expert. Please score the entity authority of each basketball entity "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider all entities together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Where is the birthplace of the basketball player...'\n\n"
        f"**TARGET ENTITIES**:\n"
        + "\n".join([f'[{i}] "{entity}"' for i, entity in enumerate(entities)])
        + f"\n\nYour output MUST be a single, valid JSON object. The JSON object must map each entity's numerical identifier (as a string) to its integer score (0-9).\n"
        f'Example format: {{"0": 7, "1": 4, ..., "9": 8}}. Do not add any explanation.\n'
        f"Your answer: "
    )


def extract_scores(response, num_entities):
    text = response.strip()
    try:
        start, end = text.find("{"), text.rfind("}")
        if start != -1 and end != -1 and end > start:
            scores = {int(k.strip("[]")): float(v) for k, v in json.loads(text[start:end + 1]).items()}
            if len(scores) == num_entities:
                return sorted(range(num_entities), key=lambda x: scores.get(x, 0), reverse=True), scores
    except Exception:
        pass
    return list(range(num_entities)), None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=4, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for i in tqdm(range(0, len(pending), BATCH_SIZE)):
            batch = pending[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for _, item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for (idx, item), output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred, scores = extract_scores(text, n)
                out_f.write(json.dumps({
                    "item_id": idx, "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "scores": scores, "llm_output": text
                }, ensure_ascii=False) + "\n")
                out_f.flush()


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_mo.py
ADDED
|
@@ -0,0 +1,78 @@
import json
import os

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_prompt(entities):
    return (
        f"You are a Wikipedia notability expert and a film expert. Please score the entity authority of each movie entity "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider all entities together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Who is the director of the movie ...'\n\n"
        f"**TARGET ENTITIES**:\n"
        + "\n".join([f'[{i}] "{entity}"' for i, entity in enumerate(entities)])
        + f"\n\nYour output MUST be a single, valid JSON object. The JSON object must map each entity's numerical identifier (as a string) to its integer score (0-9).\n"
        f'Example format: {{"0": 7, "1": 4, ..., "9": 8}}. Do not add any explanation.\n'
        f"Your answer: "
    )


def extract_scores(response, num_entities):
    text = response.strip()
    try:
        start, end = text.find("{"), text.rfind("}")
        if start != -1 and end != -1 and end > start:
            scores = {int(k.strip("[]")): float(v) for k, v in json.loads(text[start:end + 1]).items()}
            if len(scores) == num_entities:
                return sorted(range(num_entities), key=lambda x: scores.get(x, 0), reverse=True), scores
    except Exception:
        pass
    return list(range(num_entities)), None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=4, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for i in tqdm(range(0, len(pending), BATCH_SIZE)):
            batch = pending[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for _, item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for (idx, item), output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred, scores = extract_scores(text, n)
                out_f.write(json.dumps({
                    "item_id": idx, "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "scores": scores, "llm_output": text
                }, ensure_ascii=False) + "\n")
                out_f.flush()


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/ListJudge_PointScore_so.py
ADDED
|
@@ -0,0 +1,78 @@
import json
import os

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16


def build_prompt(entities):
    return (
        f"You are a Wikipedia notability expert and a music industry expert. Please score the entity authority of each song entity "
        f"in the provided list based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider all entities together when assigning scores to ensure consistency and relative calibration.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Who is the performer of the song...'\n\n"
        f"**TARGET ENTITIES**:\n"
        + "\n".join([f'[{i}] "{entity}"' for i, entity in enumerate(entities)])
        + f"\n\nYour output MUST be a single, valid JSON object. The JSON object must map each entity's numerical identifier (as a string) to its integer score (0-9).\n"
        f'Example format: {{"0": 7, "1": 4, ..., "9": 8}}. Do not add any explanation.\n'
        f"Your answer: "
    )


def extract_scores(response, num_entities):
    text = response.strip()
    try:
        start, end = text.find("{"), text.rfind("}")
        if start != -1 and end != -1 and end > start:
            scores = {int(k.strip("[]")): float(v) for k, v in json.loads(text[start:end + 1]).items()}
            if len(scores) == num_entities:
                return sorted(range(num_entities), key=lambda x: scores.get(x, 0), reverse=True), scores
    except Exception:
        pass
    return list(range(num_entities)), None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=4, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for i in tqdm(range(0, len(pending), BATCH_SIZE)):
            batch = pending[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question_entity"])}],
                                                     tokenize=False, add_generation_prompt=True, enable_thinking=False) for _, item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for (idx, item), output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                n = len(item["question_entity"])
                pred, scores = extract_scores(text, n)
                out_f.write(json.dumps({
                    "item_id": idx, "question_entity": item["question_entity"], "score": item["score"],
                    "ground_truth": item["ground_truth"], "pred": pred, "scores": scores, "llm_output": text
                }, ensure_ascii=False) + "\n")
                out_f.flush()


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_ba.py
ADDED
|
@@ -0,0 +1,157 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_pairwise_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a pop-culture sports expert. Please determine which of the two given entities is more authoritative based on the following criteria.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Where is the birthplace of the basketball player ...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: \"{entity1}\"\n"
        f"Entity B: \"{entity2}\"\n\n"
        f"Which entity is more authoritative? Only return 'A' or 'B'. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_ab_answer(response):
    if not response:
        return None
    text = response.strip()
    first = text[0].upper() if text else ""
    if first in ("A", "B"):
        return first
    if m := re.search(r"\b([AB])\b", text.upper()):
        return m.group(1)
    return None


def run_batch_inference(task_list, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(task_list), BATCH_SIZE):
        batch = task_list[i:i + BATCH_SIZE]
        prompts = [tokenizer.apply_chat_template(
            [{"role": "user", "content": build_pairwise_prompt(task["entity_a"], task["entity_b"])}],
            tokenize=False, add_generation_prompt=True, enable_thinking=False
        ) for task in batch]
        for task, out in zip(batch, llm.generate(prompts, sampling_params)):
            raw = out.outputs[0].text if out.outputs else ""
            winner = extract_ab_answer(raw)
            results.append((winner, raw, winner is None))
    return results


def anchor_compare(i, j, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    if i not in anchor_set and j not in anchor_set:
        return non_anchor_wins.get(i, 0) > non_anchor_wins.get(j, 0)
    if i in anchor_set and j in anchor_set:
        return anchor_wins.get(i, 0) > anchor_wins.get(j, 0)
    if (i, j) in direct_results:
        return direct_results[(i, j)]
    if (j, i) in direct_results:
        return not direct_results[(j, i)]
    return anchor_wins.get(i, non_anchor_wins.get(i, 0)) > anchor_wins.get(j, non_anchor_wins.get(j, 0))


def bubble_sort_by_anchor(indices, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    arr = list(indices)
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if not anchor_compare(arr[j], arr[j + 1], anchor_set, anchor_wins, non_anchor_wins, direct_results):
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed_ids = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    try:
                        if "list_id" in (obj := json.loads(line)):
                            completed_ids.add(obj["list_id"])
                    except json.JSONDecodeError:
                        pass

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        for item_idx, item in enumerate(tqdm(items)):
            list_id = item.get("list_id", item_idx)
            if list_id in completed_ids:
                continue

            entities = item["question_entity"]
            n = len(entities)
            anchor_indices = random.sample(range(n), NUM_ANCHORS)
            anchor_set = set(anchor_indices)
            non_anchor_indices = [i for i in range(n) if i not in anchor_set]

            na_vs_a_tasks = [{"idx_a": ni, "entity_a": entities[ni], "idx_b": ai, "entity_b": entities[ai], "type": "non_anchor_vs_anchor"}
                             for ni in non_anchor_indices for ai in anchor_indices]
            a_vs_a_tasks = [{"idx_a": ai, "entity_a": entities[ai], "idx_b": aj, "entity_b": entities[aj], "type": "anchor_vs_anchor"}
                            for ai, aj in itertools.combinations(anchor_indices, 2)]

            infer_results = run_batch_inference(na_vs_a_tasks + a_vs_a_tasks, llm, tokenizer, sampling_params)

            direct_results, non_anchor_wins, anchor_wins, comparison_records = {}, defaultdict(int), defaultdict(int), []
            for task, (winner, raw, failed) in zip(na_vs_a_tasks + a_vs_a_tasks, infer_results):
                idx_a, idx_b = task["idx_a"], task["idx_b"]
                if not failed:
                    i_wins = winner == "A"
                    direct_results[(idx_a, idx_b)] = i_wins
                    direct_results[(idx_b, idx_a)] = not i_wins
                    if task["type"] == "non_anchor_vs_anchor" and i_wins:
                        non_anchor_wins[idx_a] += 1
                    elif task["type"] == "anchor_vs_anchor":
                        if i_wins:
                            anchor_wins[idx_a] += 1
                        else:
                            anchor_wins[idx_b] += 1

                comparison_records.append({
                    "entity_a": task["entity_a"], "idx_a": idx_a,
                    "entity_b": task["entity_b"], "idx_b": idx_b,
                    "winner": winner, "extraction_failed": failed, "llm_output": raw, "type": task["type"]
                })

            pred = bubble_sort_by_anchor(range(n), anchor_set, anchor_wins, non_anchor_wins, direct_results)

            out_f.write(json.dumps({
                "list_id": list_id, "question_entity": entities, "score": item["score"],
                "ground_truth": item["ground_truth"], "anchor_indices": anchor_indices,
                "anchor_wins_vs_anchors": {str(k): v for k, v in anchor_wins.items()},
                "non_anchor_wins_vs_anchors": {str(k): v for k, v in non_anchor_wins.items()},
                "pred": pred, "comparisons": comparison_records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()

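Note: a small worked example of the comparison budget the anchor scheme above implies, assuming lists of 10 entities and NUM_ANCHORS = 5 as in the script; the savings over exhaustive pairing grow with list length, since the anchor design needs on the order of n*k rather than n^2 comparisons.

# Comparison counts for the anchor-based pairwise ranking (illustration only).
from math import comb

n, k = 10, 5                                 # list size and NUM_ANCHORS
anchor_calls = (n - k) * k + comb(k, 2)      # non-anchor-vs-anchor + anchor-vs-anchor = 25 + 10
full_calls = comb(n, 2)                      # exhaustive pairwise comparisons
print(anchor_calls, full_calls)              # 35 45
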
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_mo.py
ADDED
|
@@ -0,0 +1,157 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_pairwise_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a film expert. Please determine which of the two given entities is more authoritative based on the following criteria.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Who is the director of the movie ...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: \"{entity1}\"\n"
        f"Entity B: \"{entity2}\"\n\n"
        f"Which entity is more authoritative? Only return 'A' or 'B'. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_ab_answer(response):
    if not response:
        return None
    text = response.strip()
    first = text[0].upper() if text else ""
    if first in ("A", "B"):
        return first
    if m := re.search(r"\b([AB])\b", text.upper()):
        return m.group(1)
    return None


def run_batch_inference(task_list, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(task_list), BATCH_SIZE):
        batch = task_list[i:i + BATCH_SIZE]
        prompts = [tokenizer.apply_chat_template(
            [{"role": "user", "content": build_pairwise_prompt(task["entity_a"], task["entity_b"])}],
            tokenize=False, add_generation_prompt=True, enable_thinking=False
        ) for task in batch]
        for task, out in zip(batch, llm.generate(prompts, sampling_params)):
            raw = out.outputs[0].text if out.outputs else ""
            winner = extract_ab_answer(raw)
            results.append((winner, raw, winner is None))
    return results


def anchor_compare(i, j, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    if i not in anchor_set and j not in anchor_set:
        return non_anchor_wins.get(i, 0) > non_anchor_wins.get(j, 0)
    if i in anchor_set and j in anchor_set:
        return anchor_wins.get(i, 0) > anchor_wins.get(j, 0)
    if (i, j) in direct_results:
        return direct_results[(i, j)]
    if (j, i) in direct_results:
        return not direct_results[(j, i)]
    return anchor_wins.get(i, non_anchor_wins.get(i, 0)) > anchor_wins.get(j, non_anchor_wins.get(j, 0))


def bubble_sort_by_anchor(indices, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    arr = list(indices)
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if not anchor_compare(arr[j], arr[j + 1], anchor_set, anchor_wins, non_anchor_wins, direct_results):
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed_ids = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    try:
                        if "list_id" in (obj := json.loads(line)):
                            completed_ids.add(obj["list_id"])
                    except json.JSONDecodeError:
                        pass

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        for item_idx, item in enumerate(tqdm(items)):
            list_id = item.get("list_id", item_idx)
            if list_id in completed_ids:
                continue

            entities = item["question_entity"]
            n = len(entities)
            anchor_indices = random.sample(range(n), NUM_ANCHORS)
            anchor_set = set(anchor_indices)
            non_anchor_indices = [i for i in range(n) if i not in anchor_set]

            na_vs_a_tasks = [{"idx_a": ni, "entity_a": entities[ni], "idx_b": ai, "entity_b": entities[ai], "type": "non_anchor_vs_anchor"}
                             for ni in non_anchor_indices for ai in anchor_indices]
            a_vs_a_tasks = [{"idx_a": ai, "entity_a": entities[ai], "idx_b": aj, "entity_b": entities[aj], "type": "anchor_vs_anchor"}
                            for ai, aj in itertools.combinations(anchor_indices, 2)]

            infer_results = run_batch_inference(na_vs_a_tasks + a_vs_a_tasks, llm, tokenizer, sampling_params)

            direct_results, non_anchor_wins, anchor_wins, comparison_records = {}, defaultdict(int), defaultdict(int), []
            for task, (winner, raw, failed) in zip(na_vs_a_tasks + a_vs_a_tasks, infer_results):
                idx_a, idx_b = task["idx_a"], task["idx_b"]
                if not failed:
                    i_wins = winner == "A"
                    direct_results[(idx_a, idx_b)] = i_wins
                    direct_results[(idx_b, idx_a)] = not i_wins
                    if task["type"] == "non_anchor_vs_anchor" and i_wins:
                        non_anchor_wins[idx_a] += 1
                    elif task["type"] == "anchor_vs_anchor":
                        if i_wins:
                            anchor_wins[idx_a] += 1
                        else:
                            anchor_wins[idx_b] += 1

                comparison_records.append({
                    "entity_a": task["entity_a"], "idx_a": idx_a,
                    "entity_b": task["entity_b"], "idx_b": idx_b,
                    "winner": winner, "extraction_failed": failed, "llm_output": raw, "type": task["type"]
                })

            pred = bubble_sort_by_anchor(range(n), anchor_set, anchor_wins, non_anchor_wins, direct_results)

            out_f.write(json.dumps({
                "list_id": list_id, "question_entity": entities, "score": item["score"],
                "ground_truth": item["ground_truth"], "anchor_indices": anchor_indices,
                "anchor_wins_vs_anchors": {str(k): v for k, v in anchor_wins.items()},
                "non_anchor_wins_vs_anchors": {str(k): v for k, v in non_anchor_wins.items()},
                "pred": pred, "comparisons": comparison_records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()

AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PairRank_so.py
ADDED
|
@@ -0,0 +1,157 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "EVAL_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_pairwise_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a music industry expert. Please determine which of the two given entities is more authoritative based on the following criteria.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"**QUESTION CONTEXT**: These entities are the subjects of questions like 'Who is the performer of the song ...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: \"{entity1}\"\n"
        f"Entity B: \"{entity2}\"\n\n"
        f"Which entity is more authoritative? Only return 'A' or 'B'. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_ab_answer(response):
    if not response:
        return None
    text = response.strip()
    first = text[0].upper() if text else ""
    if first in ("A", "B"):
        return first
    if m := re.search(r"\b([AB])\b", text.upper()):
        return m.group(1)
    return None


def run_batch_inference(task_list, llm, tokenizer, sampling_params):
    results = []
    for i in range(0, len(task_list), BATCH_SIZE):
        batch = task_list[i:i + BATCH_SIZE]
        prompts = [tokenizer.apply_chat_template(
            [{"role": "user", "content": build_pairwise_prompt(task["entity_a"], task["entity_b"])}],
            tokenize=False, add_generation_prompt=True, enable_thinking=False
        ) for task in batch]
        for task, out in zip(batch, llm.generate(prompts, sampling_params)):
            raw = out.outputs[0].text if out.outputs else ""
            winner = extract_ab_answer(raw)
            results.append((winner, raw, winner is None))
    return results


def anchor_compare(i, j, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    if i not in anchor_set and j not in anchor_set:
        return non_anchor_wins.get(i, 0) > non_anchor_wins.get(j, 0)
    if i in anchor_set and j in anchor_set:
        return anchor_wins.get(i, 0) > anchor_wins.get(j, 0)
    if (i, j) in direct_results:
        return direct_results[(i, j)]
    if (j, i) in direct_results:
        return not direct_results[(j, i)]
    return anchor_wins.get(i, non_anchor_wins.get(i, 0)) > anchor_wins.get(j, non_anchor_wins.get(j, 0))


def bubble_sort_by_anchor(indices, anchor_set, anchor_wins, non_anchor_wins, direct_results):
    arr = list(indices)
    n = len(arr)
    for i in range(n):
        for j in range(0, n - i - 1):
            if not anchor_compare(arr[j], arr[j + 1], anchor_set, anchor_wins, non_anchor_wins, direct_results):
                arr[j], arr[j + 1] = arr[j + 1], arr[j]
    return arr


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed_ids = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    try:
                        if "list_id" in (obj := json.loads(line)):
                            completed_ids.add(obj["list_id"])
                    except json.JSONDecodeError:
                        pass

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        for item_idx, item in enumerate(tqdm(items)):
            list_id = item.get("list_id", item_idx)
            if list_id in completed_ids:
                continue

            entities = item["question_entity"]
            n = len(entities)
            anchor_indices = random.sample(range(n), NUM_ANCHORS)
            anchor_set = set(anchor_indices)
            non_anchor_indices = [i for i in range(n) if i not in anchor_set]

            na_vs_a_tasks = [{"idx_a": ni, "entity_a": entities[ni], "idx_b": ai, "entity_b": entities[ai], "type": "non_anchor_vs_anchor"}
                             for ni in non_anchor_indices for ai in anchor_indices]
            a_vs_a_tasks = [{"idx_a": ai, "entity_a": entities[ai], "idx_b": aj, "entity_b": entities[aj], "type": "anchor_vs_anchor"}
                            for ai, aj in itertools.combinations(anchor_indices, 2)]

            infer_results = run_batch_inference(na_vs_a_tasks + a_vs_a_tasks, llm, tokenizer, sampling_params)

            direct_results, non_anchor_wins, anchor_wins, comparison_records = {}, defaultdict(int), defaultdict(int), []
|
| 124 |
+
for task, (winner, raw, failed) in zip(na_vs_a_tasks + a_vs_a_tasks, infer_results):
|
| 125 |
+
idx_a, idx_b = task["idx_a"], task["idx_b"]
|
| 126 |
+
if not failed:
|
| 127 |
+
i_wins = winner == "A"
|
| 128 |
+
direct_results[(idx_a, idx_b)] = i_wins
|
| 129 |
+
direct_results[(idx_b, idx_a)] = not i_wins
|
| 130 |
+
if task["type"] == "non_anchor_vs_anchor" and i_wins:
|
| 131 |
+
non_anchor_wins[idx_a] += 1
|
| 132 |
+
elif task["type"] == "anchor_vs_anchor":
|
| 133 |
+
if i_wins:
|
| 134 |
+
anchor_wins[idx_a] += 1
|
| 135 |
+
else:
|
| 136 |
+
anchor_wins[idx_b] += 1
|
| 137 |
+
|
| 138 |
+
comparison_records.append({
|
| 139 |
+
"entity_a": task["entity_a"], "idx_a": idx_a,
|
| 140 |
+
"entity_b": task["entity_b"], "idx_b": idx_b,
|
| 141 |
+
"winner": winner, "extraction_failed": failed, "llm_output": raw, "type": task["type"]
|
| 142 |
+
})
|
| 143 |
+
|
| 144 |
+
pred = bubble_sort_by_anchor(range(n), anchor_set, anchor_wins, non_anchor_wins, direct_results)
|
| 145 |
+
|
| 146 |
+
out_f.write(json.dumps({
|
| 147 |
+
"list_id": list_id, "question_entity": entities, "score": item["score"],
|
| 148 |
+
"ground_truth": item["ground_truth"], "anchor_indices": anchor_indices,
|
| 149 |
+
"anchor_wins_vs_anchors": {str(k): v for k, v in anchor_wins.items()},
|
| 150 |
+
"non_anchor_wins_vs_anchors": {str(k): v for k, v in non_anchor_wins.items()},
|
| 151 |
+
"pred": pred, "comparisons": comparison_records
|
| 152 |
+
}, ensure_ascii=False) + "\n")
|
| 153 |
+
out_f.flush()
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
if __name__ == "__main__":
|
| 157 |
+
main()
|
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_ba.py
ADDED
@@ -0,0 +1,114 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a pop-culture sports expert. Please score the entity authority of each of the two given basketball entities "
        f"based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"QUESTION CONTEXT: These entities are the subjects of questions like 'Where is the birthplace of the basketball player...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: {entity1}\n"
        f"Entity B: {entity2}\n\n"
        f"Your output MUST be a single, valid JSON object in the format: {{\"A\": X, \"B\": Y}}, where X and Y are integer scores from 0 to 9. "
        f"Example format: {{\"A\": 7, \"B\": 4}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response):
    text = response.strip()
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            d = json.loads(match.group())
            sa = d.get("A", d.get("a"))
            sb = d.get("B", d.get("b"))
            if sa is not None and sb is not None:
                sa, sb = float(sa), float(sb)
                if 0 <= sa <= 9 and 0 <= sb <= 9:
                    return sa, sb
        except Exception:
            pass
    return None, None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=4, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=64)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for idx, item in tqdm(pending):
            entities = item["question_entity"]
            n = len(entities)
            anchors = random.sample(range(n), NUM_ANCHORS)
            non_anchors = [i for i in range(n) if i not in anchors]

            tasks = [{"idx_a": ni, "idx_b": ai, "entity_a": entities[ni], "entity_b": entities[ai]}
                     for ni in non_anchors for ai in anchors]
            tasks += [{"idx_a": ai, "idx_b": aj, "entity_a": entities[ai], "entity_b": entities[aj]}
                      for ai, aj in itertools.combinations(anchors, 2)]

            # Batch inference
            results = []
            for i in range(0, len(tasks), BATCH_SIZE):
                batch = tasks[i:i + BATCH_SIZE]
                prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(t["entity_a"], t["entity_b"])}],
                                                          tokenize=False, add_generation_prompt=True, enable_thinking=False) for t in batch]
                outputs = llm.generate(prompts, sampling_params)
                for t, out in zip(batch, outputs):
                    raw = out.outputs[0].text if out.outputs else ""
                    sa, sb = extract_scores(raw)
                    results.append((t, sa, sb, raw))

            # Aggregate scores
            scores = defaultdict(list)
            records = []
            for task, sa, sb, raw in results:
                if sa is not None and sb is not None:
                    scores[task["idx_a"]].append(sa)
                    scores[task["idx_b"]].append(sb)
                records.append({"entity_a": task["entity_a"], "idx_a": task["idx_a"], "entity_b": task["entity_b"],
                                "idx_b": task["idx_b"], "score_a": sa, "score_b": sb, "llm_output": raw})

            avg = {i: float(np.mean(scores[i])) if scores[i] else 0.0 for i in range(n)}
            pred = sorted(range(n), key=lambda x: avg[x], reverse=True)

            out_f.write(json.dumps({
                "item_id": idx, "question_entity": entities, "score": item.get("score"),
                "ground_truth": item["ground_truth"], "anchor_indices": anchors,
                "avg_scores": avg, "pred": pred, "comparisons": records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_mo.py
ADDED
@@ -0,0 +1,114 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a film expert. Please score the entity authority of each of the two given movie entities "
        f"based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"QUESTION CONTEXT: These entities are the subjects of questions like 'Who is the director of the movie ...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: {entity1}\n"
        f"Entity B: {entity2}\n\n"
        f"Your output MUST be a single, valid JSON object in the format: {{\"A\": X, \"B\": Y}}, where X and Y are integer scores from 0 to 9. "
        f"Example format: {{\"A\": 7, \"B\": 4}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response):
    text = response.strip()
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            d = json.loads(match.group())
            sa = d.get("A", d.get("a"))
            sb = d.get("B", d.get("b"))
            if sa is not None and sb is not None:
                sa, sb = float(sa), float(sb)
                if 0 <= sa <= 9 and 0 <= sb <= 9:
                    return sa, sb
        except Exception:
            pass
    return None, None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=4, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=64)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for idx, item in tqdm(pending):
            entities = item["question_entity"]
            n = len(entities)
            anchors = random.sample(range(n), NUM_ANCHORS)
            non_anchors = [i for i in range(n) if i not in anchors]

            tasks = [{"idx_a": ni, "idx_b": ai, "entity_a": entities[ni], "entity_b": entities[ai]}
                     for ni in non_anchors for ai in anchors]
            tasks += [{"idx_a": ai, "idx_b": aj, "entity_a": entities[ai], "entity_b": entities[aj]}
                      for ai, aj in itertools.combinations(anchors, 2)]

            # Batch inference
            results = []
            for i in range(0, len(tasks), BATCH_SIZE):
                batch = tasks[i:i + BATCH_SIZE]
                prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(t["entity_a"], t["entity_b"])}],
                                                          tokenize=False, add_generation_prompt=True, enable_thinking=False) for t in batch]
                outputs = llm.generate(prompts, sampling_params)
                for t, out in zip(batch, outputs):
                    raw = out.outputs[0].text if out.outputs else ""
                    sa, sb = extract_scores(raw)
                    results.append((t, sa, sb, raw))

            # Aggregate scores
            scores = defaultdict(list)
            records = []
            for task, sa, sb, raw in results:
                if sa is not None and sb is not None:
                    scores[task["idx_a"]].append(sa)
                    scores[task["idx_b"]].append(sb)
                records.append({"entity_a": task["entity_a"], "idx_a": task["idx_a"], "entity_b": task["entity_b"],
                                "idx_b": task["idx_b"], "score_a": sa, "score_b": sb, "llm_output": raw})

            avg = {i: float(np.mean(scores[i])) if scores[i] else 0.0 for i in range(n)}
            pred = sorted(range(n), key=lambda x: avg[x], reverse=True)

            out_f.write(json.dumps({
                "item_id": idx, "question_entity": entities, "score": item.get("score"),
                "ground_truth": item["ground_truth"], "anchor_indices": anchors,
                "avg_scores": avg, "pred": pred, "comparisons": records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PairJudge_PointScore_so.py
ADDED
@@ -0,0 +1,114 @@
import json
import os
import random
import itertools
import re
from collections import defaultdict

import numpy as np
from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
EVAL_FILE = "LISTWISE_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 16
NUM_ANCHORS = 5


def build_prompt(entity1, entity2):
    return (
        f"You are a Wikipedia notability expert and a music industry expert. Please score the entity authority of each of the two given song entities "
        f"based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"Consider both entities together when assigning scores to ensure consistency.\n\n"
        f"QUESTION CONTEXT: These entities are the subjects of questions like 'Who is the performer of the song...'\n\n"
        f"**TARGET ENTITIES**:\n"
        f"Entity A: {entity1}\n"
        f"Entity B: {entity2}\n\n"
        f"Your output MUST be a single, valid JSON object in the format: {{\"A\": X, \"B\": Y}}, where X and Y are integer scores from 0 to 9. "
        f"Example format: {{\"A\": 7, \"B\": 4}}. Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_scores(response):
    text = response.strip()
    if (pos := text.rfind("Your answer:")) != -1:
        text = text[pos + len("Your answer:"):].strip()
    if match := re.search(r'\{[^{}]*\}', text, re.DOTALL):
        try:
            d = json.loads(match.group())
            sa = d.get("A", d.get("a"))
            sb = d.get("B", d.get("b"))
            if sa is not None and sb is not None:
                sa, sb = float(sa), float(sb)
                if 0 <= sa <= 9 and 0 <= sb <= 9:
                    return sa, sb
        except Exception:
            pass
    return None, None


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR, max_num_seqs=256, trust_remote_code=True, tensor_parallel_size=2, dtype="bfloat16")
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=64)

    os.makedirs(os.path.dirname(OUTPUT_FILE), exist_ok=True)

    completed = set()
    if os.path.exists(OUTPUT_FILE):
        with open(OUTPUT_FILE, "r", encoding="utf-8") as f:
            for line in f:
                if line.strip():
                    completed.add(json.loads(line).get("item_id", -1))

    with open(EVAL_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line) for line in f]

    with open(OUTPUT_FILE, "a", encoding="utf-8") as out_f:
        pending = [(idx, item) for idx, item in enumerate(items) if idx not in completed]
        for idx, item in tqdm(pending):
            entities = item["question_entity"]
            n = len(entities)
            anchors = random.sample(range(n), NUM_ANCHORS)
            non_anchors = [i for i in range(n) if i not in anchors]

            tasks = [{"idx_a": ni, "idx_b": ai, "entity_a": entities[ni], "entity_b": entities[ai]}
                     for ni in non_anchors for ai in anchors]
            tasks += [{"idx_a": ai, "idx_b": aj, "entity_a": entities[ai], "entity_b": entities[aj]}
                      for ai, aj in itertools.combinations(anchors, 2)]

            # Batch inference
            results = []
            for i in range(0, len(tasks), BATCH_SIZE):
                batch = tasks[i:i + BATCH_SIZE]
                prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(t["entity_a"], t["entity_b"])}],
                                                          tokenize=False, add_generation_prompt=True, enable_thinking=False) for t in batch]
                outputs = llm.generate(prompts, sampling_params)
                for t, out in zip(batch, outputs):
                    raw = out.outputs[0].text if out.outputs else ""
                    sa, sb = extract_scores(raw)
                    results.append((t, sa, sb, raw))

            # Aggregate scores
            scores = defaultdict(list)
            records = []
            for task, sa, sb, raw in results:
                if sa is not None and sb is not None:
                    scores[task["idx_a"]].append(sa)
                    scores[task["idx_b"]].append(sb)
                records.append({"entity_a": task["entity_a"], "idx_a": task["idx_a"], "entity_b": task["entity_b"],
                                "idx_b": task["idx_b"], "score_a": sa, "score_b": sb, "llm_output": raw})

            avg = {i: float(np.mean(scores[i])) if scores[i] else 0.0 for i in range(n)}
            pred = sorted(range(n), key=lambda x: avg[x], reverse=True)

            out_f.write(json.dumps({
                "item_id": idx, "question_entity": entities, "score": item.get("score"),
                "ground_truth": item["ground_truth"], "anchor_indices": anchors,
                "avg_scores": avg, "pred": pred, "comparisons": records
            }, ensure_ascii=False) + "\n")
            out_f.flush()


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_ba.py
ADDED
@@ -0,0 +1,69 @@
import json
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
INPUT_FILE = "INPUT_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 8


def build_prompt(question, entity):
    return (
        f"You are a Wikipedia notability expert and a pop-culture sports expert. Please score the authority of the given basketball entity based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"**QUESTION CONTEXT**: {question}\n"
        f"**TARGET ENTITY**: {entity}\n"
        f"Based on the question context and the above criteria, rate the authority of this ENTITY (0-9 points).\n"
        f"Output only: {{\"score\": X}} where X is your evaluated score (0, 1, 2, 3, 4, 5, 6, 7, 8, or 9). Do not add any explanation.\n"
        f"Your answer:"
    )


def extract_score(text):
    text = text.strip()
    if "Your answer:" in text:
        text = text.split("Your answer:")[-1].strip()
    text = re.sub(r'^(user|assistant|Your answer:|Answer:)\s*', '', text, flags=re.IGNORECASE).strip()
    try:
        if match := re.search(r'\{[^}]*"score"\s*:\s*(\d+)[^}]*\}', text, re.IGNORECASE):
            score = int(match.group(1))
            if 0 <= score <= 9:
                return score
    except Exception:
        pass
    if numbers := re.findall(r'\b([0-9])\b', text):
        return int(numbers[0])
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    with open(INPUT_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line.strip()) for line in f if line.strip()]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question"], item["question_entity"])}],
                                                      tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for item, output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                pred = extract_score(text)
                out_f.write(json.dumps({
                    "question": item["question"], "question_entity": item["question_entity"],
                    "popularity": item["popularity"], "true_score": item["score"],
                    "pred_score": pred, "llm_output": text if pred != -1 else ""
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_mo.py
ADDED
@@ -0,0 +1,69 @@
import json
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams


MODEL_DIR = "YOUR_MODEL_DIR"
INPUT_FILE = "INPUT_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 8


def build_prompt(question, entity):
    return (
        f"You are a Wikipedia notability expert and a film expert. Please score the authority of the given movie entity based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"**QUESTION CONTEXT**: {question}\n"
        f"**TARGET ENTITY**: {entity}\n"
        f"Based on the question context and the above criteria, rate the authority of this ENTITY (0-9 points).\n"
        f"Output only: {{\"score\": X}} where X is your evaluated score (0, 1, 2, 3, 4, 5, 6, 7, 8, or 9). Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_score(text):
    text = text.strip()
    if "Your answer:" in text:
        text = text.split("Your answer:")[-1].strip()
    text = re.sub(r'^(user|assistant|Your answer:|Answer:)\s*', '', text, flags=re.IGNORECASE).strip()
    try:
        if match := re.search(r'\{[^}]*"score"\s*:\s*(\d+)[^}]*\}', text, re.IGNORECASE):
            score = int(match.group(1))
            if 0 <= score <= 9:
                return score
    except Exception:
        pass
    if numbers := re.findall(r'\b([0-9])\b', text):
        return int(numbers[0])
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    with open(INPUT_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line.strip()) for line in f if line.strip()]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question"], item["question_entity"])}],
                                                      tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for item, output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                pred = extract_score(text)
                out_f.write(json.dumps({
                    "question": item["question"], "question_entity": item["question_entity"],
                    "popularity": item["popularity"], "true_score": item["score"],
                    "pred_score": pred, "llm_output": text if pred != -1 else ""
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/Judges/Judges_on_EntityAuth/PointJudge_so.py
ADDED
@@ -0,0 +1,65 @@
import json
import re

from tqdm import tqdm
from transformers import AutoTokenizer
from vllm import LLM, SamplingParams

MODEL_DIR = "YOUR_MODEL_DIR"
INPUT_FILE = "INPUT_FILE"
OUTPUT_FILE = "OUTPUT_FILE"
BATCH_SIZE = 8


def build_prompt(question, entity):
    return (
        f"You are a Wikipedia notability expert and a music industry expert. Please score the authority of the given song entity based on the following criteria, with a rating range of 0 to 9.\n\n"
        f"Entity authority refers to the public recognition, fame, historical significance, or widespread knowledge of the entity (person, place, organization, etc.).\n"
        f"**QUESTION CONTEXT**: {question}\n"
        f"**TARGET ENTITY**: {entity}\n"
        f"Based on the question context and the above criteria, rate the authority of this ENTITY (0-9 points).\n"
        f"Output only: {{\"score\": X}} where X is your evaluated score (0, 1, 2, 3, 4, 5, 6, 7, 8, or 9). Do not add any explanation.\n"
        f"Your answer: "
    )


def extract_score(text):
    text = text.strip()
    try:
        if match := re.search(r'\{[^}]*"score"\s*:\s*(\d+)[^}]*\}', text):
            score = int(match.group(1))
            if 0 <= score <= 9:
                return score
    except Exception:
        pass
    if numbers := re.findall(r'\b([0-9])\b', text):
        return int(numbers[0])
    return -1


def main():
    llm = LLM(model=MODEL_DIR, tokenizer=MODEL_DIR)
    tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR, trust_remote_code=True)
    sampling_params = SamplingParams(temperature=0.0, top_p=1.0, max_tokens=256)

    with open(INPUT_FILE, "r", encoding="utf-8") as f:
        items = [json.loads(line.strip()) for line in f if line.strip()]

    with open(OUTPUT_FILE, "w", encoding="utf-8") as out_f:
        for i in tqdm(range(0, len(items), BATCH_SIZE)):
            batch = items[i:i + BATCH_SIZE]
            prompts = [tokenizer.apply_chat_template([{"role": "user", "content": build_prompt(item["question"], item["question_entity"])}],
                                                      tokenize=False, add_generation_prompt=True, enable_thinking=False) for item in batch]
            outputs = llm.generate(prompts, sampling_params)
            for item, output in zip(batch, outputs):
                text = output.outputs[0].text if output.outputs else ""
                pred = extract_score(text)
                out_f.write(json.dumps({
                    "question": item["question"], "question_entity": item["question_entity"],
                    "popularity": item["popularity"], "true_score": item["score"],
                    "pred_score": pred, "llm_output": text if pred != -1 else ""
                }, ensure_ascii=False) + "\n")


if __name__ == "__main__":
    main()
AuthorityBench/judge_and_eval/eval/eval_list.py
ADDED
@@ -0,0 +1,129 @@
import json
import numpy as np
import argparse
from scipy.stats import spearmanr, kendalltau


def ranking_to_ranks(ranking):
    n = len(ranking)
    ranks = np.empty(n, dtype=int)
    for rank, doc_idx in enumerate(ranking):
        ranks[doc_idx] = rank
    return ranks


def calculate_metrics(file_path):
    spearman_scores = []
    kendall_scores = []
    valid_count = 0

    skip_json_error = 0
    skip_wrong_type = 0
    skip_wrong_length = 0
    skip_not_permutation = 0
    skip_other = 0

    total_lines = 0
    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            total_lines += 1
            if not line.strip():
                continue

            try:
                data = json.loads(line)
                pred_ranking = data.get('pred')
                ground_truth_ranking = data.get('ground_truth')

                if not isinstance(pred_ranking, list) or not isinstance(ground_truth_ranking, list):
                    skip_wrong_type += 1
                    continue

                n = len(ground_truth_ranking)

                if len(pred_ranking) != n or n < 2:
                    skip_wrong_length += 1
                    continue

                pred_set = set(pred_ranking)
                gt_set = set(ground_truth_ranking)
                expected_set = set(range(n))

                if pred_set != expected_set:
                    skip_not_permutation += 1
                    continue

                if gt_set != expected_set:
                    skip_not_permutation += 1
                    continue

                gt_ranks = ranking_to_ranks(ground_truth_ranking)
                pred_ranks = ranking_to_ranks(pred_ranking)

                s_corr, _ = spearmanr(gt_ranks, pred_ranks)
                k_corr, _ = kendalltau(gt_ranks, pred_ranks)

                spearman_scores.append(s_corr if not np.isnan(s_corr) else 0.0)
                kendall_scores.append(k_corr if not np.isnan(k_corr) else 0.0)

                valid_count += 1

            except json.JSONDecodeError:
                skip_json_error += 1
                continue
            except (KeyError, TypeError, ValueError):
                skip_other += 1
                continue

    print(f"Evaluation File: {file_path}")

    if valid_count == 0:
        print(f"Error: No valid evaluation data found in {file_path}")

    total_skipped = (skip_json_error + skip_wrong_type +
                     skip_wrong_length + skip_not_permutation + skip_other)

    print(f"\n[Sample Statistics]")
    print(f"  Total lines: {total_lines}")
    print(f"  Valid samples: {valid_count}")
    print(f"  Skipped samples: {total_skipped}")
    print(f"\n[Skip Details]")
    print(f"  JSON parse error: {skip_json_error}")
    print(f"  Wrong field type: {skip_wrong_type}")
    print(f"  List length mismatch: {skip_wrong_length}")
    print(f"  Not a permutation: {skip_not_permutation}")
    print(f"  Other errors: {skip_other}")

    avg_spearman = None
    avg_kendall = None
    if valid_count > 0:
        avg_spearman = float(np.mean(spearman_scores))
        avg_kendall = float(np.mean(kendall_scores))

        print(f"  Spearman's Rho: {avg_spearman:.4f}")
        print(f"  Kendall's Tau: {avg_kendall:.4f}")

    return {
        'total_lines': total_lines,
        'valid_count': valid_count,
        'skipped': {
            'json_error': skip_json_error,
            'wrong_type': skip_wrong_type,
            'wrong_length': skip_wrong_length,
            'not_permutation': skip_not_permutation,
            'other': skip_other,
            'total': total_skipped
        },
        'spearman': avg_spearman,
        'kendall': avg_kendall,
    }


if __name__ == "__main__":
    parser = argparse.ArgumentParser(
        description="Evaluate ranking metrics (Spearman / Kendall)"
    )
    parser.add_argument("file_path", type=str, help="Path to JSONL file with evaluation results")
    args = parser.parse_args()

    calculate_metrics(args.file_path)
AuthorityBench/judge_and_eval/eval/eval_pair.py
ADDED
@@ -0,0 +1,51 @@
import json
import argparse


def calculate_accuracy(file_path):
    total = 0
    correct = 0

    with open(file_path, 'r', encoding='utf-8') as f:
        for line in f:
            if not line.strip():
                continue

            try:
                data = json.loads(line.strip())
                label = data.get('label')
                pred = data.get('pred')

                if label is not None and pred is not None:
                    total += 1
                    if label == pred:
                        correct += 1

            except json.JSONDecodeError:
                continue

    accuracy = correct / total if total > 0 else 0.0

    return {
        'total': total,
        'correct': correct,
        'accuracy': accuracy
    }


def main():
    parser = argparse.ArgumentParser(
        description="Calculate prediction accuracy from JSONL file"
    )
    parser.add_argument("file_path", type=str, help="Path to JSONL file")
    args = parser.parse_args()

    result = calculate_accuracy(args.file_path)

    print(f"  Accuracy: {result['accuracy']:.4f} ({result['accuracy']*100:.2f}%)")

    return result


if __name__ == '__main__':
    main()