# paper2thesis 1.0 — scripts/validate_ids.py
# (uploaded via huggingface_hub; commit e3f7137, verified)
#!/usr/bin/env python3
"""Validate released Paper2Thesis JSONL split files.
This script is standalone and only uses the Python standard library.
"""
import argparse
import json
import re
import sys
from pathlib import Path
# Dataset splits; each one is expected at <data_dir>/<split>.jsonl.
SPLITS = ("train", "validation", "test")
# Keys every JSONL row must contain (enforced by validate_required_fields).
REQUIRED_FIELDS = {
"example_id",
"input_paper_ids",
"target_thesis_id",
"split",
"field",
"target_year",
"num_input_papers",
"input_total_words",
"target_total_words",
"target_page_count",
"input_versions",
"target_version",
"target_license",
"input_licenses",
}
# Accepts legacy arXiv ids ("hep-th/9901001", "math.GT/0309136") and modern
# ones ("2101.00001"), each with an optional "vN" version suffix.
ARXIV_ID_RE = re.compile(r"^(?:[a-z-]+(?:\.[A-Z]{2})?/\d{7}|\d{4}\.\d{4,5})(?:v\d+)?$")
def is_valid_arxiv_id(value):
    """Return True when *value* is a string holding a well-formed arXiv id."""
    if not isinstance(value, str):
        return False
    return ARXIV_ID_RE.fullmatch(value.strip()) is not None
def add_error(errors, path, line_number, message):
    """Append a "path:line: message" diagnostic string to *errors*."""
    errors.append("{}:{}: {}".format(path, line_number, message))
def read_jsonl(path, errors):
    """Parse *path* as JSONL, collecting problems into *errors*.

    Returns a list of (path, line_number, row) tuples, one per line that
    parsed to a JSON object.  Blank lines, JSON syntax errors, and non-object
    rows are reported as error strings rather than raised.
    """
    parsed = []
    if not path.exists():
        errors.append(f"{path}: file does not exist")
        return parsed
    with path.open("r", encoding="utf-8") as handle:
        for lineno, raw in enumerate(handle, start=1):
            text = raw.strip()
            if not text:
                errors.append(f"{path}:{lineno}: blank line")
                continue
            try:
                record = json.loads(text)
            except json.JSONDecodeError as exc:
                errors.append(f"{path}:{lineno}: invalid JSON: {exc}")
                continue
            if isinstance(record, dict):
                parsed.append((path, lineno, record))
            else:
                errors.append(f"{path}:{lineno}: row is not a JSON object")
    return parsed
def validate_required_fields(path, line_number, row, errors):
    """Report any REQUIRED_FIELDS keys that are absent from *row*."""
    absent = sorted(REQUIRED_FIELDS - row.keys())
    if absent:
        add_error(errors, path, line_number, f"missing required fields: {absent}")
def validate_arxiv_ids(path, line_number, row, errors):
    """Check target_thesis_id and each input_paper_ids entry for arXiv-id shape."""
    thesis_id = row.get("target_thesis_id")
    if not is_valid_arxiv_id(thesis_id):
        add_error(errors, path, line_number, f"invalid target_thesis_id: {thesis_id!r}")
    paper_ids = row.get("input_paper_ids")
    if not isinstance(paper_ids, list):
        add_error(errors, path, line_number, "input_paper_ids is not a list")
        return
    for idx, paper_id in enumerate(paper_ids):
        if not is_valid_arxiv_id(paper_id):
            add_error(errors, path, line_number, f"invalid input_paper_ids[{idx}]: {paper_id!r}")
def validate_split(path, line_number, row, expected_split, errors):
    """Ensure the row's own "split" field matches the file it came from."""
    actual = row.get("split")
    if actual != expected_split:
        errors.append(f"{path}:{line_number}: split is {actual!r}, expected {expected_split!r}")
def validate_num_input_papers(path, line_number, row, errors):
    """Check that num_input_papers is an integer equal to len(input_paper_ids).

    Skipped entirely when input_paper_ids is not a list, since
    validate_arxiv_ids already reports that problem.
    """
    input_paper_ids = row.get("input_paper_ids")
    num_input_papers = row.get("num_input_papers")
    if not isinstance(input_paper_ids, list):
        return
    # bool is a subclass of int, so a JSON true/false would otherwise pass
    # as 1/0; a boolean here is a schema error and must be flagged.
    if not isinstance(num_input_papers, int) or isinstance(num_input_papers, bool):
        errors.append(
            f"{path}:{line_number}: num_input_papers is not an integer: {num_input_papers!r}"
        )
        return
    if num_input_papers != len(input_paper_ids):
        errors.append(
            f"{path}:{line_number}: num_input_papers={num_input_papers} "
            f"but len(input_paper_ids)={len(input_paper_ids)}"
        )
def validate_license_fields(path, line_number, row, errors):
    """Validate target_license and the per-paper input_licenses list."""
    target = row.get("target_license")
    if not (isinstance(target, str) and target.strip()):
        errors.append(f"{path}:{line_number}: target_license missing or empty: {target!r}")
    licenses = row.get("input_licenses")
    if not isinstance(licenses, list):
        errors.append(f"{path}:{line_number}: input_licenses is not a list")
        return
    paper_ids = row.get("input_paper_ids")
    if isinstance(paper_ids, list) and len(licenses) != len(paper_ids):
        errors.append(
            f"{path}:{line_number}: len(input_licenses)={len(licenses)} "
            f"but len(input_paper_ids)={len(paper_ids)}"
        )
    for pos, entry in enumerate(licenses):
        if not (isinstance(entry, str) and entry.strip()):
            errors.append(
                f"{path}:{line_number}: input_licenses[{pos}] missing or empty: {entry!r}"
            )
def validate_duplicates_and_overlap(rows, errors):
    """Cross-row checks spanning every split at once.

    Flags duplicate example_id values, duplicate target_thesis_id values,
    and input papers that appear in more than one split.  Returns a tuple
    (unique example_id count, unique target_thesis_id count).
    """
    seen_examples = {}
    seen_targets = {}
    seen_inputs = {}
    for path, line_number, row in rows:
        example_id = row.get("example_id")
        if example_id not in seen_examples:
            seen_examples[example_id] = (path, line_number)
        else:
            first_path, first_line = seen_examples[example_id]
            errors.append(
                f"{path}:{line_number}: duplicate example_id {example_id!r}; "
                f"first seen at {first_path}:{first_line}"
            )
        target_id = row.get("target_thesis_id")
        if target_id not in seen_targets:
            seen_targets[target_id] = (path, line_number)
        else:
            first_path, first_line = seen_targets[target_id]
            errors.append(
                f"{path}:{line_number}: duplicate target_thesis_id {target_id!r}; "
                f"first seen at {first_path}:{first_line}"
            )
        paper_ids = row.get("input_paper_ids")
        if not isinstance(paper_ids, list):
            continue
        split = row.get("split")
        for paper_id in paper_ids:
            if paper_id not in seen_inputs:
                seen_inputs[paper_id] = (split, path, line_number)
            elif seen_inputs[paper_id][0] != split:
                first_split, first_path, first_line = seen_inputs[paper_id]
                errors.append(
                    f"{path}:{line_number}: input_paper_id {paper_id!r} appears in "
                    f"split {split!r} and split {first_split!r}; "
                    f"first seen at {first_path}:{first_line}"
                )
    return len(seen_examples), len(seen_targets)
def parse_args():
    """Build the CLI parser and parse sys.argv; only --data_dir is accepted."""
    description = "Validate Paper2Thesis released JSONL split files."
    help_text = "Directory containing train.jsonl, validation.jsonl, and test.jsonl. Default: data"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--data_dir", default="data", help=help_text)
    return parser.parse_args()
def main():
    """Entry point: validate every split file and print a summary.

    Returns 0 when all checks pass, 1 otherwise (used as the process
    exit status by the __main__ guard).
    """
    args = parse_args()
    data_dir = Path(args.data_dir)
    errors = []
    all_rows = []
    for split in SPLITS:
        rows = read_jsonl(data_dir / f"{split}.jsonl", errors)
        # Per-row checks are independent, so all of them run on every row.
        for row_path, line_number, row in rows:
            validate_required_fields(row_path, line_number, row, errors)
            validate_split(row_path, line_number, row, split, errors)
            validate_arxiv_ids(row_path, line_number, row, errors)
            validate_num_input_papers(row_path, line_number, row, errors)
            validate_license_fields(row_path, line_number, row, errors)
        all_rows.extend(rows)
    _, target_count = validate_duplicates_and_overlap(all_rows, errors)
    print(f"Loaded {len(all_rows)} examples.")
    if errors:
        print(f"Validation failed with {len(errors)} error(s):")
        for message in errors:
            print(f"- {message}")
        return 1
    for line in (
        "No duplicate example_id.",
        "No duplicate target_thesis_id.",
        "No input paper overlap across splits.",
        f"Validated {target_count} unique target_thesis_id values.",
        "Validation passed.",
    ):
        print(line)
    return 0
if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())