#!/usr/bin/env python3
"""Validate released Paper2Thesis JSONL split files.
This script is standalone and only uses the Python standard library.
"""
import argparse
import json
import re
import sys
from pathlib import Path
# Dataset split names; each split is released as a <split>.jsonl file.
SPLITS = ("train", "validation", "test")

# Keys every released example row must carry (checked by
# validate_required_fields).
REQUIRED_FIELDS = {
    "example_id",
    "input_paper_ids",
    "target_thesis_id",
    "split",
    "field",
    "target_year",
    "num_input_papers",
    "input_total_words",
    "target_total_words",
    "target_page_count",
    "input_versions",
    "target_version",
    "target_license",
    "input_licenses",
}

# arXiv identifiers: old-style "archive[.SC]/NNNNNNN" (e.g. "hep-th/9901001",
# "math.GT/0309136") or new-style "YYMM.NNNN[N]" (e.g. "2101.00001"), with an
# optional version suffix such as "v2".
ARXIV_ID_RE = re.compile(r"^(?:[a-z-]+(?:\.[A-Z]{2})?/\d{7}|\d{4}\.\d{4,5})(?:v\d+)?$")
def is_valid_arxiv_id(value):
    """Return True when *value* is a string matching the arXiv ID pattern.

    Surrounding whitespace is tolerated; any non-string input is rejected.
    """
    if not isinstance(value, str):
        return False
    return ARXIV_ID_RE.fullmatch(value.strip()) is not None
def add_error(errors, path, line_number, message):
    """Append a "path:line_number: message" entry to the *errors* list."""
    entry = f"{path}:{line_number}: {message}"
    errors.append(entry)
def read_jsonl(path, errors):
    """Parse *path* as JSON Lines, returning (path, line_number, row) triples.

    Problems — missing file, blank lines, invalid JSON, rows that are not
    JSON objects — are reported into *errors*; offending lines are skipped
    instead of raising.
    """
    if not path.exists():
        errors.append(f"{path}: file does not exist")
        return []
    rows = []
    with path.open("r", encoding="utf-8") as handle:
        for line_number, raw in enumerate(handle, start=1):
            text = raw.strip()
            if not text:
                add_error(errors, path, line_number, "blank line")
                continue
            try:
                parsed = json.loads(text)
            except json.JSONDecodeError as exc:
                add_error(errors, path, line_number, f"invalid JSON: {exc}")
                continue
            if isinstance(parsed, dict):
                rows.append((path, line_number, parsed))
            else:
                add_error(errors, path, line_number, "row is not a JSON object")
    return rows
def validate_required_fields(path, line_number, row, errors):
    """Report any REQUIRED_FIELDS keys that are absent from *row*."""
    absent = sorted(REQUIRED_FIELDS.difference(row))
    if absent:
        add_error(errors, path, line_number, f"missing required fields: {absent}")
def validate_arxiv_ids(path, line_number, row, errors):
    """Check target_thesis_id and each input_paper_ids entry against the arXiv pattern."""
    target = row.get("target_thesis_id")
    if not is_valid_arxiv_id(target):
        add_error(errors, path, line_number, f"invalid target_thesis_id: {target!r}")
    paper_ids = row.get("input_paper_ids")
    if isinstance(paper_ids, list):
        for index, paper_id in enumerate(paper_ids):
            if not is_valid_arxiv_id(paper_id):
                add_error(errors, path, line_number, f"invalid input_paper_ids[{index}]: {paper_id!r}")
    else:
        add_error(errors, path, line_number, "input_paper_ids is not a list")
def validate_split(path, line_number, row, expected_split, errors):
    """Check that the row's "split" field matches the file it came from."""
    actual = row.get("split")
    if actual == expected_split:
        return
    add_error(errors, path, line_number, f"split is {actual!r}, expected {expected_split!r}")
def validate_num_input_papers(path, line_number, row, errors):
    """Check that num_input_papers equals len(input_paper_ids).

    Silently skips the check when input_paper_ids is not a list — that shape
    problem is already reported by validate_arxiv_ids. Rejects non-integer
    counts, explicitly including bool: bool is an int subclass, so a JSON
    ``true``/``false`` would otherwise pass the isinstance(int) check as 1/0.
    """
    input_paper_ids = row.get("input_paper_ids")
    if not isinstance(input_paper_ids, list):
        return
    num_input_papers = row.get("num_input_papers")
    if isinstance(num_input_papers, bool) or not isinstance(num_input_papers, int):
        add_error(errors, path, line_number, f"num_input_papers is not an integer: {num_input_papers!r}")
        return
    if num_input_papers != len(input_paper_ids):
        add_error(
            errors,
            path,
            line_number,
            f"num_input_papers={num_input_papers} but len(input_paper_ids)={len(input_paper_ids)}",
        )
def validate_license_fields(path, line_number, row, errors):
    """Validate target_license and the per-input-paper input_licenses list.

    Licenses must be non-empty strings; input_licenses must be a list aligned
    one-to-one with input_paper_ids (length compared only when both are lists).
    """
    target_license = row.get("target_license")
    if not (isinstance(target_license, str) and target_license.strip()):
        add_error(errors, path, line_number, f"target_license missing or empty: {target_license!r}")
    input_licenses = row.get("input_licenses")
    if not isinstance(input_licenses, list):
        add_error(errors, path, line_number, "input_licenses is not a list")
        return
    input_paper_ids = row.get("input_paper_ids")
    if isinstance(input_paper_ids, list) and len(input_paper_ids) != len(input_licenses):
        add_error(
            errors,
            path,
            line_number,
            f"len(input_licenses)={len(input_licenses)} but len(input_paper_ids)={len(input_paper_ids)}",
        )
    for index, license_value in enumerate(input_licenses):
        if isinstance(license_value, str) and license_value.strip():
            continue
        add_error(errors, path, line_number, f"input_licenses[{index}] missing or empty: {license_value!r}")
def validate_duplicates_and_overlap(rows, errors):
    """Run cross-file checks over every loaded row.

    Checks that example_id and target_thesis_id are globally unique, and that
    no input paper id is used by examples in two different splits. Returns a
    (unique example_id count, unique target_thesis_id count) pair.
    """
    seen_examples = {}
    seen_targets = {}
    # paper_id -> (split, path, line_number) of its first appearance.
    first_use = {}

    def check_unique(seen, key, path, line_number, label):
        # Record the first location of *key*; any later sighting is an error.
        if key not in seen:
            seen[key] = (path, line_number)
            return
        first_path, first_line = seen[key]
        add_error(
            errors,
            path,
            line_number,
            f"duplicate {label} {key!r}; first seen at {first_path}:{first_line}",
        )

    for path, line_number, row in rows:
        check_unique(seen_examples, row.get("example_id"), path, line_number, "example_id")
        check_unique(seen_targets, row.get("target_thesis_id"), path, line_number, "target_thesis_id")
        input_paper_ids = row.get("input_paper_ids")
        if not isinstance(input_paper_ids, list):
            continue
        split = row.get("split")
        here = (split, path, line_number)
        for paper_id in input_paper_ids:
            first = first_use.setdefault(paper_id, here)
            first_split, first_path, first_line = first
            # Re-use within the same split is fine; only cross-split overlap
            # is an error.
            if first is here or split == first_split:
                continue
            add_error(
                errors,
                path,
                line_number,
                "input_paper_id "
                f"{paper_id!r} appears in split {split!r} and split {first_split!r}; "
                f"first seen at {first_path}:{first_line}",
            )
    return len(seen_examples), len(seen_targets)
def parse_args():
    """Build and evaluate the command-line interface.

    Returns an argparse.Namespace with a single attribute, ``data_dir``.
    """
    cli = argparse.ArgumentParser(
        description="Validate Paper2Thesis released JSONL split files.",
    )
    cli.add_argument(
        "--data_dir",
        default="data",
        help="Directory containing train.jsonl, validation.jsonl, and test.jsonl. Default: data",
    )
    return cli.parse_args()
def main():
    """Validate every split file and report results; return the exit status."""
    args = parse_args()
    data_dir = Path(args.data_dir)
    errors = []
    all_rows = []
    # Per-row checks, split file by split file.
    for split in SPLITS:
        split_rows = read_jsonl(data_dir / f"{split}.jsonl", errors)
        all_rows.extend(split_rows)
        for row_path, line_number, row in split_rows:
            validate_required_fields(row_path, line_number, row, errors)
            validate_split(row_path, line_number, row, split, errors)
            validate_arxiv_ids(row_path, line_number, row, errors)
            validate_num_input_papers(row_path, line_number, row, errors)
            validate_license_fields(row_path, line_number, row, errors)
    # Cross-split checks over everything that loaded.
    _example_count, target_count = validate_duplicates_and_overlap(all_rows, errors)
    print(f"Loaded {len(all_rows)} examples.")
    if errors:
        print(f"Validation failed with {len(errors)} error(s):")
        for error in errors:
            print(f"- {error}")
        return 1
    print("No duplicate example_id.")
    print("No duplicate target_thesis_id.")
    print("No input paper overlap across splits.")
    print(f"Validated {target_count} unique target_thesis_id values.")
    print("Validation passed.")
    return 0
# Exit with the validator's status code (0 = passed, 1 = failed) when run as
# a script.
if __name__ == "__main__":
    sys.exit(main())
|