# sgl/tools/prepare_textvqa_for_sgl.py
# Uploaded by xiaohaoWillX using the upload-large-folder tool (commit 60e7f1f, verified).
import argparse
import json
import os
from collections import Counter
def canonical_answer(answers):
    """Return the most frequent answer in *answers*, breaking ties by first appearance.

    Returns "" for an empty (or missing) answer list.
    """
    if not answers:
        return ""
    # Counter preserves first-insertion order and most_common()'s sort is
    # stable, so the first max-count entry is the earliest-seen answer among
    # those tied for the highest count — same tie-break as scanning `answers`.
    # (The original's trailing `return answers[0]` was unreachable.)
    return Counter(answers).most_common(1)[0][0]
def ensure_symlink(src, dst):
    """Create a symlink at *dst* pointing to *src*, unless *dst* already exists."""
    # islink() also catches broken symlinks, which os.path.exists() reports
    # as False — either way the destination slot is occupied, so do nothing.
    occupied = os.path.islink(dst) or os.path.exists(dst)
    if not occupied:
        os.symlink(src, dst)
def convert_split(src_json, image_dir, out_jsonl):
    """Convert one official TextVQA split JSON into a jsonl file.

    Each output line holds image path, question, question_id, and the
    canonical (majority) answer. Returns the raw list of split entries.
    """
    with open(src_json, "r") as handle:
        entries = json.load(handle)["data"]
    rows = [
        json.dumps(
            {
                "image": os.path.join(image_dir, f"{entry['image_id']}.jpg"),
                "question": entry["question"],
                "question_id": entry["question_id"],
                "answer": canonical_answer(entry.get("answers", [])),
            }
        )
        + "\n"
        for entry in entries
    ]
    with open(out_jsonl, "w") as writer:
        writer.writelines(rows)
    return entries
def build_val_questions(val_data, out_path):
    """Write the val-split questions file ({"questions": [...]}) to *out_path*."""
    questions = []
    for entry in val_data:
        questions.append(
            {
                "image_id": entry["image_id"],
                "question": entry["question"],
                "question_id": entry["question_id"],
            }
        )
    with open(out_path, "w") as handle:
        json.dump({"questions": questions}, handle)
def build_val_annotations(val_data, out_path):
    """Write the val-split annotations file ({"annotations": [...]}) to *out_path*."""
    annotations = []
    for entry in val_data:
        wrapped = [{"answer": ans} for ans in entry.get("answers", [])]
        annotations.append(
            {
                "image_id": entry["image_id"],
                "question_id": entry["question_id"],
                "answers": wrapped,
            }
        )
    with open(out_path, "w") as handle:
        json.dump({"annotations": annotations}, handle)
def main():
    """Prepare TextVQA data for SGL: symlink images, convert splits, write eval files."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--official-root", type=str, default="data/textvqa_official")
    parser.add_argument("--output-root", type=str, default="data/textvqa")
    opts = parser.parse_args()

    src_root = opts.official_root
    dst_root = opts.output_root
    os.makedirs(dst_root, exist_ok=True)

    # Mirror the official image folders into the output tree via symlinks.
    for folder in ("train_images", "test_images"):
        ensure_symlink(
            os.path.abspath(os.path.join(src_root, folder)),
            os.path.join(dst_root, folder),
        )

    train_data = convert_split(
        os.path.join(src_root, "TextVQA_0.5.1_train.json"),
        os.path.join(dst_root, "train_images"),
        os.path.join(dst_root, "textvqa_train.jsonl"),
    )
    # NOTE(review): the val split deliberately points at train_images —
    # presumably the official release stores val images there; confirm.
    val_data = convert_split(
        os.path.join(src_root, "TextVQA_0.5.1_val.json"),
        os.path.join(dst_root, "train_images"),
        os.path.join(dst_root, "textvqa_val.jsonl"),
    )
    convert_split(
        os.path.join(src_root, "TextVQA_0.5.1_test.json"),
        os.path.join(dst_root, "test_images"),
        os.path.join(dst_root, "textvqa_test.jsonl"),
    )

    build_val_questions(val_data, os.path.join(dst_root, "textvqa_val_questions.json"))
    build_val_annotations(val_data, os.path.join(dst_root, "textvqa_val_annotations.json"))

    print(f"wrote {len(train_data)} train samples")
    print(f"wrote {len(val_data)} val samples")
    print(f"prepared TextVQA data under {dst_root}")
# Run the preparation pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()