VirtualLUO committed on
Commit
47c6d07
·
verified ·
1 Parent(s): 601fb5c

Upload eval_comet_demo.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. eval_comet_demo.py +83 -0
eval_comet_demo.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ MMTIT-Bench COMET Evaluation Demo
3
+
4
+ Prediction file format (JSONL):
5
+ {"image_id": "Korea_Menu_20843.jpg", "pred": "梅尔街 ..."}
6
+
7
+ Usage:
8
+ python eval_comet_demo.py \
9
+ --prediction prediction.jsonl \
10
+ --annotation annotation.jsonl \
11
+ --direction other2zh \
12
+ --batch_size 16 --gpus 0
13
+ """
14
+
15
+ import json
16
+ import argparse
17
+ from comet import download_model, load_from_checkpoint
18
+
19
+
20
def load_jsonl(path):
    """Read a JSON-Lines file and return its records as a list.

    Blank lines are ignored; every non-blank line must be a valid
    JSON document.
    """
    records = []
    with open(path, "r", encoding="utf-8") as handle:
        for raw in handle:
            if raw.strip():
                records.append(json.loads(raw))
    return records
23
+
24
+
25
def main():
    """CLI entry point: score image-translation predictions with COMET.

    Matches each prediction to its annotation by ``image_id``, builds
    (src / mt / ref) triples, scores them with the
    ``Unbabel/wmt22-comet-da`` model, prints the corpus-level system
    score, and writes per-sample scores as JSONL to ``--output``.

    Raises:
        SystemExit: if no prediction matches any annotation image_id.
    """
    parser = argparse.ArgumentParser(description="MMTIT-Bench COMET Evaluation")
    parser.add_argument("--prediction", type=str, required=True, help="Path to prediction JSONL (fields: image_id, pred)")
    parser.add_argument("--annotation", type=str, default="annotation.jsonl", help="Path to annotation JSONL")
    parser.add_argument("--direction", type=str, required=True, choices=["other2zh", "other2en"], help="Translation direction")
    parser.add_argument("--batch_size", type=int, default=16)
    parser.add_argument("--gpus", type=int, default=0, help="0 for CPU")
    parser.add_argument("--output", type=str, default=None, help="Output path for per-sample scores")
    args = parser.parse_args()

    if args.output is None:
        args.output = f"comet_results_{args.direction}.jsonl"

    # Choose the reference field based on the translation direction.
    ref_key = "translation_zh" if args.direction == "other2zh" else "translation_en"

    # Load data; annotations are keyed by image_id for O(1) matching.
    annotations = {item["image_id"]: item for item in load_jsonl(args.annotation)}
    predictions = load_jsonl(args.prediction)
    print(f"Annotations: {len(annotations)}, Predictions: {len(predictions)}")

    # Merge by image_id -> build COMET inputs (src / mt / ref).
    # Predictions without a matching annotation are silently dropped;
    # the Matched count below makes the drop visible.
    comet_inputs = []
    matched_ids = []
    for pred in predictions:
        img_id = pred["image_id"]
        if img_id in annotations:
            ann = annotations[img_id]
            comet_inputs.append({
                "src": ann["parsing_anno"],  # source OCR text
                "mt": pred["pred"],          # model prediction
                "ref": ann[ref_key],         # ground-truth translation
            })
            matched_ids.append(img_id)

    print(f"Matched: {len(comet_inputs)} / {len(predictions)}")
    # Explicit error instead of `assert`: assertions are stripped under
    # `python -O`, which would let an empty batch reach model.predict().
    if not comet_inputs:
        raise SystemExit("No matching samples found. Check image_id consistency.")

    # Load the COMET model and evaluate all triples in one call.
    model_path = download_model("Unbabel/wmt22-comet-da")
    model = load_from_checkpoint(model_path)
    model_output = model.predict(comet_inputs, batch_size=args.batch_size, gpus=args.gpus)

    # Print the corpus-level system score.
    print(f"\n{'='*50}")
    print(f" Direction: {args.direction}")
    print(f" Samples: {len(comet_inputs)}")
    print(f" COMET Score: {model_output.system_score:.4f}")
    print(f"{'='*50}")

    # Save per-sample results, one JSON object per line.
    with open(args.output, "w", encoding="utf-8") as f:
        for img_id, score in zip(matched_ids, model_output.scores):
            f.write(json.dumps({"image_id": img_id, "comet_score": score}, ensure_ascii=False) + "\n")
    print(f"Per-sample scores saved to: {args.output}")
80
+
81
+
82
# Run the evaluation CLI only when executed as a script, not on import.
if __name__ == "__main__":
    main()