anonymousreview111 committed on
Commit
e9e55e1
·
verified ·
1 Parent(s): 1b43a45

Upload coherence.jsonl

Browse files
Files changed (1) hide show
  1. data/coherence.jsonl +125 -0
data/coherence.jsonl ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"pair_id": "cohe_001", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
2
+ {"pair_id": "cohe_002", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
3
+ {"pair_id": "cohe_003", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
4
+ {"pair_id": "cohe_004", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
5
+ {"pair_id": "cohe_005", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
6
+ {"pair_id": "cohe_006", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
7
+ {"pair_id": "cohe_007", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
8
+ {"pair_id": "cohe_008", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
9
+ {"pair_id": "cohe_009", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
10
+ {"pair_id": "cohe_010", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
11
+ {"pair_id": "cohe_011", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
12
+ {"pair_id": "cohe_012", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
13
+ {"pair_id": "cohe_013", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
14
+ {"pair_id": "cohe_014", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
15
+ {"pair_id": "cohe_015", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
16
+ {"pair_id": "cohe_016", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
17
+ {"pair_id": "cohe_017", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
18
+ {"pair_id": "cohe_018", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
19
+ {"pair_id": "cohe_019", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
20
+ {"pair_id": "cohe_020", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
21
+ {"pair_id": "cohe_021", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
22
+ {"pair_id": "cohe_022", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
23
+ {"pair_id": "cohe_023", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
24
+ {"pair_id": "cohe_024", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
25
+ {"pair_id": "cohe_025", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "response_being_judged": "The meeting discussed budget allocation. Marketing needs increased funding. Sales performed well last quarter. The CEO approved the proposal.", "ground_truth_label": "score_1", "semantic_equivalence_score": 1.0}
26
+ {"pair_id": "cohe_026", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
27
+ {"pair_id": "cohe_027", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
28
+ {"pair_id": "cohe_028", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
29
+ {"pair_id": "cohe_029", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
30
+ {"pair_id": "cohe_030", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
31
+ {"pair_id": "cohe_031", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
32
+ {"pair_id": "cohe_032", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
33
+ {"pair_id": "cohe_033", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
34
+ {"pair_id": "cohe_034", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
35
+ {"pair_id": "cohe_035", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
36
+ {"pair_id": "cohe_036", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
37
+ {"pair_id": "cohe_037", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
38
+ {"pair_id": "cohe_038", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
39
+ {"pair_id": "cohe_039", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
40
+ {"pair_id": "cohe_040", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
41
+ {"pair_id": "cohe_041", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
42
+ {"pair_id": "cohe_042", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
43
+ {"pair_id": "cohe_043", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
44
+ {"pair_id": "cohe_044", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
45
+ {"pair_id": "cohe_045", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
46
+ {"pair_id": "cohe_046", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
47
+ {"pair_id": "cohe_047", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
48
+ {"pair_id": "cohe_048", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
49
+ {"pair_id": "cohe_049", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
50
+ {"pair_id": "cohe_050", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "response_being_judged": "Algorithm efficiency depends on complexity analysis. Big O notation measures worst-case performance. Different algorithms solve problems differently.", "ground_truth_label": "score_2", "semantic_equivalence_score": 1.0}
51
+ {"pair_id": "cohe_051", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
52
+ {"pair_id": "cohe_052", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
53
+ {"pair_id": "cohe_053", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
54
+ {"pair_id": "cohe_054", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
55
+ {"pair_id": "cohe_055", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
56
+ {"pair_id": "cohe_056", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
57
+ {"pair_id": "cohe_057", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
58
+ {"pair_id": "cohe_058", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
59
+ {"pair_id": "cohe_059", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
60
+ {"pair_id": "cohe_060", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
61
+ {"pair_id": "cohe_061", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
62
+ {"pair_id": "cohe_062", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
63
+ {"pair_id": "cohe_063", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
64
+ {"pair_id": "cohe_064", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
65
+ {"pair_id": "cohe_065", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
66
+ {"pair_id": "cohe_066", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
67
+ {"pair_id": "cohe_067", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
68
+ {"pair_id": "cohe_068", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
69
+ {"pair_id": "cohe_069", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
70
+ {"pair_id": "cohe_070", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
71
+ {"pair_id": "cohe_071", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
72
+ {"pair_id": "cohe_072", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
73
+ {"pair_id": "cohe_073", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
74
+ {"pair_id": "cohe_074", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
75
+ {"pair_id": "cohe_075", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "response_being_judged": "Climate change affects global temperatures. Ice caps are melting. We need renewable energy. Solar panels are expensive.", "ground_truth_label": "score_3", "semantic_equivalence_score": 1.0}
76
+ {"pair_id": "cohe_076", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
77
+ {"pair_id": "cohe_077", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
78
+ {"pair_id": "cohe_078", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
79
+ {"pair_id": "cohe_079", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
80
+ {"pair_id": "cohe_080", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
81
+ {"pair_id": "cohe_081", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
82
+ {"pair_id": "cohe_082", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
83
+ {"pair_id": "cohe_083", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
84
+ {"pair_id": "cohe_084", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
85
+ {"pair_id": "cohe_085", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
86
+ {"pair_id": "cohe_086", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
87
+ {"pair_id": "cohe_087", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
88
+ {"pair_id": "cohe_088", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
89
+ {"pair_id": "cohe_089", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
90
+ {"pair_id": "cohe_090", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
91
+ {"pair_id": "cohe_091", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
92
+ {"pair_id": "cohe_092", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
93
+ {"pair_id": "cohe_093", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
94
+ {"pair_id": "cohe_094", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
95
+ {"pair_id": "cohe_095", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
96
+ {"pair_id": "cohe_096", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
97
+ {"pair_id": "cohe_097", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
98
+ {"pair_id": "cohe_098", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
99
+ {"pair_id": "cohe_099", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
100
+ {"pair_id": "cohe_100", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "response_being_judged": "The study examined neural networks for image recognition. Results showed 95% accuracy. Robustness to adversarial examples remains unclear.", "ground_truth_label": "score_4", "semantic_equivalence_score": 1.0}
101
+ {"pair_id": "cohe_101", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
102
+ {"pair_id": "cohe_102", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
103
+ {"pair_id": "cohe_103", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
104
+ {"pair_id": "cohe_104", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
105
+ {"pair_id": "cohe_105", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
106
+ {"pair_id": "cohe_106", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
107
+ {"pair_id": "cohe_107", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
108
+ {"pair_id": "cohe_108", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
109
+ {"pair_id": "cohe_109", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
110
+ {"pair_id": "cohe_110", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
111
+ {"pair_id": "cohe_111", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
112
+ {"pair_id": "cohe_112", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
113
+ {"pair_id": "cohe_113", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
114
+ {"pair_id": "cohe_114", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
115
+ {"pair_id": "cohe_115", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
116
+ {"pair_id": "cohe_116", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
117
+ {"pair_id": "cohe_117", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
118
+ {"pair_id": "cohe_118", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
119
+ {"pair_id": "cohe_119", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
120
+ {"pair_id": "cohe_120", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
121
+ {"pair_id": "cohe_121", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
122
+ {"pair_id": "cohe_122", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
123
+ {"pair_id": "cohe_123", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Rate coherence 1-5. One number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
124
+ {"pair_id": "cohe_124", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Coherence score 1 to 5, respond with number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "How coherent is this? Score: 1=poor 5=excellent. Number only.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}
125
+ {"pair_id": "cohe_125", "task_type": "coherence", "source_benchmark": "SummEval", "prompt_a": "Assign coherence rating 1-5. Single digit response.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "prompt_b": "Coherence: 1 (incoherent) to 5 (very coherent). Reply with number.\n\nText: Dogs have four legs. They bark. Cats have tails. Birds fly.", "response_being_judged": "Dogs have four legs. They bark. Cats have tails. Birds fly.", "ground_truth_label": "score_5", "semantic_equivalence_score": 1.0}