{"pair_id": "fact_001", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_002", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_003", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_004", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_005", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_006", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_007", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_008", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_009", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_010", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_011", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_012", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_013", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The Earth orbits around the Sun.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The Earth orbits around the Sun.", "response_being_judged": "The Earth orbits around the Sun.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_014", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_015", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Paris is the capital of France.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_016", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Paris is the capital of France.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_017", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Paris is the capital of France.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_018", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Paris is the capital of France.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_019", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_020", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Paris is the capital of France.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_021", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Paris is the capital of France.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_022", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Paris is the capital of France.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_023", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Paris is the capital of France.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_024", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_025", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Paris is the capital of France.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_026", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Paris is the capital of France.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Paris is the capital of France.", "response_being_judged": "Paris is the capital of France.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_027", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_028", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_029", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_030", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_031", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_032", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_033", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_034", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_035", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_036", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_037", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_038", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_039", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Water boils at 100 degrees Celsius at sea level.", "response_being_judged": "Water boils at 100 degrees Celsius at sea level.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_040", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_041", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_042", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_043", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_044", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_045", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_046", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_047", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_048", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_049", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_050", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_051", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_052", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: DNA is a protein molecule found in cells.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: DNA is a protein molecule found in cells.", "response_being_judged": "DNA is a protein molecule found in cells.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_053", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_054", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The United States has 50 states.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_055", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The United States has 50 states.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_056", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The United States has 50 states.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_057", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The United States has 50 states.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_058", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_059", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The United States has 50 states.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_060", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The United States has 50 states.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_061", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The United States has 50 states.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_062", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The United States has 50 states.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_063", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_064", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The United States has 50 states.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_065", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The United States has 50 states.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The United States has 50 states.", "response_being_judged": "The United States has 50 states.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_066", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_067", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_068", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_069", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_070", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_071", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_072", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_073", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_074", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_075", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_076", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_077", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Mount Everest is the tallest mountain in the solar system.", "response_being_judged": "Mount Everest is the tallest mountain in the solar system.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_078", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_079", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_080", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_081", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_082", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_083", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_084", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_085", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_086", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_087", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_088", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_089", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Photosynthesis converts sunlight into chemical energy.", "response_being_judged": "Photosynthesis converts sunlight into chemical energy.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_090", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_091", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_092", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_093", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_094", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_095", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_096", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_097", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_098", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_099", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_100", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_101", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Quantum mechanics describes particles larger than atoms.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Quantum mechanics describes particles larger than atoms.", "response_being_judged": "Quantum mechanics describes particles larger than atoms.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_102", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_103", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_104", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_105", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_106", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_107", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_108", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_109", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_110", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_111", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_112", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_113", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: The human heart pumps blood to the lungs and body.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: The human heart pumps blood to the lungs and body.", "response_being_judged": "The human heart pumps blood to the lungs and body.", "ground_truth_label": "accurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_114", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_115", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_116", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_117", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_118", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_119", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_120", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_121", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Is this factually correct? Answer YES or NO only.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_122", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Are the facts in this response accurate? YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_123", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Is this factually correct? Answer YES or NO only.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_124", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Fact-check this response. Reply YES (correct) or NO (incorrect).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Are the facts in this response accurate? YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}
{"pair_id": "fact_125", "task_type": "factuality", "source_benchmark": "TruthfulQA", "prompt_a": "Does this response contain factual errors? Answer NO (accurate) or YES (has errors).\n\nResponse: Gravitational waves were theorized by Newton.", "prompt_b": "Evaluate factual accuracy. One word answer - YES or NO.\n\nResponse: Gravitational waves were theorized by Newton.", "response_being_judged": "Gravitational waves were theorized by Newton.", "ground_truth_label": "inaccurate", "semantic_equivalence_score": 1.0}