# SemanticQA / SemanticQA.py
# Uploaded by jacklanda via huggingface_hub (commit 0ab56fa, verified)
"""SemanticQA: A Semantic Reasoning Benchmark for Language Models."""
import json
import os
import datasets
# BibTeX citation for the paper this benchmark accompanies.
_CITATION = """\
@article{liu2024revisiting,
title={Revisiting a Pain in the Neck: Semantic Phrase Processing Benchmark for Language Models},
author={Liu, Yang and Qin, Melissa Xiaohui and Li, Hongming and Huang, Chao},
journal={arXiv preprint arXiv:2405.02861},
year={2024}
}
"""
# Short human-readable summary surfaced in the dataset card / DatasetInfo.
_DESCRIPTION = """\
SemanticQA is a comprehensive benchmark for evaluating language models on semantic \
phrase processing tasks, covering idioms, noun compounds, lexical collocations, and \
verbal multiword expressions (VMWEs). It includes 11 core evaluation subsets spanning \
4 phrase types with tasks such as detection, extraction, categorization, interpretation, \
and retrieval.
"""
_HOMEPAGE = "https://github.com/jacklanda/SemanticQA"
_LICENSE = "MIT"
# Root directory (relative to the repository) under which all data files live.
_DATA_DIR = "data"
# Registry of every builder configuration. Each entry maps a config name to:
#   "description" — one-line task description (also used as the config description),
#   "data_files"  — split name ("train"/"test"/"validation") -> path relative to _DATA_DIR,
#   "features"    — the datasets.Features schema of each JSON record for that subset.
_CONFIGS = {
"collocate_retrieval": {
"description": "Collocate Retrieval (CR): Given a base word, its lexical function label, and a sentential context with a [MASK] token, retrieve the correct collocate.",
"data_files": {"test": "collocate_retrieval/collocate_retrieval.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"base": datasets.Value("string"),
"collocate": datasets.Value("string"),
"collocation": datasets.Value("string"),
"label": datasets.Value("string"),
"context": datasets.Value("string"),
}),
},
"collocation_categorization": {
"description": "Lexical Collocation Categorization (LCC): Classify a collocation into its lexical function category.",
"data_files": {"test": "collocation_categorization/collocation_categorization.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"base": datasets.Value("string"),
"collocate": datasets.Value("string"),
"collocation": datasets.Value("string"),
"label": datasets.Value("string"),
"label_id": datasets.Value("string"),
"context": datasets.Value("string"),
}),
},
"collocation_extraction": {
"description": "Lexical Collocation Extraction (LCE): Extract the collocation from a given context.",
"data_files": {"test": "collocation_extraction/collocation_extraction.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"keyword": datasets.Value("string"),
"value": datasets.Value("string"),
"collocation": datasets.Value("string"),
"label": datasets.Value("string"),
"label_id": datasets.Value("string"),
"context": datasets.Value("string"),
}),
},
"collocation_paraphrase": {
"description": "Lexical Collocation Interpretation (LCI): Generate paraphrases for a collocation in context.",
"data_files": {"test": "collocation_paraphrase/collocation_paraphrase.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"base": datasets.Value("string"),
"collocate": datasets.Value("string"),
"collocation": datasets.Value("string"),
"label": datasets.Value("string"),
"label_id": datasets.Value("string"),
"context": datasets.Value("string"),
# Multiple acceptable paraphrases per collocation.
"paraphrases": datasets.Sequence(datasets.Value("string")),
}),
},
"idiom_detection": {
"description": "Idiomatic Expression Detection (IED): Multiple-choice task to identify the meaning of an idiom in context.",
"data_files": {"test": "idiom_detection/idiom_detection.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"context": datasets.Value("string"),
"idiom": datasets.Value("string"),
# Four multiple-choice options; "target" holds the correct option letter.
"A": datasets.Value("string"),
"B": datasets.Value("string"),
"C": datasets.Value("string"),
"D": datasets.Value("string"),
"target": datasets.Value("string"),
}),
},
"idiom_extraction": {
"description": "Idiomatic Expression Extraction (IEE): Extract the idiomatic expression from a given context.",
"data_files": {"test": "idiom_extraction/idiom_extraction.json"},
# NOTE: unlike the other subsets, these records carry no "id" field.
"features": datasets.Features({
"context": datasets.Value("string"),
"idiom": datasets.Value("string"),
}),
},
"idiom_paraphrase": {
"description": "Idiomatic Expression Interpretation (IEI): Generate a literal paraphrase of an idiomatic expression in context.",
"data_files": {"test": "idiom_paraphrase/idiom_paraphrase.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"idiom": datasets.Value("string"),
"paraphrase": datasets.Value("string"),
"context_idiomatic": datasets.Value("string"),
"context_literal": datasets.Value("string"),
}),
},
"noun_compound_compositionality": {
"description": "Noun Compound Compositionality (NCC): Multiple-choice task to judge the compositionality level of a noun compound in context.",
"data_files": {"test": "noun_compound_compositionality/noun_compound_compositionality.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"noun_compound": datasets.Value("string"),
"context": datasets.Value("string"),
"A": datasets.Value("string"),
"B": datasets.Value("string"),
"C": datasets.Value("string"),
"D": datasets.Value("string"),
"target": datasets.Value("string"),
}),
},
"noun_compound_compositionality_ft": {
"description": "Noun Compound Compositionality fine-tuning splits (NCC-FT): Train/test/validation data for fine-tuning compositionality classifiers.",
# Only config with train/test/validation splits; all others are test-only.
"data_files": {
"train": "noun_compound_compositionality/noun_compound_compositionality_ft_train.json",
"test": "noun_compound_compositionality/noun_compound_compositionality_ft_test.json",
"validation": "noun_compound_compositionality/noun_compound_compositionality_ft_valid.json",
},
"features": datasets.Features({
"text": datasets.Value("string"),
"options": datasets.Value("string"),
"answer": datasets.Value("string"),
}),
},
"noun_compound_extraction": {
"description": "Noun Compound Extraction (NCE): Extract the noun compound from a given context.",
"data_files": {"test": "noun_compound_extraction/noun_compound_extraction.json"},
"features": datasets.Features({
"context": datasets.Value("string"),
# Span indices are stored as strings in the source JSON, not integers.
"start_index": datasets.Value("string"),
"end_index": datasets.Value("string"),
"noun_compound": datasets.Value("string"),
"interpretation": datasets.Value("string"),
}),
},
"noun_compound_interpretation": {
"description": "Noun Compound Interpretation (NCI): Generate free-form interpretations of a noun compound.",
"data_files": {"test": "noun_compound_interpretation/noun_compound_interpretation.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"noun_compound": datasets.Value("string"),
"references": datasets.Sequence(datasets.Value("string")),
}),
},
"verbal_mwe_extraction": {
"description": "Verbal Multiword Expression Extraction (VMWE): Extract the verbal MWE from a given context and identify its type (VPC, LVC, VID).",
"data_files": {"test": "verbal_mwe_extraction/verbal_mwe_extraction.json"},
"features": datasets.Features({
"id": datasets.Value("string"),
"context": datasets.Value("string"),
"vmwe": datasets.Value("string"),
"label": datasets.Value("string"),
}),
},
}
class SemanticQA(datasets.GeneratorBasedBuilder):
    """Builder for the SemanticQA semantic-phrase-processing benchmark.

    One BuilderConfig is registered per entry in the module-level
    ``_CONFIGS`` registry; that registry also supplies the feature
    schema and the data-file layout for each configuration.
    """

    # One configuration per registered subset.
    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, description=spec["description"])
        for config_name, spec in _CONFIGS.items()
    ]
    DEFAULT_CONFIG_NAME = "idiom_detection"

    def _info(self):
        """Return the DatasetInfo for the currently selected configuration."""
        spec = _CONFIGS[self.config.name]
        return datasets.DatasetInfo(
            description=spec["description"],
            features=spec["features"],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download each configured data file and attach it to a named split."""
        spec = _CONFIGS[self.config.name]
        # Resolve every split's file path relative to the data directory.
        requested = {}
        for split_name, rel_path in spec["data_files"].items():
            requested[split_name] = os.path.join(_DATA_DIR, rel_path)
        local_paths = dl_manager.download_and_extract(requested)
        # Translate our string split keys into canonical datasets.Split values.
        named_splits = {
            "test": datasets.Split.TEST,
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
        }
        generators = []
        for split_name, local_path in local_paths.items():
            generators.append(
                datasets.SplitGenerator(
                    name=named_splits[split_name],
                    gen_kwargs={"filepath": local_path},
                )
            )
        return generators

    def _generate_examples(self, filepath):
        """Yield ``(index, record)`` pairs from a JSON array file.

        Each file holds a single JSON list; the list position serves as
        the unique example key and records are passed through unchanged.
        """
        with open(filepath, encoding="utf-8") as handle:
            records = json.load(handle)
        yield from enumerate(records)