File size: 9,562 Bytes
0ab56fa
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
"""SemanticQA: A Semantic Reasoning Benchmark for Language Models."""

import json
import os

import datasets


_CITATION = """\
@article{liu2024revisiting,
    title={Revisiting a Pain in the Neck: Semantic Phrase Processing Benchmark for Language Models},
    author={Liu, Yang and Qin, Melissa Xiaohui and Li, Hongming and Huang, Chao},
    journal={arXiv preprint arXiv:2405.02861},
    year={2024}
}
"""

_DESCRIPTION = """\
SemanticQA is a comprehensive benchmark for evaluating language models on semantic \
phrase processing tasks, covering idioms, noun compounds, lexical collocations, and \
verbal multiword expressions (VMWEs). It includes 11 core evaluation subsets spanning \
4 phrase types with tasks such as detection, extraction, categorization, interpretation, \
and retrieval.
"""

_HOMEPAGE = "https://github.com/jacklanda/SemanticQA"
_LICENSE = "MIT"
_DATA_DIR = "data"

# Registry of every benchmark subset: maps a builder-config name to
#   - "description": human-readable task summary (also shown in DatasetInfo),
#   - "data_files": split name ("train"/"test"/"validation") -> path relative
#     to _DATA_DIR; most subsets are evaluation-only and ship a "test" split,
#   - "features": the datasets.Features schema of each JSON record.
# _info() and _split_generators() below read this table via self.config.name.
_CONFIGS = {
    # Lexical-collocation tasks (CR / LCC / LCE / LCI).
    "collocate_retrieval": {
        "description": "Collocate Retrieval (CR): Given a base word, its lexical function label, and a sentential context with a [MASK] token, retrieve the correct collocate.",
        "data_files": {"test": "collocate_retrieval/collocate_retrieval.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "base": datasets.Value("string"),
            "collocate": datasets.Value("string"),
            "collocation": datasets.Value("string"),
            "label": datasets.Value("string"),
            "context": datasets.Value("string"),
        }),
    },
    "collocation_categorization": {
        "description": "Lexical Collocation Categorization (LCC): Classify a collocation into its lexical function category.",
        "data_files": {"test": "collocation_categorization/collocation_categorization.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "base": datasets.Value("string"),
            "collocate": datasets.Value("string"),
            "collocation": datasets.Value("string"),
            "label": datasets.Value("string"),
            "label_id": datasets.Value("string"),
            "context": datasets.Value("string"),
        }),
    },
    "collocation_extraction": {
        "description": "Lexical Collocation Extraction (LCE): Extract the collocation from a given context.",
        "data_files": {"test": "collocation_extraction/collocation_extraction.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "keyword": datasets.Value("string"),
            "value": datasets.Value("string"),
            "collocation": datasets.Value("string"),
            "label": datasets.Value("string"),
            "label_id": datasets.Value("string"),
            "context": datasets.Value("string"),
        }),
    },
    "collocation_paraphrase": {
        "description": "Lexical Collocation Interpretation (LCI): Generate paraphrases for a collocation in context.",
        "data_files": {"test": "collocation_paraphrase/collocation_paraphrase.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "base": datasets.Value("string"),
            "collocate": datasets.Value("string"),
            "collocation": datasets.Value("string"),
            "label": datasets.Value("string"),
            "label_id": datasets.Value("string"),
            "context": datasets.Value("string"),
            # Multiple acceptable reference paraphrases per example.
            "paraphrases": datasets.Sequence(datasets.Value("string")),
        }),
    },
    # Idiomatic-expression tasks (IED / IEE / IEI).
    "idiom_detection": {
        "description": "Idiomatic Expression Detection (IED): Multiple-choice task to identify the meaning of an idiom in context.",
        "data_files": {"test": "idiom_detection/idiom_detection.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "context": datasets.Value("string"),
            "idiom": datasets.Value("string"),
            # Four multiple-choice options; "target" holds the correct letter.
            "A": datasets.Value("string"),
            "B": datasets.Value("string"),
            "C": datasets.Value("string"),
            "D": datasets.Value("string"),
            "target": datasets.Value("string"),
        }),
    },
    "idiom_extraction": {
        "description": "Idiomatic Expression Extraction (IEE): Extract the idiomatic expression from a given context.",
        "data_files": {"test": "idiom_extraction/idiom_extraction.json"},
        "features": datasets.Features({
            # NOTE: this subset has no "id" field, unlike most others.
            "context": datasets.Value("string"),
            "idiom": datasets.Value("string"),
        }),
    },
    "idiom_paraphrase": {
        "description": "Idiomatic Expression Interpretation (IEI): Generate a literal paraphrase of an idiomatic expression in context.",
        "data_files": {"test": "idiom_paraphrase/idiom_paraphrase.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "idiom": datasets.Value("string"),
            "paraphrase": datasets.Value("string"),
            # Same sentence rendered with the idiom and with its literal gloss.
            "context_idiomatic": datasets.Value("string"),
            "context_literal": datasets.Value("string"),
        }),
    },
    # Noun-compound tasks (NCC / NCC-FT / NCE / NCI).
    "noun_compound_compositionality": {
        "description": "Noun Compound Compositionality (NCC): Multiple-choice task to judge the compositionality level of a noun compound in context.",
        "data_files": {"test": "noun_compound_compositionality/noun_compound_compositionality.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "noun_compound": datasets.Value("string"),
            "context": datasets.Value("string"),
            "A": datasets.Value("string"),
            "B": datasets.Value("string"),
            "C": datasets.Value("string"),
            "D": datasets.Value("string"),
            "target": datasets.Value("string"),
        }),
    },
    # The only config with train/test/validation splits (for fine-tuning).
    "noun_compound_compositionality_ft": {
        "description": "Noun Compound Compositionality fine-tuning splits (NCC-FT): Train/test/validation data for fine-tuning compositionality classifiers.",
        "data_files": {
            "train": "noun_compound_compositionality/noun_compound_compositionality_ft_train.json",
            "test": "noun_compound_compositionality/noun_compound_compositionality_ft_test.json",
            "validation": "noun_compound_compositionality/noun_compound_compositionality_ft_valid.json",
        },
        "features": datasets.Features({
            "text": datasets.Value("string"),
            "options": datasets.Value("string"),
            "answer": datasets.Value("string"),
        }),
    },
    "noun_compound_extraction": {
        "description": "Noun Compound Extraction (NCE): Extract the noun compound from a given context.",
        "data_files": {"test": "noun_compound_extraction/noun_compound_extraction.json"},
        "features": datasets.Features({
            "context": datasets.Value("string"),
            # Span offsets are stored as strings in the source JSON.
            "start_index": datasets.Value("string"),
            "end_index": datasets.Value("string"),
            "noun_compound": datasets.Value("string"),
            "interpretation": datasets.Value("string"),
        }),
    },
    "noun_compound_interpretation": {
        "description": "Noun Compound Interpretation (NCI): Generate free-form interpretations of a noun compound.",
        "data_files": {"test": "noun_compound_interpretation/noun_compound_interpretation.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "noun_compound": datasets.Value("string"),
            "references": datasets.Sequence(datasets.Value("string")),
        }),
    },
    # Verbal multiword-expression task (VMWE).
    "verbal_mwe_extraction": {
        "description": "Verbal Multiword Expression Extraction (VMWE): Extract the verbal MWE from a given context and identify its type (VPC, LVC, VID).",
        "data_files": {"test": "verbal_mwe_extraction/verbal_mwe_extraction.json"},
        "features": datasets.Features({
            "id": datasets.Value("string"),
            "context": datasets.Value("string"),
            "vmwe": datasets.Value("string"),
            "label": datasets.Value("string"),
        }),
    },
}


class SemanticQA(datasets.GeneratorBasedBuilder):
    """Builder exposing each SemanticQA subset as a separate config.

    Every entry in the module-level ``_CONFIGS`` table becomes one
    ``BuilderConfig``; the table also supplies the feature schema and the
    relative data-file paths consumed by the methods below.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name=config_name, description=config["description"])
        for config_name, config in _CONFIGS.items()
    ]
    DEFAULT_CONFIG_NAME = "idiom_detection"

    def _info(self):
        """Build the DatasetInfo for the currently selected config."""
        config = _CONFIGS[self.config.name]
        return datasets.DatasetInfo(
            description=config["description"],
            features=config["features"],
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Resolve each declared split's data file and hand it to a generator."""
        config = _CONFIGS[self.config.name]

        # Paths in _CONFIGS are relative to the repository's data directory.
        requested = {}
        for split_name, relative_path in config["data_files"].items():
            requested[split_name] = os.path.join(_DATA_DIR, relative_path)
        resolved = dl_manager.download_and_extract(requested)

        named_splits = {
            "train": datasets.Split.TRAIN,
            "validation": datasets.Split.VALIDATION,
            "test": datasets.Split.TEST,
        }
        generators = []
        for split_name, local_path in resolved.items():
            generators.append(
                datasets.SplitGenerator(
                    name=named_splits[split_name],
                    gen_kwargs={"filepath": local_path},
                )
            )
        return generators

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from a JSON file holding a list of records."""
        with open(filepath, encoding="utf-8") as handle:
            records = json.load(handle)
        # The list index doubles as the unique example key.
        for key, record in enumerate(records):
            yield key, record