| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| """Only Connect Wall (OCW) dataset""" |
|
|
| import json |
| import os |
|
|
| import datasets |
|
|
|
|
# BibTeX citation for the paper that introduced the OCW dataset.
_CITATION = """\
@article{Naeini2023LargeLM,
title = {Large Language Models are Fixated by Red Herrings: Exploring Creative Problem Solving and Einstellung Effect using the Only Connect Wall Dataset},
author = {Saeid Alavi Naeini and Raeid Saqur and Mozhgan Saeidi and John Giorgi and Babak Taati},
year = 2023,
journal = {ArXiv},
volume = {abs/2306.11167},
url = {https://api.semanticscholar.org/CorpusID:259203717}
}
"""

# Short human-readable description shown in the dataset card / DatasetInfo.
_DESCRIPTION = """\
The Only Connect Wall (OCW) dataset contains 618 "Connecting Walls" from the Round 3: Connecting Wall segment of the Only Connect quiz show, collected from 15 seasons' worth of episodes. Each wall contains the ground-truth groups and connections as well as recorded human performance.
"""

_HOMEPAGE_URL = "https://github.com/TaatiTeam/OCW/"

_LICENSE = "MIT"

# All archives are hosted at this base URL; one tarball per configuration.
_BASE_URL = "https://www.cs.toronto.edu/~taati/OCW/"
_URLS = {
    "ocw": _BASE_URL + "OCW.tar.gz",
    "ocw_randomized": _BASE_URL + "OCW_randomized.tar.gz",
    "ocw_wordnet": _BASE_URL + "OCW_wordnet.tar.gz"

}
|
|
|
|
class OCW(datasets.GeneratorBasedBuilder):
    """Builder for the Only Connect Wall (OCW) dataset.

    Three configurations are exposed:

    * ``ocw``            -- the main dataset.
    * ``ocw_randomized`` -- easier walls with randomized groups; only the test
      split differs, train/validation are reused from the main archive.
    * ``ocw_wordnet``    -- easier walls with WordNet synonyms replacing the
      original clues.

    Each example is one wall: 16 clue words, the overall connections, and four
    ground-truth groups, each carrying recorded human performance.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="ocw", version=VERSION,
                               description="main OCW dataset"),
        datasets.BuilderConfig(name="ocw_randomized", version=VERSION,
                               description="Easy OCW dataset with randomized groups in each wall"),
        datasets.BuilderConfig(name="ocw_wordnet", version=VERSION,
                               description="Easy OCW dataset with wordnet synonyms replaced with original clues"),
    ]

    DEFAULT_CONFIG_NAME = "ocw"

    @staticmethod
    def _group_features():
        """Return the (identical) feature schema used by each of the four groups.

        A fresh dict is built per call so the four group entries in the
        Features mapping do not share mutable state.
        """
        return {
            "group_id": datasets.Value("string"),
            "gt_words": datasets.features.Sequence(feature=datasets.Value("string")),
            "gt_connection": datasets.Value("string"),
            "human_performance": {
                "grouping": datasets.Value("int32"),
                "connection": datasets.Value("int32"),
            },
        }

    def _info(self):
        """Assemble the DatasetInfo (features, description, citation, ...)."""
        features = datasets.Features(
            {
                "wall_id": datasets.Value("string"),
                "season": datasets.Value("int32"),
                "episode": datasets.Value("int32"),
                "words": datasets.features.Sequence(feature=datasets.Value("string")),
                "gt_connections": datasets.features.Sequence(feature=datasets.Value("string")),
                # group_1 .. group_4 all use the same per-group schema.
                **{f"group_{i}": self._group_features() for i in range(1, 5)},
            }
        )
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE_URL,
            license=_LICENSE,
            citation=_CITATION,
            supervised_keys=None,
        )

    def _split_generators(self, dl_manager):
        """Download/extract the archive(s) and map files to the three splits.

        For ``ocw_randomized`` two archives are fetched: the randomized one
        (test split only) and the main one (train/validation).
        """
        url = _URLS[self.config.name]
        if self.config.name == "ocw_randomized":
            url = [url, _URLS[self.DEFAULT_CONFIG_NAME]]
        path = dl_manager.download_and_extract(url)
        if self.config.name == self.DEFAULT_CONFIG_NAME:
            data_dir = os.path.join(path, 'dataset')
            train_filepath = os.path.join(data_dir, 'train.json')
            val_filepath = os.path.join(data_dir, 'validation.json')
            test_filepath = os.path.join(data_dir, 'test.json')
        elif self.config.name == "ocw_randomized":
            # path[1] is the main archive (train/validation),
            # path[0] is the randomized archive (test only).
            main_dir = os.path.join(path[1], 'dataset')
            train_filepath = os.path.join(main_dir, 'train.json')
            val_filepath = os.path.join(main_dir, 'validation.json')
            test_filepath = os.path.join(path[0], 'OCW_randomized', 'easy_test.json')
        else:  # ocw_wordnet
            data_dir = os.path.join(path, 'OCW_wordnet')
            train_filepath = os.path.join(data_dir, 'easy_train_wordnet.json')
            val_filepath = os.path.join(data_dir, 'easy_validation_wordnet.json')
            test_filepath = os.path.join(data_dir, 'easy_test_wordnet.json')

        return [
            datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": train_filepath}),
            datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": val_filepath}),
            datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"filepath": test_filepath}),
        ]

    @staticmethod
    def _group_to_example(group):
        """Convert one raw group dict from the JSON file into the example schema."""
        human_performance = group['human_performance']
        return {
            "group_id": group.get("group_id"),
            "gt_words": group.get("gt_words"),
            "gt_connection": group.get("gt_connection"),
            "human_performance": {
                "grouping": human_performance.get("grouping"),
                "connection": human_performance.get("connection"),
            },
        }

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs from one split's JSON file.

        The file holds a single JSON object whose ``"dataset"`` entry is a
        list of walls; keys are the walls' 0-based positions in that list.
        """
        with open(filepath, encoding="utf-8") as f:
            ocw = json.load(f)
        for key, data in enumerate(ocw["dataset"]):
            example = {
                "wall_id": data.get("wall_id"),
                "season": data.get("season"),
                "episode": data.get("episode"),
                "words": data.get("words"),
                "gt_connections": data.get("gt_connections"),
            }
            # The four groups share one layout; build them in a loop instead
            # of four copy-pasted blocks.
            for i in range(1, 5):
                example[f"group_{i}"] = self._group_to_example(data['groups'][f"group_{i}"])
            yield key, example