Oleg Baskakov committed on
Commit
24a410c
·
1 Parent(s): b24dbf9

generate dataset splits

Browse files
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
 
 
53
  *.jpg filter=lfs diff=lfs merge=lfs -text
54
  *.jpeg filter=lfs diff=lfs merge=lfs -text
55
  *.webp filter=lfs diff=lfs merge=lfs -text
56
+ train.jsonl filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,134 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .DS_Store
2
+ # Byte-compiled / optimized / DLL files
3
+ __pycache__/
4
+ *.py[cod]
5
+ *$py.class
6
+
7
+ # C extensions
8
+ *.so
9
+
10
+ # Distribution / packaging
11
+ .Python
12
+ build/
13
+ develop-eggs/
14
+ dist/
15
+ downloads/
16
+ eggs/
17
+ .eggs/
18
+ lib/
19
+ lib64/
20
+ parts/
21
+ sdist/
22
+ var/
23
+ wheels/
24
+ pip-wheel-metadata/
25
+ share/python-wheels/
26
+ *.egg-info/
27
+ .installed.cfg
28
+ *.egg
29
+ MANIFEST
30
+
31
+ # PyInstaller
32
+ # Usually these files are written by a python script from a template
33
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
34
+ *.manifest
35
+ *.spec
36
+
37
+ # Installer logs
38
+ pip-log.txt
39
+ pip-delete-this-directory.txt
40
+
41
+ # Unit test / coverage reports
42
+ htmlcov/
43
+ .tox/
44
+ .nox/
45
+ .coverage
46
+ .coverage.*
47
+ .cache
48
+ nosetests.xml
49
+ coverage.xml
50
+ *.cover
51
+ *.py,cover
52
+ .hypothesis/
53
+ .pytest_cache/
54
+
55
+ # Translations
56
+ *.mo
57
+ *.pot
58
+
59
+ # Django stuff:
60
+ *.log
61
+ local_settings.py
62
+ db.sqlite3
63
+ db.sqlite3-journal
64
+
65
+ # Flask stuff:
66
+ instance/
67
+ .webassets-cache
68
+
69
+ # Scrapy stuff:
70
+ .scrapy
71
+
72
+ # Sphinx documentation
73
+ docs/_build/
74
+
75
+ # PyBuilder
76
+ target/
77
+
78
+ # Jupyter Notebook
79
+ .ipynb_checkpoints
80
+
81
+ # IPython
82
+ profile_default/
83
+ ipython_config.py
84
+
85
+ # pyenv
86
+ .python-version
87
+
88
+ # pipenv
89
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
90
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
91
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
92
+ # install all needed dependencies.
93
+ #Pipfile.lock
94
+
95
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow
96
+ __pypackages__/
97
+
98
+ # Celery stuff
99
+ celerybeat-schedule
100
+ celerybeat.pid
101
+
102
+ # SageMath parsed files
103
+ *.sage.py
104
+
105
+ # Environments
106
+ .env
107
+ .venv
108
+ env/
109
+ venv/
110
+ ENV/
111
+ env.bak/
112
+ venv.bak/
113
+
114
+ # Spyder project settings
115
+ .spyderproject
116
+ .spyproject
117
+
118
+ # Rope project settings
119
+ .ropeproject
120
+
121
+ # mkdocs documentation
122
+ /site
123
+
124
+ # mypy
125
+ .mypy_cache/
126
+ .dmypy.json
127
+ dmypy.json
128
+
129
+ # Pyre type checker
130
+ .pyre/
131
+
132
+ htmls/
133
+ dataframes/
134
+ urls/
scripts/00_gen_urls.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from concurrent.futures import ThreadPoolExecutor
3
+
4
+ import requests
5
+
6
+ OUTPUT_DIR = "urls"
7
+ NUM_ROWS = 1800
8
+ MAX_WORKERS = 10
9
+
10
def main():
    """Top up each (difficulty, grid size) URL list file to NUM_ROWS entries.

    Existing lines are counted first so the script can be re-run to resume an
    interrupted collection; only the missing number of URLs is fetched.
    """
    # exist_ok avoids the check-then-create race of isdir() + mkdir().
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    for i, grid_size in enumerate(['4x7', '4x6', '4x5', '4x4', '3x5', '3x4']):
        for j, difficulty in enumerate(['challenging', 'moderate', 'easy']):
            file_path = f'{OUTPUT_DIR}/{difficulty}{grid_size}.txt'
            count = 0
            if os.path.exists(file_path):
                # Count already-collected URLs without loading the file into memory.
                with open(file_path) as existing:
                    count = sum(1 for _ in existing)
            print(f"PROCESS: {file_path}, {count=}")
            with open(file_path, 'a') as file:
                # NOTE(review): all worker threads write to this one handle.
                # Single short writes are effectively atomic in CPython, but an
                # explicit lock would make this safe by contract — confirm.
                with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
                    for _ in range(NUM_ROWS - count):
                        executor.submit(fetch_one, file, i, j)
26
+
27
def fetch_one(file, i, j):
    """Request one new puzzle from puzzlebaron and append its play URL to *file*.

    *i* indexes the grid-size list (form field sg = 6 - i) and *j* the
    difficulty list (sd = 3 - j); the server returns a page embedding a
    one-time puzzle suffix which we extract from the play form's action URL.
    """
    # Timeout so a hung server cannot block the worker thread forever.
    response = requests.post('https://logic.puzzlebaron.com/init2.php',
                             data={'sg': 6 - i, 'sd': 3 - j},
                             timeout=60)
    html_content = response.text
    sfx = html_content.partition('<form method="post" action="play.php?u2=')[2].partition('">')[0]
    if not sfx.isalnum():
        print("FAIL", sfx)
        # Bug fix: the original fell through and recorded the malformed URL.
        return
    url = f"https://logic.puzzlebaron.com/play.php?u2={sfx}\n"
    file.write(url)
36
+
37
# Guard so importing this module does not immediately start scraping.
if __name__ == "__main__":
    main()
scripts/01_gen_htmls.py ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from concurrent.futures import ThreadPoolExecutor
3
+
4
+ import requests
5
+
6
+ OUTPUT_DIR = "htmls"
7
+ MAX_WORKERS = 10
8
+
9
def download_url(url, full_path):
    """Download *url* to *full_path*; log and swallow any failure (best effort).

    Failures are printed rather than raised so one bad URL does not abort the
    whole batch run.
    """
    try:
        # Timeout so a hung server cannot block the worker thread forever.
        response = requests.get(url, timeout=60)
        response.raise_for_status()
        with open(full_path, 'wb') as f:
            f.write(response.content)
    except Exception as e:
        print(f"Failed to download {url}: {e}")
17
+
18
+
19
def main():
    """Download every puzzle page listed under urls/ into per-category
    subdirectories of OUTPUT_DIR, fanning requests out over a thread pool."""
    # exist_ok avoids the check-then-create race of isdir() + mkdir().
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    for grid_size in ['4x7', '4x6', '4x5', '4x4', '3x5', '3x4']:
        for difficulty in ['challenging', 'moderate', 'easy']:
            # Use OUTPUT_DIR instead of the second hard-coded "htmls" literal.
            file_prefix = os.path.join(OUTPUT_DIR, f"{difficulty}{grid_size}")
            os.makedirs(file_prefix, exist_ok=True)
            print(f"PROCESSING: {file_prefix}")
            with open(f'urls/{difficulty}{grid_size}.txt') as urls:
                with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
                    for uu in urls:
                        executor.submit(process_one_chunk, file_prefix, uu.strip())
32
+
33
def process_one_chunk(file_prefix, url):
    """Fetch a single puzzle page unless it was already saved by a previous run."""
    page_name = url.removeprefix("https://logic.puzzlebaron.com/")
    destination = os.path.join(file_prefix, page_name)
    if os.path.exists(destination):
        return  # already downloaded — skip the network round-trip
    download_url(url, destination)
38
+
39
+
40
# Guard so importing this module does not immediately start downloading.
if __name__ == "__main__":
    main()
scripts/02_gen_dataframes.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import re
3
+
4
+ import pandas as pd
5
+ from bs4 import BeautifulSoup
6
+
7
+
8
def extract_puzzle_data(html):
    """Extract one puzzle's fields from a puzzlebaron play-page HTML string.

    Returns a dict with keys: story, clues, categories, label_a, label_b,
    label_c, label_d.
    """
    soup = BeautifulSoup(html, 'html.parser')

    # Extract the task description (the "Backstory and Goal" tab).
    task_description_section = soup.find('div', id='tabs-2')
    if task_description_section is not None:
        task_description = task_description_section.get_text(strip=True).replace('\xa0', ' ')
        task_description = task_description.removeprefix("Backstory and Goal")
        task_description = task_description.partition("Remember, as with all")[0]
    else:
        # Bug fix: the original used `soup.find(...) or ""` and then called
        # .get_text() on the empty-string fallback, raising AttributeError
        # whenever the div was missing.
        task_description = ""

    categories = [cc.get_text(strip=True).replace('\xa0', ' ') for cc in soup.find_all('td', class_='answergrid_head')]

    # Extract clues, stripping the leading "<n>. " numbering.
    clues = []
    for clue_div in soup.find_all('div', class_='clue'):
        clue_raw = clue_div.get_text(strip=True).replace('\xa0', ' ')
        cleaned_clue = re.sub(r'^\d+\.\s*', '', clue_raw)
        clues.append(cleaned_clue)

    # First label column comes from the answer-grid header cells.
    label_categories = dict(label_a=[])
    for label in soup.find_all('td', class_='labelboxh'):
        # .get() avoids a KeyError on header cells that carry no id attribute.
        if label.get('id', '').startswith("labelleftA"):
            label_categories["label_a"].append(label.get_text(strip=True).replace('\xa0', ' '))
    # Remaining columns only appear in inline JavaScript arrays, so pull them
    # out of the raw HTML with a regex.
    for letter in "bcd":
        pattern = re.compile(f'label{letter}_ary' + r'\[\d+]\s*=\s*"([^"]+)";')
        items = pattern.findall(html)
        label_categories[f"label_{letter}"] = items
    return dict(story=task_description,
                clues=clues,
                categories=categories,
                **label_categories)
41
+
42
+
43
# Cross-bucket deduplication state shared by every process_one() call.
global_stories = set()  # story texts seen so far (not read anywhere in this file — TODO confirm it is needed)
global_clues = set()  # every clue string seen so far; used to drop near-duplicate puzzles
45
+
46
+
47
def process_one(difficulty, grid_size):
    """Parse all downloaded HTML pages for one (difficulty, grid size) bucket.

    Puzzles sharing 3+ clues with previously processed puzzles are treated as
    near-duplicates and skipped; new clues are recorded in the module-level
    `global_clues` set. Returns a list of per-puzzle dicts.
    """
    puzzle_data = []
    with open(f'urls/{difficulty}{grid_size}.txt') as rr:
        all_paths = [p.strip() for p in rr]
    dir_path = f'htmls/{difficulty}{grid_size}/'
    for c, puzzle_url in enumerate(all_paths):
        filename = puzzle_url.removeprefix("https://logic.puzzlebaron.com/")
        if c % 200 == 0:
            print(f"{c=}")  # coarse progress indicator
        file_path = os.path.join(dir_path, filename)
        with open(file_path, 'r', encoding='utf-8') as file:
            html_content = file.read()
        data = extract_puzzle_data(html_content)
        # Skip near-duplicates: 3 or more clues already seen elsewhere.
        if len(global_clues.intersection(data['clues'])) >= 3:
            continue
        global_clues.update(data['clues'])
        data['grid_size'] = grid_size
        data['difficulty'] = difficulty
        data['url'] = puzzle_url
        puzzle_data.append(data)
    return puzzle_data
74
+
75
+
76
OUTPUT_DIR = "dataframes"


def main():
    """Build one JSONL dataframe per (difficulty, grid size) bucket from the
    downloaded HTML pages and save it under OUTPUT_DIR."""
    # exist_ok avoids the check-then-create race of exists() + makedirs().
    os.makedirs(OUTPUT_DIR, exist_ok=True)
    for grid_size in ['4x7', '4x6', '4x5', '4x4', '3x5', '3x4']:
        for difficulty in ['challenging', 'moderate', 'easy']:
            puzzle_data = process_one(difficulty, grid_size)
            df = pd.DataFrame(puzzle_data)
            jsonl_file_path = f'{OUTPUT_DIR}/{difficulty}{grid_size}.jsonl'
            df.to_json(jsonl_file_path, orient='records', lines=True)
            print(f'Data saved to {jsonl_file_path}', df.shape)
89
+
90
+
91
# Guard so importing this module does not immediately start parsing.
if __name__ == "__main__":
    main()
scripts/03_gen_splits.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
from collections import Counter

import pandas as pd

# Concatenate every per-(difficulty, grid size) dataframe written by
# scripts/02_gen_dataframes.py into one raw frame.
dfs = []
base_path = "dataframes"
for file_name in sorted(os.listdir(base_path)):
    if not file_name.endswith('.jsonl'):
        continue  # skip stray files such as .DS_Store
    dfs.append(pd.read_json(os.path.join(base_path, file_name), lines=True))
raw_df = pd.concat(dfs, ignore_index=True)
print(f"{raw_df=}")

# Corpus-wide clue frequency; a count > 1 marks a clue shared between puzzles.
clue_counts = Counter(clue for clues in raw_df['clues'] for clue in clues)

# Take ~20% of data with unique stories for a test set.
# Bug fix: sort the stories before enumerating — iterating a raw set made the
# split depend on hash randomization and change between runs.
unique_stories = sorted(set(raw_df['story']))
test_stories = set(story for i, story in enumerate(unique_stories) if i % 5 == 0)
test_df0 = raw_df[raw_df['story'].apply(lambda story: story in test_stories)]
# Keep only test puzzles with at most one clue that also appears elsewhere.
test_df = test_df0[test_df0['clues'].apply(lambda clues: sum(clue_counts[clue] > 1 for clue in clues) <= 1)]
test_df.to_json("test.jsonl", orient='records', lines=True)
print(f"{test_df=}")

train_val_df = raw_df[raw_df['story'].apply(lambda story: story not in test_stories)]

# Every 5th remaining row goes to validation, the rest to train.
val_df = train_val_df[train_val_df.index % 5 == 0]
val_df.to_json("validation.jsonl", orient='records', lines=True)
print(f"{val_df=}")

train_df = train_val_df[train_val_df.index % 5 != 0]
train_df.to_json("train.jsonl", orient='records', lines=True)
print(f"{train_df=}")
test.jsonl ADDED
The diff for this file is too large to render. See raw diff
 
train.jsonl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:38cd68181ef5fff4709385bee393d74397302af9846df6f34969763f66248109
3
+ size 27153998
validation.jsonl ADDED
The diff for this file is too large to render. See raw diff