theophile-ltt committed on
Commit 21ac981 · verified · 1 Parent(s): 436df6a

Chess Challenge submission by theophile-ltt

Files changed (8)
  1. README.md +26 -0
  2. config.json +24 -0
  3. model.py +348 -0
  4. model.safetensors +3 -0
  5. special_tokens_map.json +6 -0
  6. tokenizer.py +218 -0
  7. tokenizer_config.json +50 -0
  8. vocab.json +86 -0
README.md ADDED
@@ -0,0 +1,26 @@
+ ---
+ library_name: transformers
+ tags:
+ - chess
+ - llm-course
+ - chess-challenge
+ license: mit
+ ---
+
+ # chess-theophile
+
+ Chess model submitted to the LLM Course Chess Challenge.
+
+ ## Submission Info
+
+ - **Submitted by**: [theophile-ltt](https://huggingface.co/theophile-ltt)
+ - **Parameters**: 705,280
+ - **Organization**: LLM-course
+
+ ## Model Details
+
+ - **Architecture**: Chess Transformer (GPT-style)
+ - **Vocab size**: 84
+ - **Embedding dim**: 128
+ - **Layers**: 4
+ - **Heads**: 4
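A minimal usage sketch for this card (assuming the repository id `LLM-course/chess-theophile`, inferred from the submission info above; the custom classes in `model.py` and `tokenizer.py` need `trust_remote_code=True`):

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "LLM-course/chess-theophile"  # assumed repo id, not confirmed by the commit itself
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
model.eval()

# Encode a short opening in the extended UCI format the tokenizer expects.
ids = tokenizer("WPe2e4 BPe7e5", return_tensors="pt")["input_ids"]

with torch.no_grad():
    logits = model(input_ids=ids).logits

# Greedy pick of the next structured token (SIDE_*, PIECE_*, SQ_*, ...).
next_token_id = logits[0, -1].argmax().item()
print(tokenizer.decode([next_token_id]))
```

Note that one move is at least four structured tokens (side, piece, source square, destination square), so real move generation would decode several tokens per half-move rather than a single argmax.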
config.json ADDED
@@ -0,0 +1,24 @@
+ {
+   "architectures": [
+     "ChessForCausalLM"
+   ],
+   "auto_map": {
+     "AutoConfig": "model.ChessConfig",
+     "AutoModelForCausalLM": "model.ChessForCausalLM"
+   },
+   "bos_token_id": 1,
+   "dropout": 0.1,
+   "dtype": "float32",
+   "eos_token_id": 2,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "chess_transformer",
+   "n_ctx": 256,
+   "n_embd": 128,
+   "n_head": 4,
+   "n_inner": 384,
+   "n_layer": 4,
+   "pad_token_id": 0,
+   "tie_weights": true,
+   "transformers_version": "4.57.1",
+   "vocab_size": 84
+ }
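The 705,280 parameter count quoted in the README can be reproduced from these values (a back-of-the-envelope sketch that assumes the architecture defined in `model.py` below, with biases on the attention/MLP projections and a tied LM head):

```python
# Parameter count implied by config.json:
# vocab_size=84, n_embd=128, n_inner=384, n_layer=4, n_ctx=256, tie_weights=true.
n_embd, n_inner, n_layer, n_ctx, vocab = 128, 384, 4, 256, 84

embeddings = vocab * n_embd + n_ctx * n_embd                             # wte + wpe
attn = (n_embd * 3 * n_embd + 3 * n_embd) + (n_embd * n_embd + n_embd)   # c_attn + c_proj
mlp = (n_embd * n_inner + n_inner) + (n_inner * n_embd + n_embd)         # c_fc + c_proj
layer_norms = 2 * 2 * n_embd                                             # ln_1 + ln_2
per_layer = attn + mlp + layer_norms
final_ln = 2 * n_embd                                                    # ln_f
lm_head = 0                                                              # tied with wte

total = embeddings + n_layer * per_layer + final_ln + lm_head
print(total)  # 705280, matching the README
```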
model.py ADDED
@@ -0,0 +1,348 @@
+ """
+ Chess Transformer Model for the Chess Challenge.
+
+ This module provides a simple GPT-style transformer architecture
+ designed to fit within the 1M parameter constraint.
+
+ Key components:
+ - ChessConfig: Configuration class for model hyperparameters
+ - ChessForCausalLM: The main model class for next-move prediction
+ """
+
+ from __future__ import annotations
+
+ import math
+ from typing import Optional, Tuple, Union
+
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ from transformers import PretrainedConfig, PreTrainedModel
+ from transformers.modeling_outputs import CausalLMOutputWithPast
+
+
+ class ChessConfig(PretrainedConfig):
+     """
+     Configuration class for the Chess Transformer model.
+
+     This configuration is designed for a ~1M parameter model.
+     Students can adjust these values to explore different architectures.
+
+     Example parameter budget breakdown (vocab_size=1200, n_embd=128, n_layer=6, n_ctx=256):
+     - Embeddings (vocab): 1200 x 128 = 153,600
+     - Position Embeddings: 256 x 128 = 32,768
+     - Transformer Layers: 6 x ~120,000 = ~720,000
+     - LM Head (with weight tying): 0 (shared with embeddings)
+     - Total: ~906,000 parameters
+
+     Attributes:
+         vocab_size: Size of the vocabulary (number of unique moves).
+         n_embd: Embedding dimension (d_model).
+         n_layer: Number of transformer layers.
+         n_head: Number of attention heads.
+         n_ctx: Maximum sequence length (context window).
+         n_inner: Feed-forward inner dimension (falls back to 3 * n_embd when None).
+         dropout: Dropout probability.
+         layer_norm_epsilon: Epsilon for layer normalization.
+         tie_weights: Whether to tie embedding and output weights.
+     """
+
+     model_type = "chess_transformer"
+
+     def __init__(
+         self,
+         vocab_size: int = 84,
+         n_embd: int = 128,
+         n_layer: int = 7,
+         n_head: int = 4,
+         n_ctx: int = 512,
+         n_inner: Optional[int] = 256,
+         dropout: float = 0.1,
+         layer_norm_epsilon: float = 1e-5,
+         tie_weights: bool = True,
+         pad_token_id: int = 0,
+         bos_token_id: int = 1,
+         eos_token_id: int = 2,
+         **kwargs,
+     ):
+         super().__init__(
+             pad_token_id=pad_token_id,
+             bos_token_id=bos_token_id,
+             eos_token_id=eos_token_id,
+             **kwargs,
+         )
+
+         self.vocab_size = vocab_size
+         self.n_embd = n_embd
+         self.n_layer = n_layer
+         self.n_head = n_head
+         self.n_ctx = n_ctx
+         self.n_inner = n_inner if n_inner is not None else 3 * n_embd  # Reduced from 4x to 3x
+         self.dropout = dropout
+         self.layer_norm_epsilon = layer_norm_epsilon
+         self.tie_weights = tie_weights
+         # Inform HF base class about tying behavior
+         self.tie_word_embeddings = bool(tie_weights)
+
+
+ class MultiHeadAttention(nn.Module):
+     """
+     Multi-head self-attention module.
+
+     This is a standard scaled dot-product attention implementation
+     with causal masking for autoregressive generation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         assert config.n_embd % config.n_head == 0, \
+             f"n_embd ({config.n_embd}) must be divisible by n_head ({config.n_head})"
+
+         self.n_head = config.n_head
+         self.n_embd = config.n_embd
+         self.head_dim = config.n_embd // config.n_head
+
+         # Combined QKV projection for efficiency
+         self.c_attn = nn.Linear(config.n_embd, 3 * config.n_embd)
+         self.c_proj = nn.Linear(config.n_embd, config.n_embd)
+
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         batch_size, seq_len, _ = x.size()
+
+         qkv = self.c_attn(x)
+         q, k, v = qkv.split(self.n_embd, dim=2)
+
+         q = q.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         k = k.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+         v = v.view(batch_size, seq_len, self.n_head, self.head_dim).transpose(1, 2)
+
+         attn_weights = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.head_dim)
+
+         causal = torch.ones(seq_len, seq_len, device=x.device, dtype=torch.bool).tril()
+         attn_weights = attn_weights.masked_fill(~causal.view(1, 1, seq_len, seq_len), float("-inf"))
+
+         if attention_mask is not None:
+             attention_mask = attention_mask.to(torch.bool)
+             attn_weights = attn_weights.masked_fill(~attention_mask.view(batch_size, 1, 1, seq_len), float("-inf"))
+
+         attn_weights = F.softmax(attn_weights, dim=-1)
+         attn_weights = self.dropout(attn_weights)
+
+         attn_output = torch.matmul(attn_weights, v)
+
+         attn_output = attn_output.transpose(1, 2).contiguous().view(
+             batch_size, seq_len, self.n_embd
+         )
+
+         attn_output = self.c_proj(attn_output)
+
+         return attn_output
+
+
+ class FeedForward(nn.Module):
+     """
+     Feed-forward network (MLP) module.
+
+     Standard two-layer MLP with GELU activation.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.c_fc = nn.Linear(config.n_embd, config.n_inner)
+         self.c_proj = nn.Linear(config.n_inner, config.n_embd)
+         self.dropout = nn.Dropout(config.dropout)
+
+     def forward(self, x: torch.Tensor) -> torch.Tensor:
+         x = self.c_fc(x)
+         x = F.gelu(x)
+         x = self.c_proj(x)
+         x = self.dropout(x)
+         return x
+
+
+ class TransformerBlock(nn.Module):
+     """
+     A single transformer block with attention and feed-forward layers.
+
+     Uses pre-normalization (LayerNorm before attention/FFN) for better
+     training stability.
+     """
+
+     def __init__(self, config: ChessConfig):
+         super().__init__()
+
+         self.ln_1 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.attn = MultiHeadAttention(config)
+         self.ln_2 = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+         self.mlp = FeedForward(config)
+
+     def forward(
+         self,
+         x: torch.Tensor,
+         attention_mask: Optional[torch.Tensor] = None,
+     ) -> torch.Tensor:
+         x = x + self.attn(self.ln_1(x), attention_mask=attention_mask)
+         x = x + self.mlp(self.ln_2(x))
+         return x
+
+
+ class ChessForCausalLM(PreTrainedModel):
+     """
+     Chess Transformer for Causal Language Modeling (next-move prediction).
+
+     This model is designed to predict the next chess move given a sequence
+     of previous moves. It uses a GPT-style architecture with:
+     - Token embeddings for chess moves
+     - Learned positional embeddings
+     - Stacked transformer blocks
+     - Linear head for next-token prediction
+
+     The model supports weight tying between the embedding layer and the
+     output projection to save parameters.
+
+     Example:
+         >>> config = ChessConfig(vocab_size=1200, n_embd=128, n_layer=6)
+         >>> model = ChessForCausalLM(config)
+         >>> inputs = {"input_ids": torch.tensor([[1, 42, 87]])}
+         >>> outputs = model(**inputs)
+         >>> next_move_logits = outputs.logits[:, -1, :]
+     """
+
+     config_class = ChessConfig
+     base_model_prefix = "transformer"
+     supports_gradient_checkpointing = True
+     # Suppress missing-key warning for tied lm_head when loading
+     keys_to_ignore_on_load_missing = ["lm_head.weight"]
+
+     def __init__(self, config: ChessConfig):
+         super().__init__(config)
+
+         self.wte = nn.Embedding(config.vocab_size, config.n_embd)
+         self.wpe = nn.Embedding(config.n_ctx, config.n_embd)
+
+         self.drop = nn.Dropout(config.dropout)
+
+         self.h = nn.ModuleList([
+             TransformerBlock(config) for _ in range(config.n_layer)
+         ])
+
+         self.ln_f = nn.LayerNorm(config.n_embd, eps=config.layer_norm_epsilon)
+
+         self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False)
+
+         if config.tie_weights:
+             self._tied_weights_keys = ["lm_head.weight"]
+
+         self.post_init()
+
+         if config.tie_weights:
+             self.tie_weights()
+
+     def get_input_embeddings(self) -> nn.Module:
+         return self.wte
+
+     def set_input_embeddings(self, new_embeddings: nn.Module):
+         self.wte = new_embeddings
+         if getattr(self.config, "tie_weights", False):
+             self.tie_weights()
+
+     def get_output_embeddings(self) -> nn.Module:
+         return self.lm_head
+
+     def set_output_embeddings(self, new_embeddings: nn.Module):
+         self.lm_head = new_embeddings
+
+     def tie_weights(self):
+         if getattr(self.config, "tie_weights", False) or getattr(self.config, "tie_word_embeddings", False):
+             self._tie_or_clone_weights(self.lm_head, self.wte)
+
+     def _init_weights(self, module: nn.Module):
+         """Initialize weights following GPT-2 style."""
+         if isinstance(module, nn.Linear):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+             if module.bias is not None:
+                 torch.nn.init.zeros_(module.bias)
+         elif isinstance(module, nn.Embedding):
+             torch.nn.init.normal_(module.weight, mean=0.0, std=0.02)
+         elif isinstance(module, nn.LayerNorm):
+             torch.nn.init.ones_(module.weight)
+             torch.nn.init.zeros_(module.bias)
+
+     def forward(
+         self,
+         input_ids: torch.LongTensor,
+         attention_mask: Optional[torch.Tensor] = None,
+         position_ids: Optional[torch.LongTensor] = None,
+         labels: Optional[torch.LongTensor] = None,
+         return_dict: Optional[bool] = None,
+         **kwargs,
+     ) -> Union[Tuple, CausalLMOutputWithPast]:
+         """
+         Forward pass of the model.
+
+         Args:
+             input_ids: Token IDs of shape (batch_size, seq_len).
+             attention_mask: Attention mask of shape (batch_size, seq_len).
+             position_ids: Position IDs of shape (batch_size, seq_len).
+             labels: Labels for language modeling loss.
+             return_dict: Whether to return a ModelOutput object.
+
+         Returns:
+             CausalLMOutputWithPast containing loss (if labels provided) and logits.
+         """
+         return_dict = return_dict if return_dict is not None else self.config.use_return_dict
+
+         batch_size, seq_len = input_ids.size()
+         device = input_ids.device
+
+         if position_ids is None:
+             position_ids = torch.arange(seq_len, device=device).unsqueeze(0).expand(batch_size, -1)
+
+         token_embeds = self.wte(input_ids)
+         position_embeds = self.wpe(position_ids)
+         hidden_states = self.drop(token_embeds + position_embeds)
+
+         for block in self.h:
+             hidden_states = block(hidden_states, attention_mask=attention_mask)
+
+         hidden_states = self.ln_f(hidden_states)
+
+         logits = self.lm_head(hidden_states)
+
+         loss = None
+         if labels is not None:
+             shift_logits = logits[..., :-1, :].contiguous()
+             shift_labels = labels[..., 1:].contiguous()
+
+             loss_fct = nn.CrossEntropyLoss(ignore_index=-100)
+             loss = loss_fct(
+                 shift_logits.view(-1, shift_logits.size(-1)),
+                 shift_labels.view(-1),
+             )
+
+         if not return_dict:
+             output = (logits,)
+             return ((loss,) + output) if loss is not None else output
+
+         return CausalLMOutputWithPast(
+             loss=loss,
+             logits=logits,
+             past_key_values=None,
+             hidden_states=None,
+             attentions=None,
+         )
+
+
+ # Register the model with Auto classes for easy loading
+ from transformers import AutoConfig, AutoModelForCausalLM
+
+ AutoConfig.register("chess_transformer", ChessConfig)
+ AutoModelForCausalLM.register(ChessConfig, ChessForCausalLM)
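A quick smoke test for the module above (a sketch; it assumes `model.py` is importable from the working directory, mirrors the values checked into `config.json`, and uses a random batch, so the loss is only expected to be finite, not meaningful):

```python
import torch
from model import ChessConfig, ChessForCausalLM  # assumes model.py is on the import path

config = ChessConfig(vocab_size=84, n_embd=128, n_layer=4, n_head=4, n_ctx=256, n_inner=384)
model = ChessForCausalLM(config)

# Tied lm_head means the shared weight is counted once.
print(sum(p.numel() for p in model.parameters()))  # 705280

batch = torch.randint(4, config.vocab_size, (2, 32))  # random ids, skipping the 4 special tokens
out = model(input_ids=batch, labels=batch)
print(out.loss.item(), out.logits.shape)  # finite scalar loss, torch.Size([2, 32, 84])
```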
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3feb7d1cdd8b7853baf23f364986e78b5bdcf707f9537fb243b5a7e5fdd2185
+ size 2825520
special_tokens_map.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "bos_token": "[BOS]",
+   "eos_token": "[EOS]",
+   "pad_token": "[PAD]",
+   "unk_token": "[UNK]"
+ }
tokenizer.py ADDED
@@ -0,0 +1,218 @@
+ """
+ Custom Chess Tokenizer for the Chess Challenge.
+
+ This tokenizer treats each move as a sequence of structured tokens derived from the
+ extended UCI notation from the Lichess dataset (e.g., WPe2e4, BNg8f6).
+
+ The dataset format uses:
+ - W/B prefix for White/Black
+ - Piece letter: P=Pawn, N=Knight, B=Bishop, R=Rook, Q=Queen, K=King
+ - Source and destination squares (e.g., e2e4)
+ - Special suffixes: (x)=capture, (+)=check, (+*)=checkmate, (o)/(O)=castling
+ """
+
+ from __future__ import annotations
+
+ import json
+ import os
+ import re
+ from typing import Dict, List, Optional, Sequence, Union
+
+ from transformers import PreTrainedTokenizer
+
+
+ _MOVE_RE = re.compile(
+     r"^(?P<side>[WB])"
+     r"(?P<piece>[PNBRQK])"
+     r"(?P<src>[a-h][1-8])"
+     r"(?P<dst>[a-h][1-8])"
+     r"(?P<rest>.*)$"
+ )
+
+
+ class ChessTokenizer(PreTrainedTokenizer):
+     """
+     A structured tokenizer for chess moves.
+
+     Each move is decomposed into:
+     SIDE_(W/B), PIECE_(P/N/B/R/Q/K), SQ_<src>, SQ_<dst>,
+     and optional flags: CAPTURE, CHECK, MATE, CASTLE, PROMO_(Q/R/B/N).
+
+     This avoids UNK explosions when using a move-as-token vocabulary.
+     """
+
+     model_input_names = ["input_ids", "attention_mask"]
+     vocab_files_names = {"vocab_file": "vocab.json"}
+
+     # Special tokens
+     PAD_TOKEN = "[PAD]"
+     BOS_TOKEN = "[BOS]"
+     EOS_TOKEN = "[EOS]"
+     UNK_TOKEN = "[UNK]"
+
+     # Fixed token set
+     SIDE_W = "SIDE_W"
+     SIDE_B = "SIDE_B"
+
+     PIECES = ["P", "N", "B", "R", "Q", "K"]
+
+     PROMO_PREFIX = "PROMO_"
+     CAPTURE = "CAPTURE"
+     CHECK = "CHECK"
+     MATE = "MATE"
+     CASTLE = "CASTLE"
+
+     def __init__(
+         self,
+         vocab_file: Optional[str] = None,
+         vocab: Optional[Dict[str, int]] = None,
+         **kwargs,
+     ):
+         kwargs.pop("pad_token", None)
+         kwargs.pop("bos_token", None)
+         kwargs.pop("eos_token", None)
+         kwargs.pop("unk_token", None)
+
+         self._pad_token = self.PAD_TOKEN
+         self._bos_token = self.BOS_TOKEN
+         self._eos_token = self.EOS_TOKEN
+         self._unk_token = self.UNK_TOKEN
+
+         if vocab is not None:
+             self._vocab = vocab
+         elif vocab_file is not None and os.path.exists(vocab_file):
+             with open(vocab_file, "r", encoding="utf-8") as f:
+                 self._vocab = json.load(f)
+         else:
+             self._vocab = self._build_fixed_vocab()
+
+         self._ids_to_tokens = {v: k for k, v in self._vocab.items()}
+
+         super().__init__(
+             pad_token=self._pad_token,
+             bos_token=self._bos_token,
+             eos_token=self._eos_token,
+             unk_token=self._unk_token,
+             **kwargs,
+         )
+
+     def _build_fixed_vocab(self) -> Dict[str, int]:
+         special = [self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN]
+         sides = [self.SIDE_W, self.SIDE_B]
+         pieces = [f"PIECE_{p}" for p in self.PIECES]
+         squares = [f"SQ_{file}{rank}" for file in "abcdefgh" for rank in "12345678"]
+         promos = [f"{self.PROMO_PREFIX}{p}" for p in ["Q", "R", "B", "N"]]
+         flags = [self.CAPTURE, self.CHECK, self.MATE, self.CASTLE]
+         tokens = special + sides + pieces + squares + promos + flags
+         return {tok: i for i, tok in enumerate(tokens)}
+
+     @classmethod
+     def build_vocab_from_dataset(cls, *args, **kwargs) -> "ChessTokenizer":
+         """
+         Kept for API compatibility with the template training script.
+         This tokenizer uses a fixed vocabulary (no dataset-dependent pruning).
+         """
+         return cls()
+
+     @classmethod
+     def build_vocab_from_iterator(cls, *args, **kwargs) -> "ChessTokenizer":
+         """
+         Kept for API compatibility. This tokenizer uses a fixed vocabulary.
+         """
+         return cls()
+
+     @property
+     def vocab_size(self) -> int:
+         return len(self._vocab)
+
+     def get_vocab(self) -> Dict[str, int]:
+         return dict(self._vocab)
+
+     def _tokenize(self, text: str) -> List[str]:
+         tokens: List[str] = []
+         moves = text.strip().split()
+         for mv in moves:
+             tokens.extend(self._tokenize_move(mv))
+         return tokens
+
+     def _tokenize_move(self, move: str) -> List[str]:
+         m = _MOVE_RE.match(move)
+         if not m:
+             return [self.UNK_TOKEN]
+
+         side = m.group("side")
+         piece = m.group("piece")
+         src = m.group("src")
+         dst = m.group("dst")
+         rest = m.group("rest") or ""
+
+         out: List[str] = []
+         out.append(self.SIDE_W if side == "W" else self.SIDE_B)
+         out.append(f"PIECE_{piece}")
+         out.append(f"SQ_{src}")
+         out.append(f"SQ_{dst}")
+
+         promo = self._parse_promotion(rest)
+         if promo is not None:
+             out.append(f"{self.PROMO_PREFIX}{promo}")
+
+         if "(x)" in rest or "x" in rest:
+             out.append(self.CAPTURE)
+
+         if "(+*)" in rest or "++" in rest or "#" in rest:
+             out.append(self.MATE)
+         elif "(+)" in rest or "+" in rest:
+             out.append(self.CHECK)
+
+         if "(o)" in rest or "(O)" in rest or "O-O" in rest:
+             out.append(self.CASTLE)
+
+         return out
+
+     def _parse_promotion(self, rest: str) -> Optional[str]:
+         m = re.search(r"=([QRBNqrbn])", rest)
+         if m:
+             return m.group(1).upper()
+         return None
+
+     def _convert_token_to_id(self, token: str) -> int:
+         return self._vocab.get(token, self._vocab[self.UNK_TOKEN])
+
+     def _convert_id_to_token(self, index: int) -> str:
+         return self._ids_to_tokens.get(index, self.UNK_TOKEN)
+
+     def convert_tokens_to_string(self, tokens: List[str]) -> str:
+         special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+         out: List[str] = []
+         for t in tokens:
+             if t in special:
+                 continue
+             out.append(t)
+         return " ".join(out)
+
+     def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> tuple:
+         if not os.path.isdir(save_directory):
+             os.makedirs(save_directory, exist_ok=True)
+
+         vocab_file = os.path.join(
+             save_directory,
+             (filename_prefix + "-" if filename_prefix else "") + "vocab.json",
+         )
+         with open(vocab_file, "w", encoding="utf-8") as f:
+             json.dump(self._vocab, f, ensure_ascii=False, indent=2)
+
+         return (vocab_file,)
+
+     def decode(self, token_ids: Union[int, Sequence[int]], skip_special_tokens: bool = False, **kwargs) -> str:
+         if isinstance(token_ids, int):
+             ids = [token_ids]
+         elif "torch" in str(type(token_ids)):
+             ids = token_ids.detach().cpu().flatten().tolist()
+         else:
+             ids = list(token_ids)
+
+         toks = [self._convert_id_to_token(i) for i in ids]
+         if skip_special_tokens:
+             special = {self.PAD_TOKEN, self.BOS_TOKEN, self.EOS_TOKEN, self.UNK_TOKEN}
+             toks = [t for t in toks if t not in special]
+         return self.convert_tokens_to_string(toks)
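For illustration, a sketch of the structured decomposition (assuming `tokenizer.py` is importable from the working directory; the capture flag follows the `(x)` suffix convention quoted in the module docstring):

```python
from tokenizer import ChessTokenizer

tok = ChessTokenizer()  # no vocab file given, so it falls back to the fixed 84-token vocabulary

print(tok.tokenize("WPe2e4 BNg8f6"))
# ['SIDE_W', 'PIECE_P', 'SQ_e2', 'SQ_e4', 'SIDE_B', 'PIECE_N', 'SQ_g8', 'SQ_f6']

print(tok.tokenize("WPe4d5(x)"))
# ['SIDE_W', 'PIECE_P', 'SQ_e4', 'SQ_d5', 'CAPTURE']

ids = tok("WPe2e4 BNg8f6")["input_ids"]
print(tok.decode(ids))  # round-trips to the token names, not to the original move strings
```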
tokenizer_config.json ADDED
@@ -0,0 +1,50 @@
+ {
+   "added_tokens_decoder": {
+     "0": {
+       "content": "[PAD]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "1": {
+       "content": "[BOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "[EOS]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "3": {
+       "content": "[UNK]",
+       "lstrip": false,
+       "normalized": false,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "auto_map": {
+     "AutoTokenizer": [
+       "tokenizer.ChessTokenizer",
+       null
+     ]
+   },
+   "bos_token": "[BOS]",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "[EOS]",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "[PAD]",
+   "tokenizer_class": "ChessTokenizer",
+   "unk_token": "[UNK]"
+ }
vocab.json ADDED
@@ -0,0 +1,86 @@
+ {
+   "[PAD]": 0,
+   "[BOS]": 1,
+   "[EOS]": 2,
+   "[UNK]": 3,
+   "SIDE_W": 4,
+   "SIDE_B": 5,
+   "PIECE_P": 6,
+   "PIECE_N": 7,
+   "PIECE_B": 8,
+   "PIECE_R": 9,
+   "PIECE_Q": 10,
+   "PIECE_K": 11,
+   "SQ_a1": 12,
+   "SQ_a2": 13,
+   "SQ_a3": 14,
+   "SQ_a4": 15,
+   "SQ_a5": 16,
+   "SQ_a6": 17,
+   "SQ_a7": 18,
+   "SQ_a8": 19,
+   "SQ_b1": 20,
+   "SQ_b2": 21,
+   "SQ_b3": 22,
+   "SQ_b4": 23,
+   "SQ_b5": 24,
+   "SQ_b6": 25,
+   "SQ_b7": 26,
+   "SQ_b8": 27,
+   "SQ_c1": 28,
+   "SQ_c2": 29,
+   "SQ_c3": 30,
+   "SQ_c4": 31,
+   "SQ_c5": 32,
+   "SQ_c6": 33,
+   "SQ_c7": 34,
+   "SQ_c8": 35,
+   "SQ_d1": 36,
+   "SQ_d2": 37,
+   "SQ_d3": 38,
+   "SQ_d4": 39,
+   "SQ_d5": 40,
+   "SQ_d6": 41,
+   "SQ_d7": 42,
+   "SQ_d8": 43,
+   "SQ_e1": 44,
+   "SQ_e2": 45,
+   "SQ_e3": 46,
+   "SQ_e4": 47,
+   "SQ_e5": 48,
+   "SQ_e6": 49,
+   "SQ_e7": 50,
+   "SQ_e8": 51,
+   "SQ_f1": 52,
+   "SQ_f2": 53,
+   "SQ_f3": 54,
+   "SQ_f4": 55,
+   "SQ_f5": 56,
+   "SQ_f6": 57,
+   "SQ_f7": 58,
+   "SQ_f8": 59,
+   "SQ_g1": 60,
+   "SQ_g2": 61,
+   "SQ_g3": 62,
+   "SQ_g4": 63,
+   "SQ_g5": 64,
+   "SQ_g6": 65,
+   "SQ_g7": 66,
+   "SQ_g8": 67,
+   "SQ_h1": 68,
+   "SQ_h2": 69,
+   "SQ_h3": 70,
+   "SQ_h4": 71,
+   "SQ_h5": 72,
+   "SQ_h6": 73,
+   "SQ_h7": 74,
+   "SQ_h8": 75,
+   "PROMO_Q": 76,
+   "PROMO_R": 77,
+   "PROMO_B": 78,
+   "PROMO_N": 79,
+   "CAPTURE": 80,
+   "CHECK": 81,
+   "MATE": 82,
+   "CASTLE": 83
+ }
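This file is the 84-entry fixed vocabulary built by `_build_fixed_vocab` in `tokenizer.py`: 4 special tokens + 2 sides + 6 pieces + 64 squares + 4 promotions + 4 flags = 84. A small sanity check (assuming `vocab.json` and `tokenizer.py` sit in the current directory):

```python
import json
from tokenizer import ChessTokenizer

with open("vocab.json", encoding="utf-8") as f:
    vocab = json.load(f)

# The checked-in file should match the fixed vocabulary the tokenizer builds by default.
assert vocab == ChessTokenizer()._build_fixed_vocab()
assert len(vocab) == 4 + 2 + 6 + 64 + 4 + 4 == 84
```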