tsor13 committed on
Commit e29aaf3 · verified · 1 Parent(s): b2977fe

Initial upload of fine-tuned Gemma + custom tokenizer

Files changed (2)
  1. gemma_special_tokenizer.py +407 -0
  2. tokenizer_config.json +3 -3
gemma_special_tokenizer.py ADDED
@@ -0,0 +1,407 @@
+ """
+ Custom Gemma tokenizer for the special format.
+
+ This tokenizer implements the special format for message processing:
+ Format: {description}\n{input}\n<<{output}>>\n
+
+ The special format wraps output content in double angle brackets (<<output>>)
+ and includes loss-computation flags for training. The current defaults wrap
+ outputs in Gemma's <start_of_turn> / <end_of_turn> tokens instead of << / >>.
+
+ To save:
+     uv run tokenizers/gemma_special_tokenizer.py
+ which will save the tokenizer to the repos/special-gemma-tokenizer directory.
+
+     mkdir repos/special12b
+     # copy model over
+     cp models_v8/base_modified-google-gemma-3-12b-pt-/models/_special/checkpoint-8/* repos/special12b/
+     # copy tokenizer over
+     cp repos/special-gemma-tokenizer/* repos/special12b/
+     # upload to hf
+     uv run upload_to_hf.py \
+         --folder repos/special12b \
+         --repo-id tsor13/special12b
+ """
+
+ import difflib
+ import json
+ import os
+ import warnings
+ from typing import Any, Dict, List, Union
+
+ from transformers import AutoTokenizer
+ from transformers.models.gemma.tokenization_gemma_fast import GemmaTokenizerFast
+
+
+ class GemmaSpecialTokenizer(GemmaTokenizerFast):
+     """
+     Custom tokenizer for Gemma models that implements special-format message processing.
+
+     This tokenizer formats messages so that:
+     - Description and input content are rendered as plain text with newlines
+     - Output content is wrapped between start_string and end_string
+     - Loss is computed only on the wrapped output sections
+
+     Attributes:
+         start_string (str): String that opens an output span (default "<start_of_turn>"; originally "<<")
+         end_string (str): String that closes an output span (default "<end_of_turn>"; originally ">>")
+     """
+
+     def __init__(self, *args, **kwargs):
+         """
+         Initialize the custom tokenizer.
+
+         Accepts the same arguments as GemmaTokenizerFast.
+         """
+         super().__init__(*args, **kwargs)
+
+         # Store the start/end strings for the special format
+         # (earlier versions used "<<" and ">>")
+         self.start_string = "<start_of_turn>"
+         self.end_string = "<end_of_turn>"
+
+         # Add custom attributes to the tokenizer config for saving/loading
+         if not hasattr(self, "init_kwargs"):
+             self.init_kwargs = {}
+         self.init_kwargs["start_string"] = self.start_string
+         self.init_kwargs["end_string"] = self.end_string
+
+     @classmethod
+     def from_gemma_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
+         """
+         Load a tokenizer from a pretrained Gemma model or path.
+
+         This method ensures our custom class is used instead of the base GemmaTokenizerFast.
+         """
+         # TODO: there's a warning here when loading the tokenizer from the hub
+         # Load the base tokenizer first to get all configuration
+         base_tokenizer = GemmaTokenizerFast.from_pretrained(
+             pretrained_model_name_or_path, *args, **kwargs
+         )
+
+         # Create a new instance of our custom class by copying the base tokenizer
+         custom_tokenizer = cls.__new__(cls)
+
+         # Copy all attributes from the base tokenizer
+         for attr, value in base_tokenizer.__dict__.items():
+             setattr(custom_tokenizer, attr, value)
+
+         # Initialize our custom attributes (earlier versions used "<<" and ">>")
+         custom_tokenizer.start_string = "<start_of_turn>"
+         custom_tokenizer.end_string = "<end_of_turn>"
+
+         # Update init_kwargs to include our custom attributes
+         if not hasattr(custom_tokenizer, "init_kwargs"):
+             custom_tokenizer.init_kwargs = {}
+         custom_tokenizer.init_kwargs["start_string"] = custom_tokenizer.start_string
+         custom_tokenizer.init_kwargs["end_string"] = custom_tokenizer.end_string
+
+         return custom_tokenizer
+
+     def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
+         """
+         Save the tokenizer to a directory, including the custom configuration.
+         """
+         # Call the parent save method
+         super().save_pretrained(save_directory, **kwargs)
+
+         # Save the custom configuration
+         config_file = os.path.join(save_directory, "tokenizer_config.json")
+         if os.path.exists(config_file):
+             with open(config_file, "r") as f:
+                 config = json.load(f)
+         else:
+             config = {}
+
+         # Add our custom class info
+         config["tokenizer_class"] = "GemmaSpecialTokenizer"
+         config["start_string"] = self.start_string
+         config["end_string"] = self.end_string
+         # Point AutoTokenizer at the custom class in the uploaded file
+         config["auto_map"] = {
+             "AutoTokenizer": [
+                 "gemma_special_tokenizer.GemmaSpecialTokenizer",
+                 "gemma_special_tokenizer.GemmaSpecialTokenizer",
+             ]
+         }
+
+         with open(config_file, "w") as f:
+             json.dump(config, f, indent=2)
+
+     def messages_to_loss_texts(
+         self,
+         messages: List[Dict[str, Any]],
+         loss_on_start_token: bool = False,
+     ) -> List[Dict[str, Any]]:
+         """
+         Convert messages (description / input / output) to texts (text / compute_loss),
+         where compute_loss marks whether loss should be calculated on that text
+         during training.
+         """
+         texts = []
+         for message in messages:
+             role = message["role"]
+             content = message["content"]
+
+             if role == "description":
+                 text = f"{content}\n"
+                 texts.append({"text": text, "compute_loss": False, **message})
+             elif role == "input":
+                 text = f"{content}\n"
+                 texts.append({"text": text, "compute_loss": False, **message})
+             elif role == "output":
+                 # Wrap output content between start_string and end_string
+                 # (originally double angle brackets: <<content>>)
+                 if loss_on_start_token:
+                     text = f"{self.start_string}{content}{self.end_string}\n"
+                     texts.append({"text": text, "compute_loss": True, **message})
+                 else:
+                     texts.append({"text": self.start_string, "compute_loss": False, **message})
+                     text = f"{content}{self.end_string}\n"
+                     texts.append({"text": text, "compute_loss": True, **message})
+             else:
+                 raise ValueError(f"Unknown role: {role}. Must be description, input, or output.")
+
+         return texts
+
+     def messages_to_text(
+         self,
+         messages: List[Dict[str, Any]],
+         start_generation: bool = False,
+     ) -> str:
+         """
+         Convert messages (description / input / output) to raw text.
+         """
+         texts = self.messages_to_loss_texts(messages)
+         text = "".join([t["text"] for t in texts])
+         if start_generation:
+             text = text + self.start_string
+         return text
+
+     def tokenize_messages(
+         self,
+         messages: List[Dict[str, Any]] | List[List[Dict[str, Any]]],
+         start_generation: bool = False,
+         **kwargs,
+     ):
+         """
+         Tokenize from messages to texts. Supports batching. Good for generation.
+         """
+         if isinstance(messages, list) and isinstance(messages[0], list):
+             # Handle a list of lists of messages
+             all_texts = []
+             for message_list in messages:
+                 texts = self.messages_to_text(message_list, start_generation)
+                 all_texts.append(texts)
+         else:
+             # Handle a single list of messages
+             texts = self.messages_to_text(messages, start_generation)
+             all_texts = [texts]
+
+         # Tokenize all texts
+         processed = self(text=all_texts, **kwargs)
+         return processed
+
+     def tokenize_loss_texts(
+         self,
+         texts: List[Dict[str, Any]],
+         loss_on_start_token: bool = False,
+         loss_on_eos: bool = False,
+         include_eos: bool = True,
+     ):
+         """
+         Tokenize texts (text / compute_loss) to tokenized texts
+         (input_ids / attention_mask / labels).
+
+         Needs more complex logic to handle the back-and-forth labeling.
+         """
+         if loss_on_eos:
+             raise ValueError("Loss on EOS is not currently supported.")
+
+         # Handle single string input
+         if isinstance(texts, str):
+             processed = self(text=texts)
+             # Add EOS token if needed
+             if (self.eos_token_id is not None and
+                     processed["input_ids"][-1] != self.eos_token_id):
+                 processed["input_ids"] = processed["input_ids"] + [self.eos_token_id]
+                 processed["attention_mask"] = processed["attention_mask"] + [1]
+             return processed
+
+         # Handle a list of text dictionaries
+         all_processed = []
+         all_texts = ""
+         example_inds = []
+         dataset_inds = []
+
+         for i, item in enumerate(texts):
+             processed = self(text=item["text"])
+
+             # Remove the BOS token from all but the first item
+             if i != 0 and self.bos_token_id == processed["input_ids"][0]:
+                 processed["input_ids"] = processed["input_ids"][1:]
+                 processed["attention_mask"] = processed["attention_mask"][1:]
+
+             # Remove the EOS token if present at the end
+             if processed["input_ids"][-1] == self.eos_token_id:
+                 processed["input_ids"] = processed["input_ids"][:-1]
+                 processed["attention_mask"] = processed["attention_mask"][:-1]
+
+             # Check for an EOS token in the middle (with special handling for <|im_end|>)
+             if self.eos_token_id in processed["input_ids"]:
+                 if not self.decode([self.eos_token_id]) == "<|im_end|>":
+                     raise ValueError(f"EOS token is present in input_ids: {processed['input_ids']}. Not currently supported.")
+
+             # Set labels based on the compute_loss flag
+             if item["compute_loss"]:
+                 processed["labels"] = processed["input_ids"].copy()
+             else:
+                 processed["labels"] = [-100] * len(processed["input_ids"])
+
+             # Remove duplicate BOS tokens (defensive; normally stripped above)
+             if all_processed:
+                 if processed["input_ids"][0] == self.bos_token_id:
+                     processed["input_ids"] = processed["input_ids"][1:]
+                     processed["attention_mask"] = processed["attention_mask"][1:]
+                     processed["labels"] = processed["labels"][1:]
+
+             all_processed.append(processed)
+             all_texts += item["text"]
+
+             # Handle example indices (-1 when absent)
+             this_num = item.get("example_ind")
+             this_num = -1 if this_num is None else this_num
+             example_inds.extend([this_num] * len(processed["input_ids"]))
+
+             # Handle dataset indices (-1 when absent)
+             dataset_ind = item.get("data_id")
+             dataset_ind = -1 if dataset_ind is None else dataset_ind
+             dataset_inds.extend([dataset_ind] * len(processed["input_ids"]))
+
+         # Combine all processed results
+         processed = all_processed[0].copy()
+         processed["input_ids"] = [tok for p in all_processed for tok in p["input_ids"]]
+         processed["attention_mask"] = [tok for p in all_processed for tok in p["attention_mask"]]
+         processed["labels"] = [tok for p in all_processed for tok in p["labels"]]
+         processed["example_inds"] = example_inds
+         processed["data_ids"] = dataset_inds
+
+         # Validate by tokenizing all_texts at once and comparing
+         processed_all = self(text=all_texts)
+         if len(processed_all["input_ids"]) != len(processed["input_ids"]):
+             warnings.warn(f"All texts are not the same length as the first text. Please check your dataset. {len(processed_all['input_ids'])} != {len(processed['input_ids'])}")
+
+             # Generate a text diff for debugging
+             all_text = self.decode(processed_all["input_ids"], skip_special_tokens=False)
+             processed_text = self.decode(processed["input_ids"], skip_special_tokens=False)
+             diff = difflib.unified_diff(all_text.splitlines(), processed_text.splitlines())
+             print("Diff between texts:")
+             print("\n".join(diff))
+
+             # Token diff
+             all_tokens_str = "\n".join([str(s) for s in processed_all["input_ids"]])
+             processed_tokens_str = "\n".join([str(s) for s in processed["input_ids"]])
+             token_diff = difflib.unified_diff(all_tokens_str.splitlines(), processed_tokens_str.splitlines())
+             print("Diff between tokenized texts:")
+             print("\n".join(token_diff))
+
+         # Add EOS token if needed
+         if (self.eos_token_id is not None and
+                 processed["input_ids"][-1] != self.eos_token_id):
+             processed["input_ids"] = processed["input_ids"] + [self.eos_token_id]
+             processed["example_inds"] = processed["example_inds"] + [-1]
+             processed["attention_mask"] = processed["attention_mask"] + [1]
+             if processed["labels"] is not None:
+                 if loss_on_eos:
+                     processed["labels"] = processed["labels"] + [self.eos_token_id]
+                 else:
+                     processed["labels"] = processed["labels"] + [-100]
+             if "data_ids" in processed:
+                 processed["data_ids"] = processed["data_ids"] + [-1]
+
+         if not include_eos:
+             # Remove the EOS token if present
+             if processed["input_ids"][-1] == self.eos_token_id:
+                 processed["input_ids"] = processed["input_ids"][:-1]
+                 processed["attention_mask"] = processed["attention_mask"][:-1]
+                 processed["labels"] = processed["labels"][:-1]
+                 processed["example_inds"] = processed["example_inds"][:-1]
+                 processed["data_ids"] = processed["data_ids"][:-1]
+
+         return processed
+
+     def tokenize_messages_with_loss(
+         self,
+         messages: List[Dict[str, Any]],
+         loss_on_start_token: bool = False,
+         loss_on_eos: bool = False,
+         include_eos: bool = True,
+     ) -> Dict[str, Any]:
+         """
+         Tokenize from messages to tokenized texts with loss labels applied.
+
+         (Renamed from tokenize_messages, which this definition previously
+         shadowed; the generation-oriented tokenize_messages is defined above.)
+         """
+         # First convert messages to texts with loss-computation flags
+         texts = self.messages_to_loss_texts(messages, loss_on_start_token)
+
+         # Then tokenize the texts
+         return self.tokenize_loss_texts(texts, loss_on_eos=loss_on_eos, include_eos=include_eos)
+
+
+ # Register the tokenizer class for AutoTokenizer
+ # Note: the slow-tokenizer variant is not registered, to avoid conflicts
+ AutoTokenizer.register("GemmaSpecialTokenizer", slow_tokenizer_class=None, fast_tokenizer_class=GemmaSpecialTokenizer)
+ # AutoTokenizer.register("GemmaSpecialTokenizerSlow", slow_tokenizer_class=GemmaSpecialTokenizerSlow, fast_tokenizer_class=None)
+
+
+ if __name__ == "__main__":
+     # Example usage
+     # for the first load
+     custom_tokenizer = GemmaSpecialTokenizer.from_gemma_pretrained("google/gemma-3-1b-pt")
+
+     # for subsequent loads:
+     # custom_tokenizer = GemmaSpecialTokenizer.from_pretrained("tsor13/special-gemma-12b-pt")
+     # custom_tokenizer = GemmaSpecialTokenizer.from_pretrained("repos/special-gemma-12b-pt")
+
+     # Test messages in role/content format
+     messages = [
+         {"role": "description", "content": "This is a test task"},
+         {"role": "input", "content": "What is 2+2?"},
+         {"role": "output", "content": "4"},
+         {"role": "input", "content": "What is 3+3?"},
+         # {"role": "output", "content": "6"}
+     ]
+
+     # tokenized = custom_tokenizer.tokenize_messages(messages, start_generation=True, return_tensors="pt")
+     # print(tokenized)
+
+     # Convert messages to texts with loss flags
+     texts = custom_tokenizer.messages_to_loss_texts(messages)
+     print(texts)
+
+     text = custom_tokenizer.messages_to_text(messages, start_generation=True)
+     print(text)
+
+     print("\nTesting save/load cycle:")
+     # Test saving and loading
+     tokenizer_path = "repos/special-gemma-tokenizer"
+     custom_tokenizer.save_pretrained(tokenizer_path)
+     print("Tokenizer saved successfully!")
+
+     # Also save this file alongside the tokenizer so auto_map can find it
+     import shutil
+     shutil.copy(__file__, os.path.join(tokenizer_path, "gemma_special_tokenizer.py"))
+     print("gemma_special_tokenizer.py saved successfully!")
tokenizer_config.json CHANGED
@@ -51340,13 +51340,13 @@
   "sp_model_kwargs": null,
   "spaces_between_special_tokens": false,
   "start_string": "<start_of_turn>",
- "tokenizer_class": "GemmaBracketTokenizer",
+ "tokenizer_class": "GemmaSpecialTokenizer",
   "unk_token": "<unk>",
   "use_default_system_prompt": false,
   "auto_map": {
     "AutoTokenizer": [
-     "gemma_bracket_tokenizer.GemmaBracketTokenizer",
-     "gemma_bracket_tokenizer.GemmaBracketTokenizer"
+     "gemma_special_tokenizer.GemmaSpecialTokenizer",
+     "gemma_special_tokenizer.GemmaSpecialTokenizer"
     ]
   }
 }
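Since tokenizer_config.json now maps AutoTokenizer to gemma_special_tokenizer.GemmaSpecialTokenizer via auto_map, loading from the Hub has to opt in to remote code. A minimal loading sketch, assuming the tsor13/special12b repo id mentioned in the module docstring:

    from transformers import AutoTokenizer

    # trust_remote_code lets AutoTokenizer import GemmaSpecialTokenizer from
    # the gemma_special_tokenizer.py file shipped alongside the weights
    tokenizer = AutoTokenizer.from_pretrained("tsor13/special12b", trust_remote_code=True)

    prompt = tokenizer.messages_to_text(
        [
            {"role": "description", "content": "This is a test task"},
            {"role": "input", "content": "What is 2+2?"},
        ],
        start_generation=True,
    )
    print(prompt)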