rtferraz committed on
Commit 6ccb9e6 · verified · 1 Parent(s): 1dfd4e2

Add pretrain.py — pretrain_domain_model with HF Trainer, cosine schedule, DataCollatorForLanguageModeling

src/domain_tokenizer/training/pretrain.py ADDED
@@ -0,0 +1,132 @@
+"""
+Pre-training function for DomainTransformer.
+
+Uses HuggingFace Trainer with DataCollatorForLanguageModeling(mlm=False)
+which automatically sets labels = input_ids and masks padding with -100.
+
+Usage:
+    from domain_tokenizer.training import pretrain_domain_model, prepare_clm_dataset
+    dataset = prepare_clm_dataset(user_sequences, builder, hf_tokenizer, block_size=512)
+    config = DomainTransformerConfig.from_preset("24m", vocab_size=hf_tokenizer.vocab_size)
+    model = DomainTransformerForCausalLM(config)
+    pretrain_domain_model(model, hf_tokenizer, dataset)
+"""
+
+import logging
+from typing import Optional
+
+from datasets import Dataset as HFDataset
+from transformers import (
+    DataCollatorForLanguageModeling,
+    PreTrainedTokenizerFast,
+    Trainer,
+    TrainingArguments,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def pretrain_domain_model(
+    model,
+    tokenizer: PreTrainedTokenizerFast,
+    train_dataset: HFDataset,
+    eval_dataset: Optional[HFDataset] = None,
+    output_dir: str = "./domain_pretrain_checkpoints",
+    hub_model_id: Optional[str] = None,
+    num_epochs: int = 10,
+    per_device_batch_size: int = 32,
+    gradient_accumulation_steps: int = 4,
+    learning_rate: float = 3e-4,
+    lr_scheduler_type: str = "cosine",
+    warmup_steps: int = 500,
+    weight_decay: float = 0.01,
+    max_grad_norm: float = 1.0,
+    bf16: bool = False,
+    fp16: bool = False,
+    logging_steps: int = 50,
+    save_steps: int = 500,
+    eval_steps: int = 500,
+    save_total_limit: int = 3,
+    dataloader_num_workers: int = 4,
+    report_to: str = "none",
+    run_name: Optional[str] = None,
+    seed: int = 42,
+    gradient_checkpointing: bool = False,
+    resume_from_checkpoint: Optional[str] = None,
+    **extra_training_args,
+) -> Trainer:
+    """Pre-train a DomainTransformerForCausalLM with HF Trainer.
+
+    The dataset should be packed via prepare_clm_dataset() for 100% token utilization.
+
+    Returns:
+        The Trainer instance (for inspection, continued training, etc.).
+    """
+    if tokenizer.pad_token_id is None:
+        raise ValueError(
+            "Tokenizer must have pad_token set. "
+            "DomainTokenizerBuilder.build() should set this automatically."
+        )
+
+    data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)
+    push_to_hub = hub_model_id is not None
+
+    training_args = TrainingArguments(
+        output_dir=output_dir,
+        num_train_epochs=num_epochs,
+        per_device_train_batch_size=per_device_batch_size,
+        per_device_eval_batch_size=per_device_batch_size,
+        gradient_accumulation_steps=gradient_accumulation_steps,
+        learning_rate=learning_rate,
+        lr_scheduler_type=lr_scheduler_type,
+        warmup_steps=warmup_steps,
+        weight_decay=weight_decay,
+        max_grad_norm=max_grad_norm,
+        bf16=bf16, fp16=fp16,
+        logging_strategy="steps",
+        logging_steps=logging_steps,
+        logging_first_step=True,
+        disable_tqdm=True,
+        eval_strategy="steps" if eval_dataset else "no",
+        eval_steps=eval_steps if eval_dataset else None,
+        save_strategy="steps",
+        save_steps=save_steps,
+        save_total_limit=save_total_limit,
+        push_to_hub=push_to_hub,
+        hub_model_id=hub_model_id if push_to_hub else None,
+        dataloader_num_workers=dataloader_num_workers,
+        report_to=report_to,
+        run_name=run_name,
+        seed=seed,
+        gradient_checkpointing=gradient_checkpointing,
+        remove_unused_columns=True,
+        **extra_training_args,
+    )
+
+    effective_batch = per_device_batch_size * gradient_accumulation_steps
+    n_params = sum(p.numel() for p in model.parameters())
+
+    logger.info(f"=== Domain Pre-Training ===")
+    logger.info(f" Model params: {n_params:,}")
+    logger.info(f" Train samples: {len(train_dataset):,}")
+    logger.info(f" Block size: {len(train_dataset[0]['input_ids'])}")
+    logger.info(f" Batch size: {per_device_batch_size} x {gradient_accumulation_steps} = {effective_batch}")
+    logger.info(f" Epochs: {num_epochs}, LR: {learning_rate} ({lr_scheduler_type})")
+    logger.info(f" Push to hub: {hub_model_id if push_to_hub else 'disabled'}")
+
+    trainer = Trainer(
+        model=model,
+        args=training_args,
+        train_dataset=train_dataset,
+        eval_dataset=eval_dataset,
+        data_collator=data_collator,
+        processing_class=tokenizer,
+    )
+
+    trainer.train(resume_from_checkpoint=resume_from_checkpoint)
+
+    if push_to_hub:
+        logger.info(f"Pushing model to hub: {hub_model_id}")
+        trainer.push_to_hub()
+
+    return trainer
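A quick sanity check, not part of this commit, of the behavior the module docstring relies on: with mlm=False, DataCollatorForLanguageModeling copies input_ids into labels and replaces padded positions with -100 so the causal LM loss ignores them. The "gpt2" tokenizer below is only an assumed stand-in for illustration; the real pipeline uses the tokenizer built by DomainTokenizerBuilder.

from transformers import AutoTokenizer, DataCollatorForLanguageModeling

# Stand-in tokenizer (assumption): gpt2 ships without a pad token, so reuse eos as pad.
tok = AutoTokenizer.from_pretrained("gpt2")
tok.pad_token = tok.eos_token

collator = DataCollatorForLanguageModeling(tokenizer=tok, mlm=False)
features = [
    {"input_ids": tok("the quick brown fox")["input_ids"]},
    {"input_ids": tok("hello")["input_ids"]},
]
batch = collator(features)

# labels mirror input_ids, except that padded positions are set to -100
# and therefore skipped by the cross-entropy loss during pre-training.
print(batch["input_ids"])
print(batch["labels"])

Running this prints two tensors of the same shape; the labels row for the shorter sequence ends in -100 wherever padding was added.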