from datasets import load_dataset
from transformers import (
    AutoTokenizer,
    AutoModelForSeq2SeqLM,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

# Base checkpoint to fine-tune and where to write the results.
model_checkpoint = "google/flan-t5-large"
output_dir = "./finetuned-flan-t5"

# Load the training split from a local JSONL file.
dataset = load_dataset("json", data_files={"train": "train_data.jsonl"})
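# The loader expects one JSON object per line with "input" and "output"
# fields, matching the keys read in preprocess_function below.
# Hypothetical example record (illustrative values only):
# {"input": "Summarize: The quarterly report shows ...", "output": "Revenue grew."}
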
tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)

def preprocess_function(examples):
    inputs = examples["input"]
    targets = examples["output"]
    # Tokenize sources and targets; text_target= routes the targets through
    # the tokenizer's label pathway, replacing the older as_target_tokenizer()
    # context manager. Padding is left to the data collator below.
    model_inputs = tokenizer(inputs, max_length=512, truncation=True)
    labels = tokenizer(text_target=targets, max_length=128, truncation=True)
    model_inputs["labels"] = labels["input_ids"]
    return model_inputs

# Tokenize the whole dataset; drop the raw text columns so batches contain
# only model inputs (input_ids, attention_mask, labels).
tokenized_datasets = dataset.map(
    preprocess_function,
    batched=True,
    remove_columns=["input", "output"],
)

model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint)

training_args = Seq2SeqTrainingArguments(
    output_dir=output_dir,
    evaluation_strategy="no",  # no eval set here; newer transformers releases rename this to eval_strategy
    learning_rate=5e-5,
    per_device_train_batch_size=2,  # flan-t5-large is ~780M params; keep the batch small to fit typical GPUs
    num_train_epochs=3,
    weight_decay=0.01,
    save_total_limit=2,  # keep only the two most recent checkpoints on disk
    push_to_hub=False,
)

# Since tokenization above does no padding, a seq2seq collator is required:
# it dynamically pads each batch and pads labels with -100 so padding
# positions are ignored by the loss.
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)

trainer = Seq2SeqTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets["train"],
    data_collator=data_collator,
)

trainer.train()

# Persist the fine-tuned weights and tokenizer for later reuse.
model.save_pretrained(output_dir)
tokenizer.save_pretrained(output_dir)
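
# Optional sanity check: reload the saved artifacts and generate once.
# This is a minimal sketch; the prompt and max_new_tokens value are
# hypothetical, not part of the training setup above.
finetuned_tokenizer = AutoTokenizer.from_pretrained(output_dir)
finetuned_model = AutoModelForSeq2SeqLM.from_pretrained(output_dir)

sample_input = "Summarize: The meeting covered Q3 results and hiring plans."
input_ids = finetuned_tokenizer(sample_input, return_tensors="pt").input_ids
generated = finetuned_model.generate(input_ids, max_new_tokens=64)
print(finetuned_tokenizer.decode(generated[0], skip_special_tokens=True))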