from transformers import (
    GPT2LMHeadModel,
    GPT2Tokenizer,
    Trainer,
    TrainingArguments,
    DataCollatorForLanguageModeling,
)
from datasets import load_dataset
# Load the MedQuAD dataset
dataset = load_dataset("marianeft/MedQuAD", split="train")
# Load the GPT-2 model and tokenizer
model_name = "gpt2"  # Or use a medical fine-tuned model
model = GPT2LMHeadModel.from_pretrained(model_name)
tokenizer = GPT2Tokenizer.from_pretrained(model_name)

# GPT-2 has no pad token by default; reuse the EOS token so padding works
tokenizer.pad_token = tokenizer.eos_token
# Preprocess the dataset: concatenate each question with its answer
def preprocess(example):
    return {"text": f"{example['question']} {example['answer']}"}

dataset = dataset.map(preprocess)
# Tokenize the dataset, dropping the raw text columns the model can't consume
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)

tokenized_datasets = dataset.map(
    tokenize_function, batched=True, remove_columns=dataset.column_names
)
# Training arguments
training_args = TrainingArguments(
    output_dir="./results",
    num_train_epochs=1,
    per_device_train_batch_size=4,
    save_steps=10_000,
    save_total_limit=2,
    logging_dir="./logs",
)
# The collator copies input_ids into labels so the model computes a causal-LM loss;
# without it, Trainer has no labels and trainer.train() fails
data_collator = DataCollatorForLanguageModeling(tokenizer=tokenizer, mlm=False)

# Initialize Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_datasets,
    data_collator=data_collator,
)
# Fine-tune the model
trainer.train()

# Save the model and tokenizer to a new directory
model.save_pretrained("fine_tuned_medquad")
tokenizer.save_pretrained("fine_tuned_medquad")
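
# A minimal sketch of using the fine-tuned model for inference, assuming the
# save above succeeded; the prompt below is a hypothetical example question,
# not part of the original script.
from transformers import pipeline

generator = pipeline(
    "text-generation",
    model="fine_tuned_medquad",
    tokenizer="fine_tuned_medquad",
)
prompt = "What are the symptoms of glaucoma?"
print(generator(prompt, max_new_tokens=100)[0]["generated_text"])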