import pandas as pd
import numpy as np
import torch
from datasets import Dataset
from transformers import AutoTokenizer, AutoModelForTokenClassification, TrainingArguments, Trainer
from seqeval.metrics import classification_report


def main():
    # Use the GPU when available; otherwise fall back to CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    print(f"Using device: {device}")

    # Load the word-level dataset: one row per token with its NER tag.
    df = pd.read_excel('Augmented_Dataset.xlsx')

    # Drop incomplete rows and make sure both columns are strings.
    df = df.dropna(subset=['Word', 'Tag'])
    df['Word'] = df['Word'].astype(str)
    df['Tag'] = df['Tag'].astype(str)

    # Regroup the flat word/tag rows into one record per sentence.
    grouped_data = df.groupby("Sentence").apply(lambda s: {
        'words': s['Word'].tolist(),
        'labels': s['Tag'].tolist()
    }).tolist()

    dataset = Dataset.from_list(grouped_data)
    print(f"Total dataset size: {len(dataset)}")

    # Hold out 20% of the sentences for evaluation.
    split = dataset.train_test_split(test_size=0.2)
    train_dataset = split['train']
    test_dataset = split['test']
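
    # Note: the split above is unseeded, so it changes between runs;
    # datasets' train_test_split accepts a seed argument (e.g. seed=42)
    # if a reproducible split is needed.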

    # RoBERTa's byte-level BPE tokenizer requires add_prefix_space=True
    # when it is fed pre-split words (is_split_into_words=True below).
    tokenizer = AutoTokenizer.from_pretrained(
        "abdulhade/RoBERTa-large-SizeCorpus_1B",
        add_prefix_space=True,
    )

    # Build label <-> id mappings; sorting makes the ids deterministic
    # across runs (iterating a plain set() would not be).
    unique_labels = sorted(set(df['Tag']))
    label2id = {label: i for i, label in enumerate(unique_labels)}
    id2label = {i: label for label, i in label2id.items()}

    def tokenize_and_align_labels(examples):
        tokenized_inputs = tokenizer(
            examples['words'],
            truncation=True,
            is_split_into_words=True,
            padding='max_length',
            max_length=128
        )
        labels = []
        for i, label in enumerate(examples['labels']):
            word_ids = tokenized_inputs.word_ids(batch_index=i)
            # Special and padding tokens get -100 (ignored by the loss);
            # every subtoken of a word inherits that word's label.
            label_ids = [-100 if word_id is None else label2id[label[word_id]]
                         for word_id in word_ids]
            labels.append(label_ids)

        tokenized_inputs["labels"] = labels
        return tokenized_inputs
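
    # Illustration of the alignment above: if a word is split into two
    # subtokens, word_ids might look like [None, 0, 0, None, ...], which
    # yields label ids [-100, tag_id, tag_id, -100, ...], where tag_id is
    # the id of that word's tag (an illustrative split; the actual
    # subtokenization depends on the tokenizer).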

    train_dataset = train_dataset.map(tokenize_and_align_labels, batched=True)
    test_dataset = test_dataset.map(tokenize_and_align_labels, batched=True)

    model = AutoModelForTokenClassification.from_pretrained(
        "abdulhade/RoBERTa-large-SizeCorpus_1B",
        num_labels=len(unique_labels),
        id2label=id2label,
        label2id=label2id
    ).to(device)

    training_args = TrainingArguments(
        output_dir='results',
        evaluation_strategy="epoch",  # renamed to eval_strategy in newer transformers releases
        learning_rate=2e-5,
        per_device_train_batch_size=64,
        per_device_eval_batch_size=64,
        num_train_epochs=50,
        weight_decay=0.01,
        save_steps=5000,
        save_total_limit=2,
        logging_dir='./logs',
        # Mixed precision requires a GPU; enable it only when CUDA is available.
        fp16=torch.cuda.is_available(),
    )

    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=test_dataset,
        tokenizer=tokenizer,
    )
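
    # Optional: without a compute_metrics hook, each epoch's evaluation only
    # reports the loss. A minimal sketch of a seqeval-based hook (hypothetical,
    # not part of the original script) that could be passed to Trainer via
    # compute_metrics=compute_metrics:
    #
    #     from seqeval.metrics import f1_score
    #
    #     def compute_metrics(eval_pred):
    #         logits, gold = eval_pred
    #         preds = np.argmax(logits, axis=2)
    #         y_true = [[id2label[l] for l in row if l != -100] for row in gold]
    #         y_pred = [[id2label[p] for p, l in zip(p_row, g_row) if l != -100]
    #                   for p_row, g_row in zip(preds, gold)]
    #         return {"f1": f1_score(y_true, y_pred)}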

    trainer.train()

    # Save the fine-tuned model and tokenizer for later inference.
    output_dir = 'NER_RoBERTa_fineTuning'
    model.save_pretrained(output_dir)
    tokenizer.save_pretrained(output_dir)
    print(f"Model and tokenizer saved to {output_dir}")

    # Final evaluation on the held-out set: argmax over the label dimension,
    # then drop the -100 positions before scoring with seqeval.
    predictions, labels, _ = trainer.predict(test_dataset)
    predictions = np.argmax(predictions, axis=2)

    true_labels = [[id2label[label] for label in label_set if label != -100]
                   for label_set in labels]
    true_predictions = [[id2label[pred] for pred, label in zip(pred_set, label_set) if label != -100]
                        for pred_set, label_set in zip(predictions, labels)]

    print(classification_report(true_labels, true_predictions))
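

# A minimal inference sketch (not part of the original script): loads the
# directory saved by main() and tags raw text with the standard transformers
# token-classification pipeline. Assumes training has already been run and
# 'NER_RoBERTa_fineTuning' exists; never called by main().
def tag_text(text):
    from transformers import pipeline
    ner = pipeline(
        "token-classification",
        model="NER_RoBERTa_fineTuning",
        aggregation_strategy="simple",  # merge subtokens into word-level entities
    )
    return ner(text)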


if __name__ == "__main__":
    main()