| from transformers import PegasusForConditionalGeneration, PegasusTokenizer
|
| from transformers import pipeline
|
|
|
|
|
# --- Configuration ---------------------------------------------------------
MODEL_PATH = "./fine_tune_model_pegasus_50"
TOKENIZER_PATH = "./fine_tune_tokenizer_pegasus_50"
INPUT_FILE = "./9.txt"

# Generation settings forwarded by the pipeline to model.generate().
# NOTE(review): max_length=700 assumes the fine-tuned checkpoint supports
# outputs that long — confirm against the model config.
GEN_KWARGS = {"length_penalty": 1.0, "num_beams": 8, "max_length": 700}


def main():
    """Summarize INPUT_FILE with the fine-tuned Pegasus model and print it.

    Loads the checkpoint and tokenizer from local directories, builds a
    Hugging Face summarization pipeline, reads the input text file, and
    prints the generated summary to stdout.
    """
    # ignore_mismatched_sizes tolerates head/embedding shape differences
    # between the fine-tuned checkpoint and the base architecture.
    model = PegasusForConditionalGeneration.from_pretrained(
        MODEL_PATH, ignore_mismatched_sizes=True
    )
    tokenizer = PegasusTokenizer.from_pretrained(TOKENIZER_PATH)

    # Inference only: disable dropout / training-mode layers.
    model.eval()

    summarizer = pipeline(
        'summarization', model=model, tokenizer=tokenizer, **GEN_KWARGS
    )

    # Explicit encoding: without it, open() uses the platform default and
    # may fail or mis-decode non-ASCII input.
    with open(INPUT_FILE, 'r', encoding="utf-8") as fh:
        text = fh.read()

    # Pipeline returns a list of dicts, one per input; we pass one input.
    summary = summarizer(text)
    print('Summary:\n', summary[0]['summary_text'])


if __name__ == "__main__":
    main()
|
|
|