| from transformers import BartForConditionalGeneration, BartTokenizer
|
| from transformers import pipeline
|
|
|
|
|
# Local paths to the fine-tuned BART-large checkpoint and its tokenizer.
model_path = "./fine_tune_model_bart_large_25"
tokenizer_path = "./fine_tune_tokenizer_bart_large_25"

# Load the fine-tuned model. ignore_mismatched_sizes tolerates head/embedding
# size differences between the saved checkpoint and the architecture config
# (e.g. a resized vocabulary) instead of raising on load.
model = BartForConditionalGeneration.from_pretrained(model_path, ignore_mismatched_sizes=True)
tokenizer = BartTokenizer.from_pretrained(tokenizer_path)

# Inference mode: disables dropout and other training-only behavior.
model.eval()

# Generation settings: beam search with 8 beams, neutral length penalty,
# generated summaries capped at 700 tokens.
gen_kwargs = {"length_penalty": 1.0, "num_beams": 8, "max_length": 700}

custom_summarization_pipeline = pipeline(
    'summarization', model=model, tokenizer=tokenizer, **gen_kwargs
)

file_path = "./9.txt"

# Explicit encoding: the platform default (e.g. cp1252 on Windows) would
# mis-decode or crash on non-ASCII input text.
with open(file_path, 'r', encoding="utf-8") as file:
    text = file.read()

# truncation=True clips the input to the model's maximum sequence length
# (1024 tokens for BART-large); without it, longer documents raise an
# index error in the positional embeddings.
summary = custom_summarization_pipeline(text, truncation=True)

print('Summary:\n', summary[0]['summary_text'])
|
|
|