| import torch |
| import re |
| from itertools import combinations |
| from typing import List |
|
|
|
|
def remove_citations(sent):
    """Strip inline citation markers such as " [3]" or "[12]" from *sent*.

    Removes space-prefixed and bare "[<digits>" citation openers, drops the
    " |" separator used in citation-formatted output, and finally deletes
    every remaining "]" character.
    """
    without_spaced_markers = re.sub(r" \[\d+", "", sent)
    without_markers = re.sub(r"\[\d+", "", without_spaced_markers)
    return without_markers.replace(" |", "").replace("]", "")
|
|
class CitationVerifier:
    """Checks whether a sentence is entailed by a set of cited documents.

    Wraps a seq2seq entailment model whose ``generate`` output decodes to
    "1" for entailment (TRUE/NLI-style prompting); anything else is treated
    as non-entailment.
    """

    def __init__(self, model, tokenizer):
        # model: a generate()-capable seq2seq model; tokenizer: its tokenizer.
        self.model = model
        self.tokenizer = tokenizer

    @torch.inference_mode()
    def _verify(self, premise: str, hypothesis: str) -> bool:
        """Return True iff the model judges *premise* to entail *hypothesis*."""
        prompt = f"premise: {premise} hypothesis: {hypothesis}"
        input_ids = self.tokenizer(prompt, return_tensors="pt")["input_ids"].to(self.model.device)
        # A few new tokens suffice for the expected "0"/"1" verdict.
        outputs = self.model.generate(input_ids, max_new_tokens=10)
        result = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
        # NOTE(review): assumes the model emits exactly "1" for entailment —
        # simplified from the redundant `True if result == "1" else False`.
        return result == "1"

    def verify(self, doc_list: list, ref_list: List[int], sentence: str) -> bool:
        """Return True iff the documents indexed by *ref_list* entail *sentence*.

        doc_list entries are assumed to be mappings with "title" and "text"
        keys (not shown here — confirm against the caller).
        """
        premise = "\n".join(
            f"Title: {doc_list[idx]['title']}\n{doc_list[idx]['text']}"
            for idx in ref_list
        )
        return self._verify(premise, sentence)
| |
| |
| |
|
|
class CitationSimplifier:
    """Greedily drops redundant citations from a sentence's reference list.

    Starting from the full reference list, repeatedly tries every subset with
    one fewer reference; the first subset that still verifies becomes the new
    reference list. Stops when no smaller subset verifies or only one
    reference remains.
    """

    def __init__(self, citation_verifier: "CitationVerifier"):
        self.citation_verifier = citation_verifier

    def simplify(self, doc_list: list, ref_list: List[int], sentence: str) -> List[int]:
        """Return a greedily minimized subset of *ref_list* that still verifies *sentence*.

        Fix: always returns a list, matching the annotation — the original
        returned a ``tuple`` (from ``combinations``) whenever any shrink succeeded.
        """
        refs: List[int] = list(ref_list)
        target_size = len(refs) - 1
        while target_size > 0:
            for candidate in combinations(refs, target_size):
                if self.citation_verifier.verify(
                    doc_list=doc_list, ref_list=candidate, sentence=sentence
                ):
                    # This smaller subset still verifies: adopt it and try smaller.
                    refs = list(candidate)
                    break
            else:
                # No subset of this size verifies; stop shrinking.
                break
            target_size -= 1
        return refs
|
|