"""Dense passage retrieval: DPR query encoding + FAISS nearest-neighbor search."""

import mmap
import os
from dataclasses import dataclass
from typing import List

import faiss
import jsonlines
import numpy as np
from tqdm import tqdm
from transformers import DPRQuestionEncoder, DPRQuestionEncoderTokenizer


class DprQueryEncoder:
    """Wraps a pretrained DPR question encoder for single-query embedding."""

    def __init__(self, model_name_or_path: str, cache_dir: str, device: str):
        """Load the DPR question encoder and its tokenizer onto ``device``."""
        self.device = device
        self.model = DPRQuestionEncoder.from_pretrained(
            model_name_or_path,
            cache_dir=cache_dir,
        )
        self.model.to(self.device)
        self.tokenizer = DPRQuestionEncoderTokenizer.from_pretrained(
            model_name_or_path,
            cache_dir=cache_dir,
        )

    def encode(self, query: str) -> np.ndarray:
        """Return the pooled DPR embedding of ``query``, shape (1, 768)."""
        inputs = self.tokenizer(query, return_tensors='pt')
        inputs.to(self.device)
        embeddings = (
            self.model(inputs["input_ids"]).pooler_output.detach().cpu().numpy()
        )
        return embeddings  # * (1, 768)


def file_len(fname: str) -> int:
    """Count lines in ``fname`` via a read-only memory map.

    Opens the file read-only ("rb" + ACCESS_READ) instead of "r+", which
    needlessly required write permission, and closes the mmap deterministically.
    """
    with open(fname, "rb") as f:
        with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as buf:
            lines = 0
            while buf.readline():
                lines += 1
    return lines


@dataclass
class DenseSearchResult:
    """One retrieval hit: external document id and its similarity score."""
    doc_id: int
    score: float


class Searcher:
    """FAISS-backed dense searcher over DPR embeddings.

    Loads a FAISS index plus a parallel docid mapping from ``index_dir`` and
    the raw document records (jsonlines) from ``doc_path``.
    """

    def __init__(self, query_encoder: DprQueryEncoder, index_dir: str, doc_path: str):
        self.query_encoder = query_encoder
        self.index, self.doc_ids = self.load_index(index_dir)
        self.doc_list = self.load_doc_list(doc_path)

    def load_index(self, index_dir: str):
        """Read the FAISS index and its row->docid mapping from ``index_dir``."""
        index_path = os.path.join(index_dir, "index")
        doc_ids_path = os.path.join(index_dir, "docid")
        index = faiss.read_index(index_path)
        doc_ids = self.load_doc_ids(doc_ids_path)
        return index, doc_ids

    def load_doc_ids(self, doc_ids_path: str) -> List[int]:
        """Load one integer doc id per line (iterates the file lazily)."""
        with open(doc_ids_path, "r") as f:
            doc_ids = [int(line.rstrip()) for line in f]
        return doc_ids

    def load_doc_list(self, doc_path: str) -> list:
        """Load all jsonlines records; index 0 is a placeholder (unused)."""
        lines = file_len(doc_path)
        doc_list = [None]  # * 0 is not used
        with jsonlines.open(doc_path, "r") as f:
            for line in tqdm(f, desc="load doc list", total=lines):
                doc_list.append(line)
        return doc_list

    def search(self, query: str, k: int) -> List[DenseSearchResult]:
        """Return the top-``k`` hits for a single query.

        FAISS pads missing results with index -1, which we filter out.
        """
        emb_q = self.query_encoder.encode(query)  # * (1, 768)
        distances, indexes = self.index.search(emb_q, k)
        distances = distances.flat
        indexes = indexes.flat
        return [
            DenseSearchResult(self.doc_ids[idx], score.item())
            for score, idx in zip(distances, indexes)
            if idx != -1
        ]

    def batch_search(self, query_list: List[str], k: int) -> List[List[DenseSearchResult]]:
        """Return top-``k`` hits for each query in ``query_list``."""
        emb_q = np.concatenate(
            [self.query_encoder.encode(query) for query in query_list]
        )  # * (B, 768)
        D, I = self.index.search(emb_q, k)
        return [
            [
                DenseSearchResult(self.doc_ids[idx], score.item())
                for score, idx in zip(distances, indexes)
                if idx != -1
            ]
            for distances, indexes in zip(D, I)
        ]

    def get_doc(self, doc_id: int):
        """Return the raw record for ``doc_id`` (1-based; slot 0 is unused)."""
        return self.doc_list[doc_id]