| from __future__ import annotations |
|
|
| import json |
| import logging |
| import os |
| from collections import defaultdict |
| from pathlib import Path |
| from time import time |
| from typing import Dict, Tuple |
|
|
| from datasets import Features, Value, load_dataset |
|
|
| from ..evaluation.evaluators import RetrievalEvaluator |
| from ..MTEBResults import ScoresDict |
| from .AbsTask import AbsTask |
|
|
| logger = logging.getLogger(__name__) |
|
|
|
|
| |
class HFDataLoader:
    """Loader for BEIR-style retrieval data (corpus, queries and qrels).

    Data is read either from a Hugging Face repository (``hf_repo``) or from
    a local ``data_folder`` expected to contain::

        corpus.jsonl            # one JSON document per line
        queries.jsonl           # one JSON query per line
        qrels/<split>.tsv       # tab-separated: query-id, corpus-id, score

    An optional ``prefix`` namespaces the query/qrels files, e.g.
    ``fiqa-queries.jsonl`` and ``fiqa-qrels/<split>.tsv``.
    """

    def __init__(
        self,
        hf_repo: str | None = None,
        hf_repo_qrels: str | None = None,
        data_folder: str | None = None,
        prefix: str | None = None,
        corpus_file: str = "corpus.jsonl",
        query_file: str = "queries.jsonl",
        qrels_folder: str = "qrels",
        qrels_file: str = "",
        streaming: bool = False,
        keep_in_memory: bool = False,
    ):
        self.corpus = {}
        self.queries = {}
        self.qrels = {}
        self.hf_repo = hf_repo
        if hf_repo:
            # Qrels default to the same repository unless an explicit qrels
            # repository is given.
            self.hf_repo_qrels = hf_repo_qrels if hf_repo_qrels else hf_repo
        else:
            # Local layout: an optional prefix namespaces the query and qrels
            # files (e.g. "fiqa-queries.jsonl", "fiqa-qrels/test.tsv").
            if prefix:
                query_file = prefix + "-" + query_file
                qrels_folder = prefix + "-" + qrels_folder

        self.corpus_file = (
            os.path.join(data_folder, corpus_file) if data_folder else corpus_file
        )
        self.query_file = (
            os.path.join(data_folder, query_file) if data_folder else query_file
        )
        # NOTE(review): qrels_folder stays None when no data_folder is given;
        # load() would then fail on os.path.join — assumed unreachable because
        # hf_repo is always set in that case. TODO confirm with callers.
        self.qrels_folder = (
            os.path.join(data_folder, qrels_folder) if data_folder else None
        )
        self.qrels_file = qrels_file
        self.streaming = streaming
        self.keep_in_memory = keep_in_memory

    @staticmethod
    def check(fIn: str, ext: str):
        """Validate that file ``fIn`` exists and ends with extension ``ext``.

        Raises:
            ValueError: if the file is missing or has the wrong extension.
        """
        if not os.path.exists(fIn):
            raise ValueError(
                "File {} not present! Please provide accurate file.".format(fIn)
            )

        if not fIn.endswith(ext):
            raise ValueError(
                "File {} must be present with extension {}".format(fIn, ext)
            )

    def load(
        self, split="test"
    ) -> Tuple[Dict[str, dict[str, str]], dict[str, str], dict[str, dict[str, int]]]:
        """Load corpus, queries and qrels for the given ``split``.

        Returns:
            Tuple of ``(corpus, queries, qrels)``. ``qrels`` is converted to a
            plain ``{query-id: {corpus-id: score}}`` mapping, and queries are
            filtered down to those with at least one relevance judgement.
        """
        if not self.hf_repo:
            self.qrels_file = os.path.join(self.qrels_folder, split + ".tsv")
            self.check(fIn=self.corpus_file, ext="jsonl")
            self.check(fIn=self.query_file, ext="jsonl")
            self.check(fIn=self.qrels_file, ext="tsv")

        if not len(self.corpus):
            logger.info("Loading Corpus...")
            self._load_corpus()
            logger.info("Loaded %d %s Documents.", len(self.corpus), split.upper())
            logger.info("Doc Example: %s", self.corpus[0])

        if not len(self.queries):
            logger.info("Loading Queries...")
            self._load_queries()

        self._load_qrels(split)
        # Fold the qrels dataset rows into {query-id: {corpus-id: score}}.
        qrels_dict = defaultdict(dict)

        def qrels_dict_init(row):
            qrels_dict[row["query-id"]][row["corpus-id"]] = int(row["score"])

        self.qrels.map(qrels_dict_init)
        self.qrels = qrels_dict
        # Drop queries that have no relevance judgements for this split.
        self.queries = self.queries.filter(lambda x: x["id"] in self.qrels)
        logger.info("Loaded %d %s Queries.", len(self.queries), split.upper())
        logger.info("Query Example: %s", self.queries[0])

        return self.corpus, self.queries, self.qrels

    def load_corpus(self) -> dict[str, dict[str, str]]:
        """Load and return only the corpus (no queries or qrels)."""
        if not self.hf_repo:
            self.check(fIn=self.corpus_file, ext="jsonl")

        if not len(self.corpus):
            logger.info("Loading Corpus...")
            self._load_corpus()
            # Fixed: the original format string had a %s placeholder with no
            # matching argument, which triggers a logging formatting error.
            logger.info("Loaded %d Documents.", len(self.corpus))
            logger.info("Doc Example: %s", self.corpus[0])

        return self.corpus

    def _load_corpus(self):
        """Read the corpus from the HF hub or a local JSONL file into a dataset."""
        if self.hf_repo:
            corpus_ds = load_dataset(
                self.hf_repo,
                "corpus",
                keep_in_memory=self.keep_in_memory,
                streaming=self.streaming,
            )
        else:
            corpus_ds = load_dataset(
                "json",
                data_files=self.corpus_file,
                streaming=self.streaming,
                keep_in_memory=self.keep_in_memory,
            )
        # The dataset dict has a single split; take it regardless of its name.
        corpus_ds = next(iter(corpus_ds.values()))
        corpus_ds = corpus_ds.cast_column("_id", Value("string"))
        corpus_ds = corpus_ds.rename_column("_id", "id")
        # Keep only the columns downstream code relies on.
        corpus_ds = corpus_ds.remove_columns(
            [
                col
                for col in corpus_ds.column_names
                if col not in ["id", "text", "title"]
            ]
        )
        self.corpus = corpus_ds

    def _load_queries(self):
        """Read the queries from the HF hub or a local JSONL file into a dataset."""
        if self.hf_repo:
            queries_ds = load_dataset(
                self.hf_repo,
                "queries",
                keep_in_memory=self.keep_in_memory,
                streaming=self.streaming,
            )
        else:
            queries_ds = load_dataset(
                "json",
                data_files=self.query_file,
                streaming=self.streaming,
                keep_in_memory=self.keep_in_memory,
            )
        queries_ds = next(iter(queries_ds.values()))
        queries_ds = queries_ds.cast_column("_id", Value("string"))
        queries_ds = queries_ds.rename_column("_id", "id")
        queries_ds = queries_ds.remove_columns(
            [col for col in queries_ds.column_names if col not in ["id", "text"]]
        )
        self.queries = queries_ds

    def _load_qrels(self, split):
        """Read the qrels for ``split`` and normalize the column types."""
        if self.hf_repo:
            qrels_ds = load_dataset(
                self.hf_repo_qrels,
                keep_in_memory=self.keep_in_memory,
                streaming=self.streaming,
            )[split]
        else:
            qrels_ds = load_dataset(
                "csv",
                data_files=self.qrels_file,
                delimiter="\t",
                keep_in_memory=self.keep_in_memory,
            )
        features = Features(
            {
                "query-id": Value("string"),
                "corpus-id": Value("string"),
                "score": Value("float"),
            }
        )
        qrels_ds = qrels_ds.cast(features)
        self.qrels = qrels_ds
|
|
|
|
class AbsTaskRetrieval(AbsTask):
    """Abstract class for retrieval experiments.

    Child-classes must implement the following properties:

    self.corpus: dict[str, dict[str, str]]
        Semantically, it should contain dict[split_name, dict[sample_id, dict[str, str]]]
        E.g. {"test": {"document_one": {"_id": "d1", "title": "title", "text": "text"}}}

    self.queries: dict[str, dict[str, Union[str, List[str]]]]
        Semantically, it should contain dict[split_name, dict[sample_id, str]] or dict[split_name, dict[sample_id, List[str]]] for conversations
        E.g. {"test": {"q1": "query"}}
        or {"test": {"q1": ["turn1", "turn2", "turn3"]}}

    self.relevant_docs: dict[str, dict[str, dict[str, int]]]
        Semantically, it should contain dict[split_name, dict[sample_id, dict[doc_id, score]]]
        E.g.: {"test": {"q1": {"document_one": 1}}}
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def load_data(self, **kwargs):
        """Populate ``corpus``/``queries``/``relevant_docs`` for each eval split."""
        if self.data_loaded:
            return
        self.corpus, self.queries, self.relevant_docs = {}, {}, {}
        dataset_path = self.metadata_dict["dataset"]["path"]
        # clarin-knext datasets keep their qrels in a sibling "-qrels" repo.
        hf_repo_qrels = (
            dataset_path + "-qrels" if "clarin-knext" in dataset_path else None
        )
        for split in kwargs.get("eval_splits", self.metadata_dict["eval_splits"]):
            corpus, queries, qrels = HFDataLoader(
                hf_repo=dataset_path,
                hf_repo_qrels=hf_repo_qrels,
                streaming=False,
                keep_in_memory=False,
            ).load(split=split)
            # Convert datasets rows into the plain-dict shapes documented on
            # the class (query id -> text, doc id -> {title, text}).
            queries = {query["id"]: query["text"] for query in queries}
            corpus = {
                doc["id"]: {"title": doc["title"], "text": doc["text"]}
                for doc in corpus
            }
            self.corpus[split], self.queries[split], self.relevant_docs[split] = (
                corpus,
                queries,
                qrels,
            )

        self.data_loaded = True

    def evaluate(self, model, split="test", **kwargs):
        """Evaluate ``model`` on every hf_subset of ``split``.

        Returns:
            dict mapping hf_subset name (``"default"`` for monolingual tasks)
            to its score dict.
        """
        retriever = RetrievalEvaluator(model, **kwargs)

        scores = {}
        hf_subsets = (
            list(self.hf_subsets)
            if (self.is_multilingual or self.is_crosslingual)
            else ["default"]
        )

        for hf_subset in hf_subsets:
            logger.info(f"Subset: {hf_subset}")

            if hf_subset == "default":
                corpus, queries, relevant_docs = (
                    self.corpus[split],
                    self.queries[split],
                    self.relevant_docs[split],
                )
            else:
                corpus, queries, relevant_docs = (
                    self.corpus[hf_subset][split],
                    self.queries[hf_subset][split],
                    self.relevant_docs[hf_subset][split],
                )
            scores[hf_subset] = self._evaluate_subset(
                retriever, corpus, queries, relevant_docs, hf_subset, **kwargs
            )
        return scores

    def _evaluate_subset(
        self, retriever, corpus, queries, relevant_docs, hf_subset: str, **kwargs
    ):
        """Run retrieval, optionally save predictions, and compute metrics."""
        start_time = time()
        results = retriever(corpus, queries)
        end_time = time()
        logger.info(
            "Time taken to retrieve: {:.2f} seconds".format(end_time - start_time)
        )

        if kwargs.get("save_predictions", False):
            output_folder = Path(kwargs.get("output_folder", "results"))
            # Race-safe replacement for the isdir() + makedirs() pair.
            output_folder.mkdir(parents=True, exist_ok=True)
            top_k = kwargs.get("top_k", None)
            if top_k is not None:
                # Keep only each query's top_k highest-scoring documents.
                for qid in list(results.keys()):
                    doc_ids = set(
                        sorted(
                            results[qid], key=lambda x: results[qid][x], reverse=True
                        )[:top_k]
                    )
                    results[qid] = {
                        k: v for k, v in results[qid].items() if k in doc_ids
                    }
            qrels_save_path = (
                output_folder
                / f"{self.metadata_dict['name']}_{hf_subset}_predictions.json"
            )

            with open(qrels_save_path, "w") as f:
                json.dump(results, f)

        ndcg, _map, recall, precision = retriever.evaluate(
            relevant_docs,
            results,
            retriever.k_values,
            ignore_identical_ids=kwargs.get("ignore_identical_ids", True),
        )
        mrr = retriever.evaluate_custom(
            relevant_docs, results, retriever.k_values, "mrr"
        )
        # Flatten "metric@k" keys into flat "metric_at_k" entries.
        scores = {
            **{f"ndcg_at_{k.split('@')[1]}": v for (k, v) in ndcg.items()},
            **{f"map_at_{k.split('@')[1]}": v for (k, v) in _map.items()},
            **{f"recall_at_{k.split('@')[1]}": v for (k, v) in recall.items()},
            **{f"precision_at_{k.split('@')[1]}": v for (k, v) in precision.items()},
            **{f"mrr_at_{k.split('@')[1]}": v for (k, v) in mrr.items()},
        }
        self._add_main_score(scores)
        return scores

    def _add_main_score(self, scores: ScoresDict) -> None:
        """Copy the task's configured main metric into ``scores["main_score"]``."""
        scores["main_score"] = scores[self.metadata.main_score]

    def calculate_metadata_metrics(self) -> None:
        """Print length/count statistics for each eval split (per language if multilingual)."""
        self.load_data()

        for split in self.metadata_dict["eval_splits"]:
            if self.is_multilingual:
                for lang in self.relevant_docs.keys():
                    process_language(
                        self.relevant_docs[lang][split],
                        self.queries[lang][split],
                        self.corpus[lang][split],
                        lang,
                    )
            else:
                process_language(
                    self.relevant_docs[split], self.queries[split], self.corpus[split]
                )
|
|
|
|
def process_language(relevant_docs, queries, corpus, lang=None):
    """Report average query/document pair length and collection size.

    Statistics are printed to stdout; ``lang``, when given, is only included
    in the printed message text.
    """
    char_total, pair_count = calculate_length_and_count(relevant_docs, queries, corpus)
    if pair_count:
        average_length = char_total / pair_count
    else:
        average_length = 0
    num_documents = len(queries) + len(corpus)
    language_description = f" for language {lang}" if lang else ""
    print(f"Average character length{language_description} is {average_length}")
    print(f"Number of queries and documents{language_description} is {num_documents}")
|
|
|
|
def calculate_length_and_count(relevant_docs, queries, corpus):
    """Sum the character lengths of (query, relevant document) pairs.

    Pairs whose relevance score is 0 are skipped; a document's length is the
    length of its title concatenated with its text.

    Returns:
        Tuple ``(total_length, num_pairs)``.
    """
    total_length = 0
    num_pairs = 0
    for query_id, judged in relevant_docs.items():
        query_text = queries[query_id]
        for doc_id, score in judged.items():
            if score == 0:
                # Non-relevant judgement: contributes nothing to the average.
                continue
            entry = corpus[doc_id]
            combined = entry["title"] + entry["text"]
            total_length += len(query_text) + len(combined)
            num_pairs += 1
    return total_length, num_pairs
|
|