from __future__ import annotations
import datasets
from mteb.abstasks import MultilingualTask
from mteb.abstasks.AbsTaskRetrieval import AbsTaskRetrieval
from mteb.abstasks.TaskMetadata import TaskMetadata
# Programming languages covered by the CodeSearchNet corpus; each one becomes
# a separate eval language ("<lang>-Code") in the task metadata below.
_LANGS = ["python", "javascript", "go", "ruby", "java", "php"]
class CodeSearchNetRetrieval(MultilingualTask, AbsTaskRetrieval):
    """Multilingual code retrieval task on the CodeSearchNet corpus.

    For each language, queries are function docstrings
    (``func_documentation_string``) and the corpus holds the corresponding
    function bodies (``func_code_string``) with any leaked docstring text
    stripped out.
    """

    # Only the test split is evaluated.
    _EVAL_SPLIT = "test"

    metadata = TaskMetadata(
        name="CodeSearchNetRetrieval",
        description="The dataset is a collection of code snippets and their corresponding natural language queries. The task is to retrieve the most relevant code snippet for a given query.",
        reference="https://huggingface.co/datasets/code_search_net/viewer",
        dataset={
            "path": "code_search_net",
            "revision": "fdc6a9e39575768c27eb8a2a5f702bf846eb4759",
        },
        type="Retrieval",
        category="p2p",
        eval_splits=[_EVAL_SPLIT],
        eval_langs={lang: [lang + "-Code"] for lang in _LANGS},
        main_score="ndcg_at_10",
        date=("2019-01-01", "2019-12-31"),
        form=["written"],
        domains=["Programming"],
        task_subtypes=["Code retrieval"],
        license="Not specified",
        socioeconomic_status="high",
        annotations_creators="derived",
        dialect=[],
        text_creation="found",
        bibtex_citation="@article{husain2019codesearchnet, title={{CodeSearchNet} challenge: Evaluating the state of semantic code search}, author={Husain, Hamel and Wu, Ho-Hsiang and Gazit, Tiferet and Allamanis, Miltiadis and Brockschmidt, Marc}, journal={arXiv preprint arXiv:1909.09436}, year={2019} }",
        n_samples={
            _EVAL_SPLIT: 1000,
        },
        avg_character_length={"test": 1196.4609},
    )

    def load_data(self, **kwargs):
        """Stream the CodeSearchNet test split and build per-language data.

        Populates ``self.queries``, ``self.corpus`` and ``self.relevant_docs``
        (keyed by language, then by split), capped at
        ``n_samples[_EVAL_SPLIT]`` examples per language. Idempotent: returns
        immediately once ``self.data_loaded`` is set.
        """
        if self.data_loaded:
            return

        data = datasets.load_dataset(
            split=self._EVAL_SPLIT,
            trust_remote_code=True,
            streaming=True,
            **self.metadata_dict["dataset"],
        )
        # Deterministic (seeded) buffered shuffle of the streamed examples.
        data = data.shuffle(seed=42)

        # Remove any leaked labels (docstring text embedded in the code
        # string) -- quite common in this dataset.
        data = data.map(
            lambda ex: {
                "func_code_string": ex["func_code_string"].replace(
                    ex["func_documentation_string"], ""
                )
            }
        )

        max_samples = self.metadata_dict["n_samples"][self._EVAL_SPLIT]

        # Collect at most max_samples examples per language and stop early
        # once every bucket is full, so the streaming dataset is not consumed
        # past what is needed. The kept examples are identical to truncating
        # a full pass, since the seeded iteration order is unchanged.
        lang_subs: dict[str, list] = {lang: [] for lang in _LANGS}
        for ex in data:
            bucket = lang_subs[ex["language"]]
            if len(bucket) < max_samples:
                bucket.append(ex)
                if all(len(s) >= max_samples for s in lang_subs.values()):
                    break

        self.queries = {}
        self.corpus = {}
        self.relevant_docs = {}
        for lang, sub in lang_subs.items():
            # Query ids are positional indices; document ids are the source
            # URLs of the functions (assumed unique per row -- TODO confirm
            # against the dataset; duplicates would collapse corpus entries).
            self.queries[lang] = {
                self._EVAL_SPLIT: {
                    str(i): row["func_documentation_string"]
                    for i, row in enumerate(sub)
                }
            }
            self.corpus[lang] = {
                self._EVAL_SPLIT: {
                    str(row["func_code_url"]): {"text": row["func_code_string"]}
                    for row in sub
                }
            }
            self.relevant_docs[lang] = {
                self._EVAL_SPLIT: {
                    str(i): {row["func_code_url"]: 1} for i, row in enumerate(sub)
                }
            }

        self.data_loaded = True