| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| import json |
|
|
| import datasets |
| from datasets import BuilderConfig |
|
|
| |
| |
# BibTeX citation for the paper this dataset accompanies.
_CITATION = """\
@article{duan2024boosting,
title={Boosting the Predictive Power of Protein Representations with a Corpus of Text Annotations},
author={Duan, Haonan and Skreta, Marta and Cotta, Leonardo and Rajaonson, Ella Miray and Dhawan, Nikita and Aspuru-Guzik, Alán and Maddison, Chris J},
journal={bioRxiv},
pages={2024--07},
year={2024},
publisher={Cold Spring Harbor Laboratory}
}
"""

# Human-readable summary for the dataset card (replaces the dataset-script
# template placeholder text).
_DESCRIPTION = """\
PAIR: protein sequences paired with text annotations (protein names,
free-text function descriptions, and EC numbers), accompanying the paper
"Boosting the Predictive Power of Protein Representations with a Corpus of
Text Annotations" (Duan et al., 2024).
"""

# Hugging Face Hub repository that hosts the raw data files (see _URLS).
_HOMEPAGE = "https://huggingface.co/datasets/mskrt/PAIR"

# License is not stated anywhere in this script.
# TODO(review): confirm the dataset license and fill this in.
_LICENSE = ""

# Remote location(s) of the raw JSON data.
_URLS = {
    "first_domain": "https://huggingface.co/datasets/mskrt/PAIR/raw/main/test.json",
}

# Maps each supported annotation type to the `datasets` feature type used
# for its column: names/function are single strings, EC is a list of codes.
annotation2type = {
    "names": datasets.Value("string"),
    "function": datasets.Value("string"),
    "EC": datasets.Sequence(datasets.Value("string")),
}
|
|
|
|
class CustomConfig(datasets.BuilderConfig):
    """BuilderConfig carrying an extra ``annotation_type`` selector.

    ``annotation_type`` picks which annotation column the builder exposes
    and defaults to ``"function"`` when not supplied.
    """

    def __init__(self, **kwargs):
        """Extract ``annotation_type`` and forward everything else.

        Parameters
        ----------
        kwargs :
            Standard ``BuilderConfig`` keyword arguments, optionally
            including ``annotation_type``.
        """
        # Pop our custom key first so the base class only receives
        # arguments it recognizes.
        self.annotation_type = kwargs.pop("annotation_type", "function")
        super().__init__(**kwargs)
|
|
|
|
class PAIRDataset(datasets.GeneratorBasedBuilder):
    """Builder for the PAIR protein text-annotation dataset.

    Each example pairs a protein sequence and its id with one annotation
    column, selected by the config's ``annotation_type``.
    """

    BUILDER_CONFIGS = [
        CustomConfig(
            name="custom_config",
            version="1.0.0",
            description="Protein sequences paired with text annotations.",
        ),
    ]
    # Allows load_dataset(..., annotation_type=...) to build a CustomConfig.
    BUILDER_CONFIG_CLASS = CustomConfig

    def _info(self):
        """Return dataset metadata; the feature set depends on the config."""
        # Bug fix: CustomConfig.__init__ stores annotation_type on the config
        # object, so read it from there. The previous
        # self.config_kwargs["annotation_type"] raised KeyError whenever the
        # caller relied on the default value.
        self.annotation_type = self.config.annotation_type
        return datasets.DatasetInfo(
            # Use the module-level metadata instead of hardcoded placeholders.
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    self.annotation_type: annotation2type[self.annotation_type],
                    "sequence": datasets.Value("string"),
                    "pid": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Fetch the raw JSON file and declare the available splits.

        Parameters
        ----------
        dl_manager :
            ``datasets.DownloadManager`` used to fetch remote files.

        Returns
        -------
        list of datasets.SplitGenerator
            Only a TEST split: _URLS provides a single (test) file.
        """
        # Bug fix: download the file referenced by _URLS instead of assuming
        # a local "test.json" exists in the current working directory.
        test_path = dl_manager.download(_URLS["first_domain"])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={"filepath": test_path},
            ),
        ]

    def _generate_examples(self, filepath):
        """Yield ``(key, example)`` pairs for the selected annotation type.

        Parameters
        ----------
        filepath :
            Path to the downloaded JSON file; maps each annotation type to a
            list of records with "seq", "pid", and "content" keys.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        counter = 0
        # Only the column selected by the config is emitted; any other
        # annotation types present in the file are skipped.
        for annotation_type, samples in data.items():
            if annotation_type != self.annotation_type:
                continue
            for elem in samples:
                # Records whose content is the [None] sentinel carry no
                # usable annotation and are dropped.
                if elem["content"] != [None]:
                    yield counter, {
                        "sequence": elem["seq"],
                        "pid": elem["pid"],
                        annotation_type: elem["content"][0],
                    }
                    counter += 1
|
|
| |
|
|