| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| """Script for the dataset containing the "promoter_all" and "enhancers" downstream tasks from the Nucleotide |
| Transformer paper.""" |
|
|
| from typing import List |
|
|
| import datasets |
| from Bio import SeqIO |
|
|
| |
| _CITATION = """\ |
| @article{dalla2023nucleotide, |
| title={The Nucleotide Transformer: Building and Evaluating Robust Foundation Models for Human Genomics}, |
| author={Dalla-Torre, Hugo and Gonzalez, Liam and Mendoza-Revilla, Javier and Carranza, Nicolas Lopez and Grzywaczewski, Adam Henryk and Oteri, Francesco and Dallago, Christian and Trop, Evan and Sirelkhatim, Hassan and Richard, Guillaume and others}, |
| journal={bioRxiv}, |
| pages={2023--01}, |
| year={2023}, |
| publisher={Cold Spring Harbor Laboratory} |
| } |
| """ |
|
|
| |
| _DESCRIPTION = """\ |
| Multilabel datasets used in the Nucleotide Transformer paper. |
| """ |
|
|
| _HOMEPAGE = "https://github.com/instadeepai/nucleotide-transformer" |
|
|
| _LICENSE = "https://github.com/instadeepai/nucleotide-transformer/LICENSE.md" |
|
|
| |
| |
| |
| _TASKS_NUM_LABELS_DTYPE = [ |
| ("deepstarr", 6, "float32"), |
| ("toy_classification", 2, "int32"), |
| ("toy_regression", 2, "float32"), |
| ] |
|
|
|
|
class NucleotideTransformerDownstreamTasksConfig(datasets.BuilderConfig):
    """BuilderConfig for the Nucleotide Transformer downstream tasks dataset."""

    def __init__(
        self, *args, task: str, num_labels: int = 2, dtype: str = "int32", **kwargs
    ):
        """BuilderConfig for one downstream task.

        Args:
            task (:obj:`str`): Task name; also used as the config's ``name`` and as
                the sub-directory the data files are read from.
            num_labels (:obj:`int`): Number of per-example labels exposed as
                ``label_0`` ... ``label_{num_labels - 1}`` features.
            dtype (:obj:`str`): ``datasets.Value`` dtype of the label features
                (e.g. ``"int32"`` or ``"float32"``).
            **kwargs: keyword arguments forwarded to super.
        """
        # BUG FIX: the original signature read `num_labels=int`, which made the
        # *type object* `int` the default value (annotation/default confusion);
        # any caller omitting num_labels would crash later in range(num_labels).
        super().__init__(
            *args,
            name=f"{task}",
            **kwargs,
        )
        self.task = task
        self.num_labels = num_labels
        self.dtype = dtype
|
|
|
|
class NucleotideTransformerDownstreamTasks(datasets.GeneratorBasedBuilder):
    """Generator-based builder for the tasks listed in ``_TASKS_NUM_LABELS_DTYPE``.

    Each example is a FASTA record whose header encodes the labels as
    ``name|label_0|label_1|...``; the sequence body becomes the ``sequence``
    feature.
    """

    VERSION = datasets.Version("1.1.0")
    BUILDER_CONFIG_CLASS = NucleotideTransformerDownstreamTasksConfig
    # One config per task, parameterized by label count and label dtype.
    BUILDER_CONFIGS = [
        NucleotideTransformerDownstreamTasksConfig(
            task=task, num_labels=num_labels, dtype=dtype
        )
        for (task, num_labels, dtype) in _TASKS_NUM_LABELS_DTYPE
    ]
    DEFAULT_CONFIG_NAME = "deepstarr"

    def _info(self):
        """Build the DatasetInfo: a string sequence/name plus one typed label
        feature per configured label."""
        features_dict = {
            "sequence": datasets.Value("string"),
            "name": datasets.Value("string"),
        }
        labels_dict = {
            f"label_{i}": datasets.Value(self.config.dtype)
            for i in range(self.config.num_labels)
        }
        features_dict.update(labels_dict)
        features = datasets.Features(features_dict)

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        """Download the per-task train/test FASTA files and declare the splits."""
        train_file = dl_manager.download_and_extract(self.config.task + "/train.fna")
        test_file = dl_manager.download_and_extract(self.config.task + "/test.fna")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"file": train_file}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"file": test_file}
            ),
        ]

    def _generate_examples(self, file):
        """Yield ``(key, example)`` pairs from a FASTA file.

        Labels are parsed from the ``|``-separated record name and cast to
        match the dtype declared in ``_info`` (FIX: the original always used
        ``float``, yielding floats into ``int32`` label features and relying
        on implicit downstream casting).

        Raises:
            ValueError: if a record's header carries fewer labels than
                ``self.config.num_labels`` (was a bare IndexError before).
        """
        # Choose the Python cast matching the declared Arrow dtype.
        cast = int if self.config.dtype.startswith("int") else float
        num_labels = self.config.num_labels

        with open(file, "rt") as f:
            for key, record in enumerate(SeqIO.parse(f, "fasta")):
                sequence, name = str(record.seq), str(record.name)
                raw_labels = name.split("|")[1:]
                if len(raw_labels) < num_labels:
                    raise ValueError(
                        f"Record {name!r} has {len(raw_labels)} labels, "
                        f"expected at least {num_labels}."
                    )

                example = {
                    "sequence": sequence,
                    "name": name,
                }
                example.update(
                    {f"label_{i}": cast(raw_labels[i]) for i in range(num_labels)}
                )
                yield key, example
|
|