| import os |
| import numpy as np |
| from enum import IntEnum |
| import datasets |
|
|
|
|
# Module-level logger wired into the `datasets` logging hierarchy.
logger = datasets.logging.get_logger(__name__)
|
|
|
|
# BibTeX entry for the RaptGen paper this simulation reproduces.
_CITATION = """\
@article{iwano2022generative,
title={Generative aptamer discovery using RaptGen},
author={Iwano, Natsuki and Adachi, Tatsuo and Aoki, Kazuteru and Nakamura, Yoshikazu and Hamada, Michiaki},
journal={Nature Computational Science},
pages={1--9},
year={2022},
publisher={Nature Publishing Group}
}
"""


# Short description pointing at the upstream RaptGen data module.
_DESCRIPTION = """\
https://github.com/hmdlab/raptgen/blob/master/raptgen/data.py
"""
|
|
|
|
class SNV(IntEnum):
    """Single-nucleotide variant types used to corrupt a planted motif."""
    Mutation = 0   # substitute one base at the pre-drawn mutation site
    Insertion = 1  # insert one random base at the pre-drawn insertion site
    Deletion = 2   # drop the base at the pre-drawn deletion site
|
|
|
|
class SequenceGenerator():
    """Generate synthetic DNA sequences that embed known motifs.

    Each sampled sequence contains one motif -- optionally corrupted by a
    single mutation, insertion, or deletion at fixed, pre-drawn positions --
    placed at a random offset inside a random ATGC region, with optional
    primers on both ends. All randomness flows through the *global* NumPy
    RNG seeded in ``__init__``, so outputs are fully determined by the seed
    and the order of calls.
    """

    def __init__(self, num_motifs=1, motif_length=10, motifs=None,
                 target_length=20, fix_random_region_length=True, error_rate=0.0,
                 generate_motifs=True, middle_insert_range=(2, 6),
                 seed=0, add_primer=True, forward_primer="AAAAA",
                 reverse_primer="GGGGG", one_side_proba=0.5, paired=False):
        """Draw (or accept) motifs and pre-select the per-motif error sites.

        Args:
            num_motifs: number of distinct motifs to plant.
            motif_length: length of each generated motif; must be >= 4 so
                three distinct error positions can be drawn.
            motifs: explicit motif strings; required when
                ``generate_motifs`` is False.
            target_length: total length of motif + random region, excluding
                primers.
            fix_random_region_length: accepted for API compatibility but
                currently unused.  # NOTE(review): confirm intended behavior
            error_rate: per-sequence probability of applying one SNV.
            generate_motifs: when True, draw random motifs; otherwise use
                ``motifs`` as given.
            middle_insert_range: half-open ``(low, high)`` range for the
                number of random bases inserted between motif halves in
                paired mode.
            seed: seed for the global NumPy RNG.
            add_primer: wrap every sample with the two primers.
            forward_primer: 5' primer string.
            reverse_primer: 3' primer string.
            one_side_proba: in paired mode, probability of keeping only one
                motif half.
            paired: enable split-motif ("paired") generation in ``sample``.
        """
        # Seed the global NumPy RNG: all subsequent draws are reproducible.
        np.random.seed(seed)

        if generate_motifs:
            self.motifs = ["".join(np.random.choice(
                list("ATGC"), motif_length)) for _ in range(num_motifs)]
        else:
            if motifs is None:
                # Fail fast with a clear message instead of an opaque
                # TypeError on first use of self.motifs.
                raise ValueError(
                    "motifs must be provided when generate_motifs is False")
            self.motifs = motifs

        # Pick three distinct positions in [1, motif_length) and assign them
        # (in draw order) to mutation, insertion, and deletion. Position 0
        # is never corrupted.
        self.error_indices = 1 + \
            np.argsort(np.random.random(size=motif_length-1))[:3]
        self.mut_idx, self.ins_idx, self.del_idx = self.error_indices

        # Log each motif with its potential error sites annotated:
        # [X>Y] = mutation site, [+] = insertion site, lowercase = deletion.
        logger.info(f"error rate is {error_rate*100:.1f}%")
        for idx, motif in enumerate(self.motifs):
            seq = [ch for ch in motif]
            mut = self.mutate(seq[self.mut_idx])
            if error_rate != 0:
                seq[self.mut_idx] = f"[{seq[self.mut_idx]}>{mut}]"
                seq[self.ins_idx] = f"[+]{seq[self.ins_idx]}"
                seq[self.del_idx] = f"{seq[self.del_idx].lower()}"
            seq = "".join(seq)
            logger.info(f"motif {idx} is {seq}")

        self.num_motifs = num_motifs
        self.error_rate = error_rate
        self.target_length = target_length
        self.forward_primer = forward_primer
        self.reverse_primer = reverse_primer
        self.add_primer = add_primer

        self.one_side_proba = one_side_proba
        self.middle_insert_range = middle_insert_range
        self.paired = paired

    def mutate(self, char):
        """Return the fixed substitution for ``char``: A>T, T>G, G>C, C>A.

        Raises ValueError if ``char`` is not one of "ATGC".
        """
        return "TGCA"["ATGC".index(char)]

    def sample_motif(self, n):
        """Sample ``n`` motif instances, each possibly carrying one SNV.

        Returns:
            tuple: ``(sequences, valid_masks, motif_indices)`` -- the motif
            strings, a 0/1 mask per string marking positions that still
            match the clean motif (0 = corrupted/inserted), and the index
            of the source motif for each sample.
        """
        motif_indices = np.random.randint(self.num_motifs, size=n)
        has_errors = np.random.random(size=n) < self.error_rate

        # list(SNV): give numpy an explicit 1-D population instead of the
        # enum class itself (which it may fail to coerce); the population
        # size, and hence the RNG consumption, is unchanged.
        error_types = np.random.choice(list(SNV), size=n)
        sequences = []
        valid_masks = []
        for motif_index, has_error, error_type in zip(motif_indices, has_errors, error_types):
            motif = self.motifs[motif_index]
            seq = [ch for ch in motif]
            mask = [1]*len(motif)
            if has_error:
                if error_type == SNV.Mutation:
                    seq[self.mut_idx] = self.mutate(seq[self.mut_idx])
                    mask[self.mut_idx] = 0
                elif error_type == SNV.Insertion:
                    # Prepend one random base; the mask gains a 0 there.
                    seq[self.ins_idx] = np.random.choice(
                        list("ATGC")) + seq[self.ins_idx]
                    mask.insert(self.ins_idx, 0)
                elif error_type == SNV.Deletion:
                    seq[self.del_idx] = ""
                    del mask[self.del_idx]
                else:
                    raise NotImplementedError
            seq = "".join(seq)
            sequences.append(seq)
            valid_masks.append(mask)
        return sequences, valid_masks, motif_indices.tolist()

    def sample(self, n=1, with_indices=True):
        """Sample ``n`` full-length sequences with per-position motif masks.

        Returns:
            ``(sequences, motif_masks)`` plus, when ``with_indices`` is
            True, the motif indices, and -- in paired mode -- the per-sample
            paired-type codes (0 = both halves, 1 = left only, 2 = right
            only) from :meth:`insert_in_the_middle`.
        """
        motifs, valid_masks, motif_indices = self.sample_motif(n)
        sequences = []
        motif_masks = []
        paired_indices = []
        for seq, mask in zip(motifs, valid_masks):
            if self.paired:
                seq, mask, idx = self.insert_in_the_middle(
                    seq, mask, nrange=self.middle_insert_range, one_side_proba=self.one_side_proba)
                paired_indices += [idx]
            random_region = "".join(np.random.choice(
                list("ATGC"), size=self.target_length-len(seq)))
            # Offset of the motif inside the random region. Guard the empty
            # region: np.random.randint(0) would raise ValueError.
            l = np.random.randint(len(random_region)) if random_region else 0
            if self.add_primer:
                sequences.append(
                    self.forward_primer + random_region[:l] + seq + random_region[l:] + self.reverse_primer)
                # Primer and random-region positions are masked with 0.
                motif_masks.append([0]*(len(self.forward_primer)+l)+mask+[0]*(len(random_region)-l+len(self.reverse_primer)))
            else:
                sequences.append(random_region[:l] + seq + random_region[l:])
                motif_masks.append([0]*l+mask+[0]*(len(random_region)-l))

        if self.paired and with_indices:
            return sequences, motif_masks, motif_indices, paired_indices
        elif with_indices:
            return sequences, motif_masks, motif_indices
        return sequences, motif_masks

    def insert_in_the_middle(self, sequence, mask, nrange=(2, 6), one_side_proba=0.5):
        """Split ``sequence`` in half and insert random bases in between.

        With probability ``one_side_proba`` only one half (left or right,
        chosen uniformly) is kept. The inserted bases are masked with 0.

        Returns:
            tuple: ``(new_sequence, new_mask, idx)`` where ``idx`` is
            0 = both halves kept, 1 = left half only, 2 = right half only.
        """
        n = np.random.randint(*nrange)
        if np.random.random() < one_side_proba:
            if np.random.choice(["l", "r"]) == "l":
                l_motif = sequence[:len(sequence)//2]
                r_motif = ""
                idx = 1
            else:
                l_motif = ""
                r_motif = sequence[len(sequence)//2:]
                idx = 2
        else:
            l_motif = sequence[:len(sequence)//2]
            r_motif = sequence[len(sequence)//2:]
            idx = 0
        seq = l_motif + "".join(np.random.choice(list("ATGC"), size=n)) + r_motif
        new_mask = mask[:len(l_motif)]+([0]*n)+mask[len(sequence)-len(r_motif):]
        return seq, new_mask, idx
|
|
|
|
# Pre-generated dataset variants hosted on the Hugging Face Hub. Configs
# whose name is a key here are loaded from these parquet files instead of
# being simulated at build time.
DATA_FILES = {"multiple-666": {"train": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/multiple-666-train.parquet",
                               "test": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/multiple-666-test.parquet"},
              "paired-666": {"train": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/paired-666-train.parquet",
                             "test": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/paired-666-test.parquet"},
              "paired-42": {"train": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/paired-42-train.parquet",
                            "test": "https://huggingface.co/datasets/thewall/Simulation/resolve/main/data/paired-42-test.parquet"},
              }
|
|
class SimulationConfig(datasets.BuilderConfig):
    """BuilderConfig carrying the simulation parameters for one variant.

    Extra keyword arguments (e.g. ``name``, ``version``) are forwarded to
    ``datasets.BuilderConfig``.
    """

    def __init__(self, n_seq, num_motifs=1, motif_length=10, error_rate=0.0, seed=0, add_primer=False, paired=False, **kwargs):
        # Python-3 zero-argument super(); identical behavior to the legacy
        # two-argument form used elsewhere in this era of scripts.
        super().__init__(**kwargs)
        self.n_seq = n_seq                # number of sequences to generate
        self.num_motifs = num_motifs      # distinct motifs to plant
        self.motif_length = motif_length  # length of each motif
        self.error_rate = error_rate      # per-sequence SNV probability
        self.seed = seed                  # RNG seed for reproducibility
        self.add_primer = add_primer      # wrap sequences with primers
        self.paired = paired              # split-motif (paired) mode
| |
| |
| |
| |
|
|
|
|
class Simulation(datasets.GeneratorBasedBuilder):
    """Dataset builder for RaptGen-style simulated aptamer sequences.

    Configs whose name appears in ``DATA_FILES`` ("multiple-666",
    "paired-666", "paired-42") are loaded from pre-generated parquet files
    and expose train and test splits; the remaining configs ("multiple",
    "paired") are generated on the fly with ``SequenceGenerator`` and
    expose a train split only.
    """

    BUILDER_CONFIGS = [
        SimulationConfig(name="multiple", num_motifs=10, error_rate=0.1, n_seq=10000, seed=0),
        SimulationConfig(name="paired", n_seq=5000, seed=0, paired=True),
        SimulationConfig(name="multiple-666", num_motifs=10, error_rate=0.1, n_seq=10000, seed=0),
        SimulationConfig(name="paired-666", n_seq=5000, seed=0, paired=True),
        SimulationConfig(name="paired-42", n_seq=10000, seed=0, paired=True),
    ]

    DEFAULT_CONFIG_NAME = "multiple-666"

    def _info(self):
        """Declare the feature schema shared by every config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("int32"),
                    "seq": datasets.Value("string"),
                    "motif": datasets.Value("string"),
                    "motif_ids": datasets.Value("int32"),
                    "motif_mask": datasets.Sequence(feature=datasets.Value("int32")),
                }
            ),
            homepage="https://github.com/hmdlab/raptgen/blob/master/raptgen/data.py",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Return split generators: downloaded parquet or fresh simulation."""
        if self.config.name in DATA_FILES:
            # Pre-generated variant: fetch both parquet files and re-read
            # them through the generic "parquet" builder.
            train_data_file = dl_manager.download(DATA_FILES[self.config.name]['train'])
            test_data_file = dl_manager.download(DATA_FILES[self.config.name]['test'])
            dataset = datasets.load_dataset("parquet", data_files={"train": train_data_file,
                                                                   "test": test_data_file})
            # NOTE(review): despite the "iterator_fn" key these are generator
            # *objects* created eagerly here; each can be consumed exactly
            # once by _generate_examples.
            train_iterator = self._iterator(dataset['train'])
            test_iterator = self._iterator(dataset['test'])
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"iterator_fn": train_iterator}),
                datasets.SplitGenerator(name=datasets.Split.TEST, gen_kwargs={"iterator_fn": test_iterator}),
            ]
        else:
            # On-the-fly simulation: only a train split is produced.
            kwargs = {"num_motifs": self.config.num_motifs,
                      "motif_length": self.config.motif_length,
                      "error_rate": self.config.error_rate,
                      "seed": self.config.seed,
                      "add_primer": self.config.add_primer,
                      "sample_num": self.config.n_seq,
                      "paired": self.config.paired
                      }
            iterator = self._sample(**kwargs)

            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"iterator_fn": iterator}),
            ]

    def _sample(self, num_motifs, motif_length, error_rate, seed, add_primer, sample_num, paired):
        """Yield (key, example) pairs from a freshly built SequenceGenerator."""
        simulator = SequenceGenerator(num_motifs=num_motifs, motif_length=motif_length,
                                      error_rate=error_rate, seed=seed,
                                      add_primer=add_primer, paired=paired)
        data = simulator.sample(sample_num)
        motifs = simulator.motifs
        # data[-1] is paired_indices when paired=True, otherwise it is the
        # same list as data[2] (motif indices) -- so the "motif_ids" field
        # holds the paired-type code in paired mode while "motif" is always
        # looked up from the true motif index.
        # NOTE(review): confirm this field aliasing is intended.
        for key, (seq, mask, motif_ids, label) in enumerate(zip(data[0], data[1], data[2], data[-1])):
            yield key, {"id": key,
                        "seq": seq,
                        "motif": motifs[motif_ids],
                        "motif_ids": label,
                        "motif_mask": mask,
                        }

    def _iterator(self, dataset):
        """Yield (id, row) pairs from an already-loaded datasets split."""
        for row in dataset:
            yield row['id'], row

    def _generate_examples(self, iterator_fn):
        """Delegate example generation to the iterator built in _split_generators."""
        yield from iterator_fn
|
|
|
|
if __name__ == "__main__":
    from datasets import load_dataset

    # Smoke test: build the held-out split of the pre-generated paired
    # variant through this loading script.
    dataset = load_dataset("simulation.py", name="paired-666", split="test")