adding source files
Browse files- src/seq_cluster.sh +5 -0
- src/set_splits.sh +33 -0
- src/splitter.py +40 -0
src/seq_cluster.sh
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Cluster the sequences in iiab_db.fasta with MMseqs2 easy-linclust.
# Produces iiab_db_cluster.tsv (rep<TAB>member pairs) and related output
# files prefixed "iiab_db"; tmp/ holds MMseqs2 intermediate data.

# Fail fast on any error, unset variable, or broken pipe.
set -euo pipefail

# -p: succeed even if tmp/ is left over from a previous run
# (plain `mkdir tmp` aborts with "File exists").
mkdir -p tmp

mmseqs easy-linclust iiab_db.fasta iiab_db tmp
|
src/set_splits.sh
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Partition iiab_db.csv into train/validate/test CSVs according to which
# split each sequence ID was assigned to by splitter.py (train.fasta,
# val.fasta, test.fasta). Rows are matched on column 1 (the sequence ID).

# Fail fast: without this, a failed grep would leave stale/empty key
# files silently feeding the awk join below.
set -euo pipefail

# Extract sequence IDs from each split FASTA header: drop the leading ">"
# and anything after the first whitespace.
grep ">" test.fasta | sed 's/>//;s/[[:space:]].*//' > test_keys.txt
grep ">" train.fasta | sed 's/>//;s/[[:space:]].*//' > train_keys.txt
grep ">" val.fasta | sed 's/>//;s/[[:space:]].*//' > validate_keys.txt

# Load the three key files into lookup sets, then route each row of
# iiab_db.csv to the split containing its ID. The header row (FNR == 1)
# is copied to all three outputs. Rows whose ID is in none of the sets
# are intentionally dropped.
awk -F',' '
FILENAME == "test_keys.txt" { test[$1]; next }
FILENAME == "train_keys.txt" { train[$1]; next }
FILENAME == "validate_keys.txt" { validate[$1]; next }

FILENAME == "iiab_db.csv" {
    if (FNR == 1) {
        print $0 > "test_data.csv"
        print $0 > "train_data.csv"
        print $0 > "validate_data.csv"
        next
    }

    if ($1 in test) {
        print $0 >> "test_data.csv"
    } else if ($1 in train) {
        print $0 >> "train_data.csv"
    } else if ($1 in validate) {
        print $0 >> "validate_data.csv"
    }
}
' test_keys.txt train_keys.txt validate_keys.txt iiab_db.csv

# Remove the intermediate key files. (A leftover debug
# `head -n 5 test_keys.txt` that printed to stdout was removed.)
rm test_keys.txt train_keys.txt validate_keys.txt
|
| 33 |
+
|
src/splitter.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import contextlib

import pandas as pd
from Bio import SeqIO
|
| 3 |
+
|
| 4 |
+
def run_full_split_workflow(tsv_path, fasta_path, train_pct=0.6, val_pct=0.2, test_pct=0.2):
    """Split a clustered sequence set into train/val/test FASTA files.

    Reads an MMseqs2 cluster TSV (``rep<TAB>member``, one pair per line,
    no header) and assigns *whole clusters* to splits so that clustered
    (homologous) sequences never cross split boundaries. Clusters are
    assigned largest-first to whichever split is currently furthest below
    its target size, then the records from ``fasta_path`` are written to
    ``train.fasta`` / ``val.fasta`` / ``test.fasta`` in the current
    directory.

    Args:
        tsv_path: path to the two-column cluster TSV.
        fasta_path: FASTA file containing the member sequences.
        train_pct, val_pct, test_pct: target fraction of sequences per
            split; assumed to sum to 1.0 (not validated here).

    Returns:
        dict mapping split name -> number of records actually written
        (the original discarded these counts; returning them is
        backward-compatible since the previous return value was None).
    """
    df = pd.read_csv(tsv_path, sep='\t', names=['rep', 'member'])
    cluster_groups = df.groupby('rep')['member'].apply(list).to_dict()
    # Largest clusters first keeps the greedy fill close to the targets.
    sorted_reps = sorted(cluster_groups, key=lambda r: len(cluster_groups[r]), reverse=True)

    total_seqs = len(df)
    targets = {'train': total_seqs * train_pct, 'val': total_seqs * val_pct, 'test': total_seqs * test_pct}

    split_ids = {'train': set(), 'val': set(), 'test': set()}
    counts = {'train': 0, 'val': 0, 'test': 0}

    for rep in sorted_reps:
        members = cluster_groups[rep]
        # Greedy: hand the whole cluster to the split with the largest
        # remaining deficit relative to its target size.
        deficit = {k: targets[k] - counts[k] for k in split_ids}
        best_fit = max(deficit, key=deficit.get)
        split_ids[best_fit].update(members)
        counts[best_fit] += len(members)

    written_counts = {k: 0 for k in split_ids}
    # ExitStack guarantees every output file is closed even if parsing
    # raises mid-way (the original bare open() calls leaked on error).
    with contextlib.ExitStack() as stack:
        files = {k: stack.enter_context(open(f"{k}.fasta", "w")) for k in split_ids}
        for record in SeqIO.parse(fasta_path, "fasta"):
            for split_name, id_set in split_ids.items():
                if record.id in id_set:
                    SeqIO.write(record, files[split_name], "fasta")
                    written_counts[split_name] += 1
                    break

    return written_counts
|
| 36 |
+
|
| 37 |
+
if __name__ == "__main__":
    # Guarded so importing splitter.py (e.g. for testing) does not run
    # the whole split workflow as a side effect.
    # Inputs produced by seq_cluster.sh (MMseqs2 easy-linclust output).
    mmseqs_tsv = "iiab_db_cluster.tsv"
    fasta = "iiab_db.fasta"

    run_full_split_workflow(mmseqs_tsv, fasta)
|