| """ |
| Programmatic Index Creation |
| =========================== |
| Use this script to create an index from a DataFrame without the web interface. |
| |
| Example usage: |
| python create_index.py --input data.csv --sequence-column text |
| |
| Or use programmatically: |
| from create_index import create_index_from_dataframe |
| import pandas as pd |
| |
| df = pd.DataFrame({ |
| 'sequence': ['Hello world', 'Machine learning is great', ...], |
| 'category': ['greeting', 'tech', ...], |
| 'id': [1, 2, ...] |
| }) |
| |
| create_index_from_dataframe(df, sequence_column='sequence') |
| """ |
|
|
| import argparse |
| import pickle |
| from pathlib import Path |
| import numpy as np |
| import pandas as pd |
| from pickle import dump, load |
| import tiktoken |
| import faiss |
| import torch.nn as nn |
| from x_transformers import TransformerWrapper, Encoder |
| import torch |
| from tqdm import tqdm |
|
|
| |
# Select the compute device once at import time; all tensors in this module
# are moved to `device`, and `n_gpus` drives DataParallel + batch sizing below.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpus = torch.cuda.device_count()
print(f"Using device: {device}")
print(f"Available GPUs: {n_gpus}")
for i in range(n_gpus):
    print(f"  GPU {i}: {torch.cuda.get_device_name(i)}")
|
|
|
|
class GenomicTransformer(nn.Module):
    """Thin wrapper around an x-transformers encoder for genomic token sequences.

    Exposes a single ``forward`` that either returns logits over the vocabulary
    or (with ``return_embeddings=True``) the final hidden states.
    """

    def __init__(self, vocab_size=40000, hidden_dim=32, layers=2, heads=3, max_length=6000):
        super().__init__()
        # Build the attention stack first, then hand it to the wrapper that
        # adds token embeddings and the output projection.
        attention_stack = Encoder(
            dim=hidden_dim,
            depth=layers,
            heads=heads,
            rotary_pos_emb=True,
            attn_orthog_projected_values=True,
            attn_orthog_projected_values_per_head=True,
            attn_flash=True,
        )
        self.model = TransformerWrapper(
            num_tokens=vocab_size,
            max_seq_len=max_length,
            attn_layers=attention_stack,
        )

    def forward(self, input_ids, return_embeddings=False):
        """Run the wrapped transformer.

        Args:
            input_ids: Long tensor of token ids, shape (batch, seq_len) —
                assumed from call sites in this file; confirm upstream.
            return_embeddings: When True, return hidden states instead of logits.
        """
        return self.model(input_ids, return_embeddings=return_embeddings)
|
|
|
|
| |
# On-disk artifacts produced by create_index_from_dataframe() and consumed
# by search_index(). Created relative to the current working directory.
DATA_DIR = Path("data")
DATA_DIR.mkdir(exist_ok=True)
INDEX_PATH = DATA_DIR / "faiss.index"       # serialized FAISS index
METADATA_PATH = DATA_DIR / "metadata.pkl"   # pickled DataFrame with "__sequence__" column
EMBEDDINGS_PATH = DATA_DIR / "embeddings.npy"
# NOTE(review): this constant is never used — embeddings come from the custom
# GenomicTransformer below, not a sentence-transformers model. Confirm and remove.
EMBEDDING_MODEL = "all-MiniLM-L6-v2"
|
|
# Rebuild the custom genomic BPE tokenizer from its saved components.
# Bug fix: the original opened and fully deserialized the same pickle twice
# (once per key) and never closed either file handle — load it once instead.
_TOKENIZER_COMPONENTS_PATH = "/user/hassanahmed.hassan/u21055/.project/dir.project/towards_better_genomic_models/data/tokenizer_components_bpe_with_repeats.pkl"
with open(_TOKENIZER_COMPONENTS_PATH, "rb") as _components_file:
    _tokenizer_components = load(_components_file)
pattern = _tokenizer_components['pattern']
mergable_ranks = _tokenizer_components['mergable_ranks']


recreated_enc = tiktoken.Encoding(
    name="genomic_bpe_recreated",
    pat_str=pattern,
    mergeable_ranks=mergable_ranks,
    special_tokens={}
)
|
|
| |
# Instantiate the embedding model at import time. The 512-dim / 12-layer /
# 8-head configuration overrides the class defaults; weights are random here —
# NOTE(review): no checkpoint is loaded anywhere in this file. Confirm whether
# pretrained weights should be restored before indexing.
MODEL = GenomicTransformer(
    vocab_size=40_000, hidden_dim=512, layers=12, heads=8
)


# Wrap for multi-GPU data parallelism BEFORE moving to device, then switch to
# eval mode so dropout/normalization behave deterministically during inference.
if n_gpus > 1:
    print(f"Using DataParallel across {n_gpus} GPUs")
    MODEL = nn.DataParallel(MODEL)


MODEL = MODEL.to(device)
MODEL.eval()
|
|
|
|
def create_index_from_dataframe(
    df: pd.DataFrame,
    sequence_column: str = "sequence",
    model=MODEL,
    encoder=recreated_enc,
    batch_size: int = 8,
    limit: int | None = None,
) -> dict:
    """
    Create a FAISS inner-product index from a pandas DataFrame.

    Embeds each sequence with `model` (mean-pooled over the token dimension),
    then writes the index, the metadata DataFrame, and the raw embeddings to
    INDEX_PATH / METADATA_PATH / EMBEDDINGS_PATH.

    Args:
        df: DataFrame containing sequences and metadata
        sequence_column: Name of the column containing text sequences
        model: The transformer model to use
        encoder: The tokenizer/encoder (tiktoken-style, with encode_batch)
        batch_size: Batch size for encoding (increase for multi-GPU)
        limit: If given, index only the first `limit` rows. The original code
            unconditionally truncated to 10 rows — almost certainly leftover
            debug code — so the default is now to index everything.

    Returns:
        dict with index statistics

    Raises:
        ValueError: if `sequence_column` is missing or `df` is empty.
    """
    if sequence_column not in df.columns:
        raise ValueError(f"Column '{sequence_column}' not found. Available: {list(df.columns)}")
    if len(df) == 0:
        raise ValueError("DataFrame is empty; nothing to index.")

    if limit is not None:
        df = df.iloc[:limit]
    df = df.copy()
    sequences = df[sequence_column].astype(str).tolist()
    # Stash the raw sequence alongside the metadata so search_index() can
    # return it without re-reading the original input.
    df["__sequence__"] = sequences

    print(f"Creating embeddings for {len(sequences)} sequences...")
    encodings = encoder.encode_batch(sequences)

    # With DataParallel the wrapper splits each batch across GPUs, so scale
    # the per-step batch to keep `batch_size` items per GPU.
    effective_batch_size = batch_size * n_gpus if n_gpus > 1 else batch_size
    print(f"Using effective batch size: {effective_batch_size}")

    batch_outputs = []
    for start in tqdm(range(0, len(encodings), effective_batch_size)):
        batch_encodings = encodings[start:start + effective_batch_size]

        # Right-pad with token id 0 to the longest sequence in the batch.
        # NOTE(review): the mean-pool below includes pad positions, so padding
        # dilutes short sequences' embeddings — confirm this is intended.
        max_len = max(len(enc) for enc in batch_encodings)
        padded = [enc + [0] * (max_len - len(enc)) for enc in batch_encodings]
        batch_tensor = torch.tensor(padded, dtype=torch.long, device=device)

        with torch.no_grad():
            hidden = model(batch_tensor, return_embeddings=True)

        # Mean-pool over the sequence dimension -> one vector per document.
        batch_outputs.append(hidden.mean(dim=1).cpu().numpy())

    # Stack once at the end instead of repeated list concatenation (O(n^2)).
    embeddings = np.vstack(batch_outputs).astype(np.float32)

    # Inner-product index. NOTE(review): vectors are NOT L2-normalized, so
    # scores are raw dot products, not cosine similarity — confirm intended.
    dimension = embeddings.shape[1]
    index = faiss.IndexFlatIP(dimension)
    index.add(embeddings)

    print("Saving index to disk...")
    faiss.write_index(index, str(INDEX_PATH))
    with open(METADATA_PATH, "wb") as f:
        pickle.dump(df, f)
    np.save(EMBEDDINGS_PATH, embeddings)

    stats = {
        "documents_indexed": index.ntotal,
        "embedding_dimension": dimension,
        # NOTE(review): placeholder string kept for backward compatibility;
        # consider reporting the actual model class name instead.
        "model": 'MODEL',
        "index_path": str(INDEX_PATH),
        "metadata_path": str(METADATA_PATH),
        "gpus_used": n_gpus
    }

    print(f"Index created successfully!")
    print(f"  - Documents: {stats['documents_indexed']}")
    print(f"  - Dimensions: {stats['embedding_dimension']}")
    print(f"  - GPUs used: {stats['gpus_used']}")

    return stats
|
|
|
|
def search_index(
    query: str,
    top_k: int = 10,
    model=MODEL,
    encoder=recreated_enc
) -> list[dict]:
    """Search the saved FAISS index for sequences similar to *query*.

    Loads the index and metadata written by create_index_from_dataframe(),
    embeds the query with the same model/tokenizer (mean-pooled), and returns
    up to *top_k* hits as dicts with "score", "sequence", and "metadata" keys.

    Raises:
        FileNotFoundError: if no index has been created yet.
    """
    if not INDEX_PATH.exists():
        raise FileNotFoundError("No index found. Create one first with create_index_from_dataframe()")

    index = faiss.read_index(str(INDEX_PATH))
    with open(METADATA_PATH, "rb") as fh:
        metadata = pickle.load(fh)

    # Embed the query exactly the way documents were embedded at index time.
    token_ids = encoder.encode_ordinary(query)
    ids_tensor = torch.tensor([token_ids]).long().to(device)
    with torch.no_grad():
        pooled = model(ids_tensor, return_embeddings=True).mean(dim=1).cpu().numpy()
    query_embedding = pooled.astype(np.float32)

    n_hits = min(top_k, index.ntotal)
    scores, positions = index.search(query_embedding, n_hits)

    results = []
    for score, pos in zip(scores[0], positions[0]):
        # FAISS pads with -1 when fewer than k neighbors exist.
        if pos == -1:
            continue
        record = metadata.iloc[pos].to_dict()
        sequence = record.pop("__sequence__", "")
        results.append({
            "score": float(score),
            "sequence": sequence,
            "metadata": record,
        })
    return results
|
|
|
|
def main():
    """CLI entry point: load a table and build the FAISS index from it.

    Bug fix: the module docstring advertises `--input data.csv`, but the
    original main() hardcoded a parquet path and accepted no input argument.
    `--input` now exists, defaulting to that same path for backward
    compatibility, and CSV vs parquet is chosen by file extension.
    """
    parser = argparse.ArgumentParser(description="Create semantic search index from CSV")
    parser.add_argument(
        "--input", "-i",
        default="/user/hassanahmed.hassan/u21055/.project/dir.project/towards_better_genomic_models/data/sample.parquet",
        help="Input table (.csv or .parquet)",
    )
    parser.add_argument("--sequence-column", "-c", default="seq_with_repeat_tokens", help="Column containing sequences")
    parser.add_argument("--batch-size", "-b", type=int, default=8, help="Batch size per GPU")

    args = parser.parse_args()

    input_path = Path(args.input)
    if input_path.suffix.lower() == ".csv":
        df = pd.read_csv(input_path)
    else:
        df = pd.read_parquet(input_path)
    print(f"Loaded {len(df)} rows with columns: {list(df.columns)}")

    create_index_from_dataframe(df, args.sequence_column, MODEL, recreated_enc, batch_size=args.batch_size)




if __name__ == "__main__":
    main()