# Source: lfj-code/transfer/code/prompt_selection/prompt_selector.py
# (uploaded by ethan1115 via huggingface_hub, rev 0161e74)
"""Two-stage embedding-based prompt selection.

Stage 1: For each query batch, find the top-K1 most similar control
prompts based on cosine similarity in embedding space.

Stage 2: Map those K1 control prompts to their predicted perturbation
embeddings, average them, and find the top-n_base_cells real perturbed
prompts closest to that average.
"""
from __future__ import annotations
import numpy as np
from sklearn.metrics.pairwise import cosine_similarity
def select_prompt_indices(
    query_embeddings: np.ndarray,
    batch_global_indices: np.ndarray,
    prompt_ctrl_embeddings: np.ndarray,
    predicted_pert_embeddings: np.ndarray,
    prompt_pert_embeddings: np.ndarray,
    n_base_cells: int,
    top_k1: int = 512,
) -> np.ndarray:
    """Select best prompt indices for a single query batch.

    Stage 1 shortlists the ``top_k1`` control prompts most cosine-similar
    to the mean embedding of the query batch.  Stage 2 averages the
    predicted perturbation embeddings of those hits and returns the
    ``n_base_cells`` real perturbed prompts closest to that average.

    Parameters
    ----------
    query_embeddings : (N_query, D)
        Precomputed embeddings for all query cells.
    batch_global_indices : (batch_len,)
        Indices into query_embeddings for the current batch.
    prompt_ctrl_embeddings : (N_ctrl, D)
        Embeddings of control prompt cells.
    predicted_pert_embeddings : (N_ctrl, D)
        Embeddings of predicted perturbation cells (1-to-1 with ctrl).
    prompt_pert_embeddings : (N_pert, D)
        Embeddings of real perturbed prompt cells.
    n_base_cells : int
        Number of prompt cells to select.
    top_k1 : int
        Number of control prompts to shortlist in stage 1.

    Returns
    -------
    selected_idx : (min(n_base_cells, N_pert),) ndarray of int
        Indices into the prompt_pert AnnData for the selected prompts.
        The ordering within the returned array is unspecified
        (``argpartition`` does not sort the selected set).
    """
    # Mean embedding of the current query batch, shape (D,).
    mean_query_emb = query_embeddings[batch_global_indices].mean(axis=0)

    # Stage 1: cosine similarity against all control prompts.
    sim_ctrl = _cosine_sim_to_rows(mean_query_emb, prompt_ctrl_embeddings)
    k1 = min(top_k1, sim_ctrl.shape[0])
    # argpartition gives the exact top-k1 set without a full O(n log n) sort.
    top_k1_idx = np.argpartition(sim_ctrl, -k1)[-k1:]

    # Stage 2: average the predicted perturbation embeddings of the
    # stage-1 hits, then rank the real perturbed prompts against it.
    pred_emb_mean = predicted_pert_embeddings[top_k1_idx].mean(axis=0)
    sim_pert = _cosine_sim_to_rows(pred_emb_mean, prompt_pert_embeddings)

    n_select = min(n_base_cells, sim_pert.shape[0])
    selected_idx = np.argpartition(sim_pert, -n_select)[-n_select:]
    return selected_idx


def _cosine_sim_to_rows(vec: np.ndarray, mat: np.ndarray) -> np.ndarray:
    """Cosine similarity of ``vec`` (D,) to each row of ``mat`` (N, D).

    Pure-NumPy replacement for sklearn's ``cosine_similarity`` for the
    single-vector-vs-matrix case used here: avoids the (1, N) array
    round-trip and the sklearn dependency.  Zero-norm vectors are treated
    as having unit norm, so they yield similarity 0 rather than NaN —
    this matches sklearn's ``normalize`` behaviour.
    """
    vec_norm = np.linalg.norm(vec)
    row_norms = np.linalg.norm(mat, axis=1)
    # Substitute 1.0 for zero norms so division is safe and zero rows map to 0.
    denom = np.where(row_norms == 0.0, 1.0, row_norms) * (vec_norm or 1.0)
    return (mat @ vec) / denom