| """Main pipeline: embedding-based prompt selection for Stack generation. |
| |
| Supports two modes: |
| --shared-only : Run shared steps only (download ckpt, extract query_ctrl |
| & prompt_ctrl, compute their embeddings). |
| --perturbation X: Run per-perturbation steps (extract prompt_pert, bridge |
| prediction, pert embeddings, custom generation). |
| |
| Execution order for per-perturbation (3 model loads): |
| Step 0' : Extract prompt_pert from source AnnData |
| Load 1 : bc_large_aligned.ckpt → bridge prediction → release |
| Load 2 : bc_large.ckpt → pert embeddings → release |
| Load 3 : bc_large_aligned.ckpt → custom generation → release |
| """ |
| from __future__ import annotations |
|
|
| import argparse |
| import gc |
| import logging |
| import sys |
| from pathlib import Path |
|
|
# Make the package importable when this file is executed as a script:
# prepend the parent directory (the repo root containing `stack/` and
# `prompt_selection/`) to sys.path exactly once.
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
    sys.path.insert(0, str(_THIS_DIR.parent))
|
|
| import anndata as ad |
| import numpy as np |
| import torch |
| from scipy.sparse import csr_matrix, issparse |
|
|
| from stack.model_loading import load_model_from_checkpoint |
|
|
| from prompt_selection import config as cfg |
| from prompt_selection.custom_generation import custom_generation_loop |
|
|
# Configure root logging once at import time; all pipeline steps log
# through the module-level LOGGER below.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("prompt_selection.pipeline")
|
|
|
|
| |
| |
| |
|
|
| def _release_model(model): |
| """Delete model and free GPU memory.""" |
| del model |
| gc.collect() |
| if torch.cuda.is_available(): |
| torch.cuda.empty_cache() |
|
|
|
|
| def _filter_adata(adata: ad.AnnData, filters: dict) -> ad.AnnData: |
| """Subset AnnData by column-value filters.""" |
| mask = np.ones(adata.n_obs, dtype=bool) |
| for col, val in filters.items(): |
| mask &= (adata.obs[col] == val).values |
| return adata[mask].copy() |
|
|
|
|
| |
| |
| |
|
|
def step0_download_checkpoint():
    """Download bc_large.ckpt from HuggingFace if not present.

    Idempotent: returns immediately when the checkpoint already exists.
    ``snapshot_download`` may place the file in a nested sub-directory, so
    the function searches recursively afterwards and moves the file to the
    expected location.

    Raises:
        FileNotFoundError: if the checkpoint is still missing after the
            download attempt.
    """
    if cfg.EMBED_CKPT.exists():
        LOGGER.info("Embedding checkpoint already exists: %s", cfg.EMBED_CKPT)
        return

    LOGGER.info("Downloading %s from HuggingFace...", cfg.HF_EMBED_REPO)
    from huggingface_hub import snapshot_download

    snapshot_download(
        repo_id=cfg.HF_EMBED_REPO,
        local_dir=str(cfg.EMBED_MODEL_DIR),
        allow_patterns=["bc_large.ckpt"],
    )
    if not cfg.EMBED_CKPT.exists():
        # The hub client may nest the file under repo sub-directories;
        # locate it recursively and move it into place.
        matches = sorted(cfg.EMBED_MODEL_DIR.rglob("bc_large.ckpt"))
        if matches:
            matches[0].rename(cfg.EMBED_CKPT)
    if not cfg.EMBED_CKPT.exists():
        # Raise explicitly rather than `assert`, which is stripped under
        # `python -O` and would silently skip this validation.
        raise FileNotFoundError(f"Failed to download {cfg.EMBED_CKPT}")
    LOGGER.info("Downloaded embedding checkpoint to %s", cfg.EMBED_CKPT)
|
|
|
|
def step0_extract_shared_subsets():
    """Write the query_ctrl and prompt_ctrl subsets shared by all perturbations.

    Skips each subset whose output file already exists; the (large) source
    AnnData is only loaded when at least one subset is missing.
    """
    cfg.RESULTS_DIR.mkdir(parents=True, exist_ok=True)

    query_out = cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD
    ctrl_out = cfg.RESULTS_DIR / cfg.PROMPT_CTRL_H5AD

    if query_out.exists() and ctrl_out.exists():
        LOGGER.info("Shared cell subsets already extracted, skipping.")
        return

    LOGGER.info("Loading source AnnData: %s", cfg.SOURCE_ADATA)
    source = ad.read_h5ad(str(cfg.SOURCE_ADATA))

    if not query_out.exists():
        subset = _filter_adata(source, cfg.QUERY_FILTER)
        LOGGER.info("query_ctrl: %d cells", subset.n_obs)
        subset.write_h5ad(query_out)
        del subset

    if not ctrl_out.exists():
        subset = _filter_adata(source, cfg.PROMPT_CTRL_FILTER)
        LOGGER.info("prompt_ctrl: %d cells", subset.n_obs)
        subset.write_h5ad(ctrl_out)
        del subset

    # Free the full dataset before any model gets loaded.
    del source
    gc.collect()
|
|
|
|
def step_extract_shared_embeddings():
    """Compute and cache embeddings for query_ctrl and prompt_ctrl.

    Loads bc_large.ckpt once, embeds each missing subset, saves the result
    as .npy, then releases the model. No-op when both .npy files exist.
    """
    targets = [
        (cfg.QUERY_EMB_NPY, cfg.QUERY_CTRL_H5AD),
        (cfg.PROMPT_CTRL_EMB_NPY, cfg.PROMPT_CTRL_H5AD),
    ]

    if all((cfg.RESULTS_DIR / name).exists() for name, _ in targets):
        LOGGER.info("Shared embeddings already exist, skipping.")
        return

    LOGGER.info("=== Loading bc_large.ckpt for shared embedding extraction ===")
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    embed_model = load_model_from_checkpoint(str(cfg.EMBED_CKPT), device=device)

    for npy_name, h5ad_name in targets:
        out_path = cfg.RESULTS_DIR / npy_name
        if out_path.exists():
            LOGGER.info("  %s already exists, skipping.", npy_name)
            continue

        LOGGER.info("  Extracting embeddings: %s -> %s", h5ad_name, npy_name)
        emb, _ = embed_model.get_latent_representation(
            adata_path=str(cfg.RESULTS_DIR / h5ad_name),
            genelist_path=str(cfg.GENELIST_PATH),
            batch_size=cfg.BATCH_SIZE,
            num_workers=cfg.NUM_WORKERS,
            show_progress=True,
        )
        np.save(out_path, emb)
        LOGGER.info("  Saved: %s, shape=%s", out_path, emb.shape)

    _release_model(embed_model)
|
|
|
|
def run_shared_steps():
    """Run all shared steps (checkpoint download, subset extraction, embeddings)."""
    banner = "=" * 60
    LOGGER.info(banner)
    LOGGER.info("Running shared steps")
    LOGGER.info(banner)

    # Steps run in dependency order: checkpoint first, then the subsets
    # it embeds, then the embeddings themselves.
    for step in (
        step0_download_checkpoint,
        step0_extract_shared_subsets,
        step_extract_shared_embeddings,
    ):
        step()

    LOGGER.info("Shared steps complete.")
|
|
|
|
| |
| |
| |
|
|
def step_extract_prompt_pert(pcfg: cfg.PertConfig):
    """Extract prompt_pert (T cells + drug X) for a specific perturbation.

    Returns True when a non-empty subset exists (freshly written or cached),
    False when the perturbation has no matching T cells — in that case a
    sentinel file is dropped so later runs can tell why it was skipped.
    """
    pcfg.results_dir.mkdir(parents=True, exist_ok=True)
    out_path = pcfg.results_dir / cfg.PROMPT_PERT_H5AD

    if out_path.exists():
        LOGGER.info("prompt_pert already exists for %s, skipping.", pcfg.perturbation_name)
        return True

    LOGGER.info("Loading source AnnData: %s", cfg.SOURCE_ADATA)
    source = ad.read_h5ad(str(cfg.SOURCE_ADATA))

    subset = _filter_adata(source, pcfg.prompt_pert_filter)
    LOGGER.info("prompt_pert (%s): %d cells", pcfg.perturbation_name, subset.n_obs)

    has_cells = subset.n_obs > 0
    if has_cells:
        subset.write_h5ad(out_path)
    else:
        LOGGER.warning("No T cells found for perturbation '%s'. Skipping.", pcfg.perturbation_name)
        (pcfg.results_dir / "SKIPPED_no_tcells.txt").touch()

    del source, subset
    gc.collect()
    return has_cells
|
|
|
|
def step_bridge_prediction(pcfg: cfg.PertConfig):
    """Generate predicted perturbation for control T cells using aligned model.

    Runs in-context generation (prompt_pert as prompt, prompt_ctrl as query),
    then wraps the predicted expression matrix in an AnnData that reuses the
    control obs/var and writes it to disk. No-op when the output exists.
    """
    pred_path = pcfg.results_dir / cfg.PREDICTED_PERT_H5AD
    if pred_path.exists():
        LOGGER.info("Predicted perturbation already exists for %s, skipping.", pcfg.perturbation_name)
        return

    LOGGER.info("=== Load 1: bc_large_aligned.ckpt for bridge prediction (%s) ===", pcfg.perturbation_name)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    aligned = load_model_from_checkpoint(
        str(cfg.ALIGNED_CKPT),
        model_class="ICL_FinetunedModel",
        device=device,
    )

    pert_h5ad = str(pcfg.results_dir / cfg.PROMPT_PERT_H5AD)
    ctrl_h5ad = str(cfg.RESULTS_DIR / cfg.PROMPT_CTRL_H5AD)

    LOGGER.info("Running bridge prediction: prompt_pert → prompt_ctrl")
    predicted_X, _ = aligned.get_incontext_generation(
        base_adata_or_path=pert_h5ad,
        test_adata_or_path=ctrl_h5ad,
        genelist_path=str(cfg.GENELIST_PATH),
        mode="mdm",
        num_steps=cfg.NUM_STEPS,
        prompt_ratio=cfg.PROMPT_RATIO,
        context_ratio=cfg.CONTEXT_RATIO,
        context_ratio_min=cfg.CONTEXT_RATIO_MIN,
        batch_size=cfg.BATCH_SIZE,
        num_workers=cfg.NUM_WORKERS,
    )

    # Free the GPU before the (CPU-side) AnnData assembly below.
    _release_model(aligned)

    ctrl = ad.read_h5ad(ctrl_h5ad)
    dense = predicted_X.toarray() if issparse(predicted_X) else np.asarray(predicted_X)

    predicted = ad.AnnData(
        X=csr_matrix(dense.astype(np.float32)),
        obs=ctrl.obs.copy(),
        var=ctrl.var.copy(),
    )
    predicted.obs["sm_name"] = pcfg.perturbation_name
    predicted.write_h5ad(pred_path)

    LOGGER.info("Saved predicted perturbation: %s (%d cells)", pred_path, predicted.n_obs)
    del ctrl, predicted, predicted_X, dense
    gc.collect()
|
|
|
|
def step_extract_pert_embeddings(pcfg: cfg.PertConfig):
    """Compute and cache embeddings for prompt_pert and predicted_pert.

    Mirrors the shared-embedding step but reads/writes inside the
    perturbation's own results directory. No-op when both .npy files exist.
    """
    targets = [
        (cfg.PROMPT_PERT_EMB_NPY, cfg.PROMPT_PERT_H5AD),
        (cfg.PREDICTED_PERT_EMB_NPY, cfg.PREDICTED_PERT_H5AD),
    ]

    if all((pcfg.results_dir / name).exists() for name, _ in targets):
        LOGGER.info("Pert embeddings already exist for %s, skipping.", pcfg.perturbation_name)
        return

    LOGGER.info("=== Load 2: bc_large.ckpt for pert embedding extraction (%s) ===", pcfg.perturbation_name)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    embed_model = load_model_from_checkpoint(str(cfg.EMBED_CKPT), device=device)

    for npy_name, h5ad_name in targets:
        out_path = pcfg.results_dir / npy_name
        if out_path.exists():
            LOGGER.info("  %s already exists, skipping.", npy_name)
            continue

        LOGGER.info("  Extracting embeddings: %s -> %s", h5ad_name, npy_name)
        emb, _ = embed_model.get_latent_representation(
            adata_path=str(pcfg.results_dir / h5ad_name),
            genelist_path=str(cfg.GENELIST_PATH),
            batch_size=cfg.BATCH_SIZE,
            num_workers=cfg.NUM_WORKERS,
            show_progress=True,
        )
        np.save(out_path, emb)
        LOGGER.info("  Saved: %s, shape=%s", out_path, emb.shape)

    _release_model(embed_model)
|
|
|
|
def step_custom_generation(pcfg: cfg.PertConfig):
    """Run MDM generation with per-step embedding-based prompt selection.

    Loads the aligned model, the two cell subsets, and all four precomputed
    embedding matrices, delegates to custom_generation_loop, and saves the
    generated cells (annotated with the perturbation name and final logits).
    No-op when the final result file already exists.
    """
    final_path = pcfg.results_dir / pcfg.final_result_h5ad
    if final_path.exists():
        LOGGER.info("Final result already exists for %s, skipping.", pcfg.perturbation_name)
        return

    LOGGER.info("=== Load 3: bc_large_aligned.ckpt for custom generation (%s) ===", pcfg.perturbation_name)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    aligned = load_model_from_checkpoint(
        str(cfg.ALIGNED_CKPT),
        model_class="ICL_FinetunedModel",
        device=device,
    )

    # Cell data consumed by the generation loop.
    query_h5ad = str(cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD)
    query_adata = ad.read_h5ad(query_h5ad)
    prompt_pert_adata = ad.read_h5ad(str(pcfg.results_dir / cfg.PROMPT_PERT_H5AD))

    # Precomputed embeddings: shared (query/ctrl) + per-perturbation (pred/pert).
    embs = {
        "query": np.load(cfg.RESULTS_DIR / cfg.QUERY_EMB_NPY),
        "ctrl": np.load(cfg.RESULTS_DIR / cfg.PROMPT_CTRL_EMB_NPY),
        "pred": np.load(pcfg.results_dir / cfg.PREDICTED_PERT_EMB_NPY),
        "pert": np.load(pcfg.results_dir / cfg.PROMPT_PERT_EMB_NPY),
    }

    LOGGER.info("Embeddings loaded: query=%s, ctrl=%s, pred_pert=%s, pert=%s",
                embs["query"].shape, embs["ctrl"].shape,
                embs["pred"].shape, embs["pert"].shape)

    generated_X, final_logit = custom_generation_loop(
        model=aligned,
        query_adata=query_adata,
        prompt_pert_adata=prompt_pert_adata,
        genelist_path=str(cfg.GENELIST_PATH),
        query_embeddings=embs["query"],
        prompt_ctrl_embeddings=embs["ctrl"],
        predicted_pert_embeddings=embs["pred"],
        prompt_pert_embeddings=embs["pert"],
        num_steps=cfg.NUM_STEPS,
        prompt_ratio=cfg.PROMPT_RATIO,
        context_ratio=cfg.CONTEXT_RATIO,
        context_ratio_min=cfg.CONTEXT_RATIO_MIN,
        top_k1=cfg.TOP_K1,
        batch_size=cfg.BATCH_SIZE,
        num_workers=cfg.NUM_WORKERS,
    )

    _release_model(aligned)

    # NOTE(review): the query subset is re-read here instead of reusing
    # query_adata — presumably because the generation loop may modify it
    # in place; confirm before simplifying.
    clean_query = ad.read_h5ad(query_h5ad)
    result_adata = ad.AnnData(
        X=generated_X,
        obs=clean_query.obs.copy(),
        var=clean_query.var.copy(),
    )
    result_adata.obs["sm_name"] = pcfg.perturbation_name
    result_adata.obs["control"] = False
    result_adata.obs["gen_logit"] = np.asarray(final_logit)

    result_adata.write_h5ad(final_path)
    LOGGER.info("Saved final result: %s (%d cells)", final_path, result_adata.n_obs)
|
|
|
|
def run_perturbation(pert_name: str):
    """Run all per-perturbation steps for a given drug.

    Requires the shared artifacts (subsets + embeddings) to exist already;
    raises FileNotFoundError otherwise. Exits early with a warning when the
    perturbation has no T cell data.
    """
    pcfg = cfg.get_pert_config(pert_name)

    banner = "=" * 60
    LOGGER.info(banner)
    LOGGER.info("Running perturbation: %s", pert_name)
    LOGGER.info(banner)

    # Fail fast if any shared prerequisite is missing.
    required = (cfg.QUERY_CTRL_H5AD, cfg.PROMPT_CTRL_H5AD,
                cfg.QUERY_EMB_NPY, cfg.PROMPT_CTRL_EMB_NPY)
    missing = [f for f in required if not (cfg.RESULTS_DIR / f).exists()]
    if missing:
        raise FileNotFoundError(
            f"Shared file {missing[0]} not found. Run --shared-only first."
        )

    if not step_extract_prompt_pert(pcfg):
        LOGGER.warning("Skipping %s (no T cell data).", pert_name)
        return

    step_bridge_prediction(pcfg)
    step_extract_pert_embeddings(pcfg)
    step_custom_generation(pcfg)

    LOGGER.info(banner)
    LOGGER.info("Perturbation %s complete!", pert_name)
    LOGGER.info("Final result: %s", pcfg.results_dir / pcfg.final_result_h5ad)
    LOGGER.info(banner)
|
|
|
|
| |
| |
| |
|
|
def main():
    """CLI entry point: dispatch to shared or per-perturbation steps."""
    parser = argparse.ArgumentParser(
        description="Prompt Selection Pipeline for Stack generation."
    )
    parser.add_argument(
        "--perturbation", type=str, default=None,
        help="Perturbation name (e.g., Dabrafenib). Required unless --shared-only.",
    )
    parser.add_argument(
        "--shared-only", action="store_true",
        help="Only run shared steps (extract query_ctrl, prompt_ctrl, their embeddings).",
    )
    args = parser.parse_args()

    if args.shared_only:
        run_shared_steps()
        return
    if args.perturbation:
        run_perturbation(args.perturbation)
        return
    # Neither mode selected: argparse prints usage and exits non-zero.
    parser.error("Either --perturbation or --shared-only is required.")
|
|
|
|
# Script entry point; keeps the module import-safe for reuse.
if __name__ == "__main__":
    main()
|
|