# lfj-code / transfer / code / prompt_selection / run_baseline.py
# NOTE(review): the lines above/below were HuggingFace page residue from
# "Upload folder using huggingface_hub" (commit 0161e74, verified, ethan1115);
# kept here as comments so the module remains importable.
"""Random baseline generation using Stack's get_incontext_generation().
Uses random prompt ordering (the default Stack behaviour) instead of
embedding-based selection. Provides a baseline for comparison.
Usage:
python code/prompt_selection/run_baseline.py --perturbation Dabrafenib
"""
from __future__ import annotations
import argparse
import gc
import logging
import sys
from pathlib import Path
_THIS_DIR = Path(__file__).resolve().parent
if str(_THIS_DIR.parent) not in sys.path:
sys.path.insert(0, str(_THIS_DIR.parent))
import anndata as ad
import numpy as np
import torch
from scipy.sparse import csr_matrix, issparse
from stack.model_loading import load_model_from_checkpoint
from prompt_selection import config as cfg
# Configure root logging once at import time; child loggers (including
# LOGGER below) inherit the timestamped format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("prompt_selection.baseline")
def _filter_adata(adata: ad.AnnData, filters: dict) -> ad.AnnData:
"""Subset AnnData by column-value filters."""
mask = np.ones(adata.n_obs, dtype=bool)
for col, val in filters.items():
mask &= (adata.obs[col] == val).values
return adata[mask].copy()
def ensure_prompt_pert(pcfg: cfg.PertConfig):
    """Extract prompt_pert.h5ad if it doesn't exist yet.

    Returns True when the perturbed-cell prompt file exists (or was just
    written), False when the source data contains no matching cells.
    """
    pcfg.results_dir.mkdir(parents=True, exist_ok=True)
    pert_path = pcfg.results_dir / cfg.PROMPT_PERT_H5AD
    if pert_path.exists():
        return True

    LOGGER.info("prompt_pert not found, extracting from source data...")
    adata = ad.read_h5ad(str(cfg.SOURCE_ADATA))
    pert = _filter_adata(adata, pcfg.prompt_pert_filter)
    LOGGER.info("prompt_pert (%s): %d cells", pcfg.perturbation_name, pert.n_obs)

    found = pert.n_obs > 0
    if found:
        pert.write_h5ad(pert_path)
    else:
        LOGGER.warning("No T cells found for '%s'. Skipping.", pcfg.perturbation_name)

    # Free the (potentially large) source AnnData eagerly before returning.
    del adata, pert
    gc.collect()
    return found
def _run_generation(model, prompt_pert_path: str, query_ctrl_path: str):
    """Run Stack's in-context generation with random prompt ordering.

    Returns ``(predictions, test_logit)``; *test_logit* is None when the
    model returns predictions only instead of a tuple.
    """
    LOGGER.info("Running get_incontext_generation (random prompt baseline)...")
    result = model.get_incontext_generation(
        base_adata_or_path=prompt_pert_path,
        test_adata_or_path=query_ctrl_path,
        genelist_path=str(cfg.GENELIST_PATH),
        mode="mdm",
        num_steps=cfg.NUM_STEPS,
        prompt_ratio=cfg.PROMPT_RATIO,
        context_ratio=cfg.CONTEXT_RATIO,
        context_ratio_min=cfg.CONTEXT_RATIO_MIN,
        batch_size=cfg.BATCH_SIZE,
        num_workers=cfg.NUM_WORKERS,
    )
    if isinstance(result, tuple):
        predictions, test_logit = result
    else:
        predictions, test_logit = result, None
    return predictions, test_logit


def _build_result_adata(predictions, test_logit, query_ctrl_path: str,
                        pert_name: str) -> ad.AnnData:
    """Wrap raw predictions in an AnnData aligned to the query cells.

    Copies obs/var from the query-control file, stamps the perturbation
    name, and attaches per-cell generation logits when available.
    """
    query_ctrl = ad.read_h5ad(query_ctrl_path)
    # Store predictions sparsely for compact on-disk output.
    if issparse(predictions):
        pred_X = predictions
    else:
        pred_X = csr_matrix(np.asarray(predictions, dtype=np.float32))
    result_adata = ad.AnnData(
        X=pred_X,
        obs=query_ctrl.obs.copy(),
        var=query_ctrl.var.copy(),
    )
    result_adata.obs["sm_name"] = pert_name
    result_adata.obs["control"] = False
    if test_logit is not None:
        result_adata.obs["gen_logit"] = np.asarray(test_logit)
    return result_adata


def main():
    """CLI entry point: random-prompt baseline generation for one perturbation.

    Skips work when the output already exists or when no prompt cells are
    available for the requested perturbation.
    """
    parser = argparse.ArgumentParser(description="Random Baseline Generation")
    parser.add_argument(
        "--perturbation", type=str, required=True,
        help="Perturbation name (e.g., Dabrafenib).",
    )
    args = parser.parse_args()
    pert_name = args.perturbation
    pcfg = cfg.get_pert_config(pert_name)

    LOGGER.info("=" * 60)
    LOGGER.info("Random Baseline Generation — %s", pert_name)
    LOGGER.info("=" * 60)

    pcfg.baseline_dir.mkdir(parents=True, exist_ok=True)
    output_path = pcfg.baseline_dir / pcfg.baseline_result_h5ad
    if output_path.exists():
        LOGGER.info("Baseline result already exists: %s — skipping.", output_path)
        return

    # Ensure prompt_pert data exists before paying for the model load.
    has_data = ensure_prompt_pert(pcfg)
    if not has_data:
        LOGGER.warning("Skipping baseline for %s (no T cell data).", pert_name)
        return

    # --- Load model ---
    LOGGER.info("Loading model: %s", cfg.ALIGNED_CKPT)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = load_model_from_checkpoint(
        str(cfg.ALIGNED_CKPT),
        model_class="ICL_FinetunedModel",
        device=device,
    )

    # --- Locate input data ---
    query_ctrl_path = str(cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD)
    prompt_pert_path = str(pcfg.results_dir / cfg.PROMPT_PERT_H5AD)
    LOGGER.info("Query (test): %s", query_ctrl_path)
    LOGGER.info("Prompt (base): %s", prompt_pert_path)

    # --- Run random-prompt generation and persist the result ---
    predictions, test_logit = _run_generation(model, prompt_pert_path, query_ctrl_path)
    result_adata = _build_result_adata(predictions, test_logit, query_ctrl_path, pert_name)
    result_adata.write_h5ad(output_path)
    LOGGER.info("Saved baseline result: %s shape=%s", output_path, result_adata.shape)

    LOGGER.info("=" * 60)
    LOGGER.info("Random Baseline Generation — %s — Done", pert_name)
    LOGGER.info("=" * 60)
# Script entry point: run only when executed directly, not on import.
if __name__ == "__main__":
    main()