| |
| """Evaluate Prompt Selection vs Random Baseline using cell-eval. |
| |
| Compares predicted B-cell perturbation results from two methods: |
| 1. Prompt Selection (embedding-based prompt ordering) |
| 2. Random Baseline (default Stack random prompt ordering) |
| |
| Usage: |
| python code/prompt_selection/evaluate_results.py --perturbation Dabrafenib |
| """ |
| from __future__ import annotations |
|
|
| import gc |
| import logging |
| import sys |
| from pathlib import Path |
|
|
# Make the vendored cell-eval package and the sibling prompt_selection
# package importable without an editable install.  Paths are prepended so
# the in-repo copies take precedence over any site-packages versions.
_THIS_DIR = Path(__file__).resolve().parent
_REPO_ROOT = _THIS_DIR.parents[1]
for _p in [
    str(_REPO_ROOT / "code" / "cell-eval" / "src"),
    str(_THIS_DIR.parent),
]:
    if _p not in sys.path:
        sys.path.insert(0, _p)
|
|
| import argparse |
|
|
| import anndata as ad |
| import numpy as np |
| import polars as pl |
| from scipy.sparse import issparse |
|
|
| from cell_eval import MetricsEvaluator |
| from prompt_selection import config as cfg |
|
|
# Configure root logging once at import time; all module loggers inherit
# this handler/format.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s [%(name)s] %(levelname)s: %(message)s",
)
LOGGER = logging.getLogger("evaluate_results")
|
|
| |
| |
| |
# obs column holding the small-molecule perturbation name.
PERTURBATION_COL = "sm_name"
# Vehicle control condition used as the reference perturbation.
CONTROL_NAME = "Dimethyl Sulfoxide"
# obs-column filter that selects the B-cell lineage subset.
CELL_TYPE_FILTER = {"broad_cell_class": "lymphocyte of b lineage"}
|
|
|
|
| |
| |
| |
|
|
def build_real_combined(pert_name: str, output_dir: Path) -> ad.AnnData:
    """Extract ground truth B cells (control + perturbation) from source data.

    Loads the full source AnnData, keeps only B-lineage cells under the DMSO
    control and under *pert_name*, concatenates them (inner join on genes),
    and writes the result to ``output_dir / "real_combined.h5ad"``.
    """
    LOGGER.info("Loading source data: %s", cfg.SOURCE_ADATA)
    source = ad.read_h5ad(str(cfg.SOURCE_ADATA))
    LOGGER.info("Source shape: %s", source.shape)

    lineage = CELL_TYPE_FILTER["broad_cell_class"]
    in_lineage = source.obs["broad_cell_class"] == lineage
    drug = source.obs[PERTURBATION_COL]

    control_B = source[in_lineage & (drug == CONTROL_NAME)].copy()
    real_pert_B = source[in_lineage & (drug == pert_name)].copy()
    # The full dataset is large; drop it before concatenating the subsets.
    del source
    gc.collect()

    LOGGER.info("Control B cells (DMSO): %d", control_B.n_obs)
    LOGGER.info("Real perturbed B cells (%s): %d", pert_name, real_pert_B.n_obs)

    if real_pert_B.n_obs == 0:
        raise ValueError(f"No ground truth B cells found for {pert_name}")

    real_combined = ad.concat([control_B, real_pert_B], join="inner")
    LOGGER.info(
        "real_combined: %d cells, sm_name: %s",
        real_combined.n_obs,
        real_combined.obs[PERTURBATION_COL].value_counts().to_dict(),
    )
    real_path = output_dir / "real_combined.h5ad"
    real_combined.write_h5ad(real_path)
    LOGGER.info("Saved real_combined to %s", real_path)
    return real_combined
|
|
|
|
def build_pred_combined(pred_path: Path, label: str) -> ad.AnnData:
    """Combine control B cells with predictions into a single AnnData.

    Reads the shared query-control file and the prediction file at
    *pred_path*, then concatenates them (inner join on genes) with unique
    obs names.  *label* is used only for log prefixes.
    """
    control = ad.read_h5ad(str(cfg.RESULTS_DIR / cfg.QUERY_CTRL_H5AD))
    prediction = ad.read_h5ad(str(pred_path))
    LOGGER.info(
        "[%s] Control: %d cells, Pred: %d cells",
        label, control.n_obs, prediction.n_obs,
    )

    merged = ad.concat([control, prediction], join="inner")
    # Concatenation can produce duplicate cell barcodes across the two files.
    merged.obs_names_make_unique()
    LOGGER.info(
        "[%s] combined: %d cells, sm_name: %s",
        label, merged.n_obs,
        merged.obs[PERTURBATION_COL].value_counts().to_dict(),
    )
    return merged
|
|
|
|
def align_genes(adata_pred: ad.AnnData, adata_real: ad.AnnData):
    """Ensure pred and real have identical var_names in the same order.

    Returns copies of both AnnData objects restricted to the genes they
    share; raises ``ValueError`` when the intersection is empty.
    """
    shared = adata_pred.var_names.intersection(adata_real.var_names)
    if len(shared) == 0:
        raise ValueError("No common genes between predicted and real data")
    LOGGER.info(
        "Gene alignment: pred=%d, real=%d, common=%d",
        adata_pred.n_vars, adata_real.n_vars, len(shared),
    )
    pred_aligned = adata_pred[:, shared].copy()
    real_aligned = adata_real[:, shared].copy()
    return pred_aligned, real_aligned
|
|
|
|
def densify_X(adata: ad.AnnData) -> None:
    """Convert sparse ``adata.X`` to a dense float32 ndarray in-place.

    Avoids repeated sparse→dense conversions downstream.  No-op when X is
    already dense.
    """
    if issparse(adata.X):
        # toarray() yields an ndarray directly, avoiding the legacy
        # np.matrix intermediate that todense() returns.
        adata.X = adata.X.toarray().astype(np.float32, copy=False)
|
|
|
|
def evaluate_one(pred_path, real_combined, label, output_dir):
    """Build pred_combined, align genes, run cell-eval, free memory.

    Returns the per-perturbation ``results`` and aggregated ``agg_results``
    produced by cell-eval's ``MetricsEvaluator``.
    """
    banner = "=" * 60
    LOGGER.info(banner)
    LOGGER.info("Evaluating: %s", label)
    LOGGER.info(banner)

    combined_pred = build_pred_combined(pred_path, label)
    aligned_pred, aligned_real = align_genes(combined_pred, real_combined)
    # The unaligned combined object is no longer needed; reclaim memory.
    del combined_pred
    gc.collect()

    LOGGER.info("[%s] Densifying expression matrices...", label)
    for matrix_holder in (aligned_pred, aligned_real):
        densify_X(matrix_holder)

    evaluator = MetricsEvaluator(
        adata_pred=aligned_pred,
        adata_real=aligned_real,
        control_pert=CONTROL_NAME,
        pert_col=PERTURBATION_COL,
        outdir=str(output_dir / f"celleval_{label}"),
        allow_discrete=True,
        num_threads=4,
    )
    results, agg_results = evaluator.compute(
        profile="full",
        write_csv=True,
        break_on_error=False,
    )

    # Free the large dense matrices before returning.
    del aligned_pred, aligned_real, evaluator
    gc.collect()

    return results, agg_results
|
|
|
|
| |
| |
| |
|
|
def _report_comparison(agg_ps, agg_bl, pert_name: str, output_dir: Path) -> None:
    """Build, save, and print the mean-metric comparison table.

    Joins the per-metric means of both methods into one long-format table
    with a ``diff`` column (prompt_selection - random_baseline) and writes
    it to ``comparison_mean.csv`` under *output_dir*.  Best-effort: failures
    are logged, not raised, since the per-method results are already on disk.
    """
    try:
        mean_ps = agg_ps.filter(pl.col("statistic") == "mean").drop("statistic")
        mean_bl = agg_bl.filter(pl.col("statistic") == "mean").drop("statistic")

        # Long format (one row per metric) so the two methods join on "metric".
        ps_long = mean_ps.unpivot(variable_name="metric", value_name="prompt_selection")
        bl_long = mean_bl.unpivot(variable_name="metric", value_name="random_baseline")

        comparison = ps_long.join(bl_long, on="metric")
        comparison = comparison.with_columns(
            (pl.col("prompt_selection") - pl.col("random_baseline")).alias("diff")
        )

        comparison_path = output_dir / "comparison_mean.csv"
        comparison.write_csv(str(comparison_path))

        print("\n" + "=" * 70)
        print(f"COMPARISON ({pert_name}): Prompt Selection vs Random Baseline (mean)")
        print("=" * 70)
        print(comparison)
        print(f"\nSaved to: {comparison_path}")
    except Exception as e:
        LOGGER.warning("Could not build comparison table: %s", e)


def main():
    """CLI entry point: evaluate both prediction methods against ground truth.

    Builds the ground-truth B-cell AnnData, runs cell-eval on the prompt
    selection and baseline predictions (each skipped with a warning if its
    file is missing), prints both aggregated results, and writes a mean
    comparison table when both evaluations succeeded.
    """
    parser = argparse.ArgumentParser(description="Evaluate Prompt Selection vs Random Baseline")
    parser.add_argument(
        "--perturbation", type=str, required=True,
        help="Perturbation name (e.g., Dabrafenib).",
    )
    parser.add_argument(
        "--output-dir", type=Path, default=None,
        help="Output directory for evaluation results (default: eval_results/<perturbation>).",
    )
    args = parser.parse_args()

    pert_name = args.perturbation
    pcfg = cfg.get_pert_config(pert_name)

    output_dir = args.output_dir if args.output_dir else pcfg.eval_dir
    output_dir.mkdir(parents=True, exist_ok=True)

    pred_ps_path = pcfg.results_dir / pcfg.final_result_h5ad
    pred_bl_path = pcfg.baseline_dir / pcfg.baseline_result_h5ad

    LOGGER.info("Step 1: Preparing ground truth data for %s", pert_name)
    real_combined = build_real_combined(pert_name, output_dir)

    # Evaluate each method independently; a failure in one must not block
    # the other, so errors are logged and the aggregate stays None.
    agg_ps = None
    agg_bl = None

    if pred_ps_path.exists():
        try:
            _, agg_ps = evaluate_one(pred_ps_path, real_combined, "prompt_selection", output_dir)
        except Exception as e:
            LOGGER.error("Evaluation (prompt_selection) failed: %s", e, exc_info=True)
    else:
        LOGGER.warning("Prompt selection result not found: %s", pred_ps_path)

    if pred_bl_path.exists():
        try:
            _, agg_bl = evaluate_one(pred_bl_path, real_combined, "baseline", output_dir)
        except Exception as e:
            LOGGER.error("Evaluation (baseline) failed: %s", e, exc_info=True)
    else:
        LOGGER.warning("Baseline result not found: %s", pred_bl_path)

    LOGGER.info("=" * 60)
    LOGGER.info("Comparing results for %s", pert_name)
    LOGGER.info("=" * 60)

    if agg_ps is not None:
        print(f"\n--- Prompt Selection ({pert_name}, aggregated) ---")
        print(agg_ps)

    if agg_bl is not None:
        print(f"\n--- Random Baseline ({pert_name}, aggregated) ---")
        print(agg_bl)

    if agg_ps is not None and agg_bl is not None:
        _report_comparison(agg_ps, agg_bl, pert_name, output_dir)

    LOGGER.info("Evaluation complete for %s. Results in %s", pert_name, output_dir)
|
|
|
|
# Script entry point: run the full evaluation when invoked directly.
if __name__ == "__main__":
    main()
|
|