ethan1115 committed on
Commit
ea634b4
·
verified ·
1 Parent(s): e2409ff

Upload folder using huggingface_hub

Browse files
GRN/grn_sacfm/_bootstrap_scdfm.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Bootstrap scDFM imports by creating missing __init__.py files and loading
its modules under a 'scdfm_src' prefix in sys.modules.

This module MUST be imported before any CCFM src imports.
"""

import sys
import os
import types

# Absolute path to the scDFM checkout: two levels up from this file,
# then transfer/code/scDFM.
_SCDFM_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", "transfer", "code", "scDFM")
)

# Directories in scDFM that need __init__.py to be proper packages
# (scDFM itself ships without them).
_DIRS_NEEDING_INIT = [
    "src",
    "src/models",
    "src/models/origin",
    "src/data_process",
    "src/tokenizer",
    "src/script",
    "src/models/perturbation",
]
26
+
27
+
28
+ def _ensure_init_files():
29
+ """Create missing __init__.py files in scDFM so it can be imported as packages."""
30
+ created = []
31
+ for d in _DIRS_NEEDING_INIT:
32
+ init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
33
+ if not os.path.exists(init_path):
34
+ with open(init_path, "w") as f:
35
+ f.write("# Auto-created by CCFM bootstrap\n")
36
+ created.append(init_path)
37
+ return created
38
+
39
+
40
def bootstrap():
    """Load scDFM's src package as 'scdfm_src' in sys.modules.

    Steps:
      1. Pop any of *our* 'src.*' modules out of sys.modules so scDFM's
         identically-named top-level 'src' package can be imported.
      2. Import the scDFM modules we need, then alias every 'src*' entry
         under a 'scdfm_src*' key.
      3. Always (in ``finally``) drop the un-prefixed scDFM entries,
         restore our own 'src.*' modules, and clean up sys.path — even if
         one of the imports raised.

    Idempotent: a second call returns immediately.
    """
    if "scdfm_src" in sys.modules:
        return  # Already bootstrapped

    # Create missing __init__.py files
    _ensure_init_files()

    # Save CCFM's src modules so they can be restored verbatim afterwards.
    saved = {}
    for key in list(sys.modules.keys()):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # Add scDFM root to path (front, so its 'src' wins during the imports).
    sys.path.insert(0, _SCDFM_ROOT)

    try:
        # Import scDFM modules (their relative imports work now)
        import src as scdfm_src_pkg
        import src.models
        import src.models.origin
        import src.models.origin.blocks
        import src.models.origin.layers
        import src.models.origin.model
        import src.flow_matching
        import src.flow_matching.path
        import src.flow_matching.path.path
        import src.flow_matching.path.path_sample
        import src.flow_matching.path.affine
        import src.flow_matching.path.scheduler
        import src.flow_matching.path.scheduler.scheduler
        # Skip src.flow_matching.ot (requires 'ot' package, not needed for CCFM)
        import src.utils
        import src.utils.utils
        import src.tokenizer
        import src.tokenizer.gene_tokenizer
        # Skip src.data_process (has heavy deps like bs4, rdkit)
        # We handle data loading separately in CCFM

        # Re-register all under scdfm_src.* prefix so later code can do
        # e.g. `import scdfm_src.models.origin.model`.
        for key in list(sys.modules.keys()):
            if key == "src" or key.startswith("src."):
                new_key = "scdfm_" + key
                sys.modules[new_key] = sys.modules[key]

    finally:
        # Remove scDFM's src.* entries (the aliased scdfm_* keys survive).
        for key in list(sys.modules.keys()):
            if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
                del sys.modules[key]

        # Restore CCFM's src modules
        for key, mod in saved.items():
            sys.modules[key] = mod

        # Remove scDFM from front of path
        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)


# Run at import time: importing this module IS the bootstrap.
bootstrap()
GRN/grn_sacfm/config/__init__.py ADDED
File without changes
GRN/grn_sacfm/config/config_sacfm.py ADDED
@@ -0,0 +1,75 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """SA-CFM config — simplified from SBConfig, no learned sigma / score / SDE."""
2
+
3
+ import os
4
+ from dataclasses import dataclass
5
+
6
+
7
@dataclass
class SACFMConfig:
    """Hyper-parameters for SA-CFM training and evaluation.

    Fields are grouped as: backbone architecture, data pipeline, MMD loss,
    data splitting, SA-CFM-specific knobs, optimization, ODE inference, and
    experiment bookkeeping. Field names/defaults form the CLI surface (tyro).
    """

    # --- Backbone model ---
    model_type: str = "sacfm"
    batch_size: int = 48
    ntoken: int = 512
    d_model: int = 128
    nhead: int = 8
    nlayers: int = 4
    d_hid: int = 512
    lr: float = 5e-5
    steps: int = 200000
    eta_min: float = 1e-6
    devices: str = "1"
    test_only: bool = False

    # --- Data pipeline ---
    data_name: str = "norman"
    perturbation_function: str = "crisper"
    noise_type: str = "Gaussian"
    print_every: int = 5000
    mode: str = "predict_y"
    result_path: str = "/home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/sacfm"
    fusion_method: str = "differential_perceiver"
    infer_top_gene: int = 1000
    n_top_genes: int = 5000
    checkpoint_path: str = ""

    # --- MMD loss ---
    gamma: float = 0.5
    use_mmd_loss: bool = True

    # --- Data split ---
    split_method: str = "additive"
    fold: int = 1
    use_negative_edge: bool = True
    topk: int = 30

    # --- SA-CFM specific ---
    sigma_scale: float = 0.3        # sigma_aug = sigma_scale * gene_pert_std
    sigma_min_clip: float = 0.01    # lower bound for sigma_aug
    sigma_max_clip: float = 2.0     # upper bound for sigma_aug
    gene_weight_alpha: float = 2.0  # gene importance weight strength
    use_ot: bool = False            # OT coupling off by default (same as scDFM)

    # --- Training ---
    ema_decay: float = 0.9999
    warmup_steps: int = 2000

    # --- ODE inference ---
    ode_method: str = "rk4"
    ode_steps: int = 20
    eval_batch_size: int = 32

    # --- Experiment ---
    exp_name: str = "sacfm_baseline"

    def __post_init__(self):
        # The norman dataset is always run with the full 5000-gene HVG set,
        # regardless of what was passed on the command line.
        if self.data_name == "norman":
            self.n_top_genes = 5000

    def make_path(self):
        """Output directory for this run; exp_name overrides the auto name."""
        if self.exp_name:
            return os.path.join(self.result_path, self.exp_name)
        auto_name = (
            f"sacfm-{self.data_name}-f{self.fold}"
            f"-d{self.d_model}-ss{self.sigma_scale}-gw{self.gene_weight_alpha}"
        )
        return os.path.join(self.result_path, auto_name)
GRN/grn_sacfm/run_sacfm.sh ADDED
@@ -0,0 +1,40 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=12:00:00
#PJM -j
#PJM -N sacfm

# Reduce CUDA allocator fragmentation over a long (200k-step) run.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# Activate the project virtualenv and move to the SA-CFM package root
# (run_sacfm.py resolves config/ and src/ relative to this directory).
source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate
cd /home/hp250092/ku50001222/qian/aivc/lfj/GRN/grn_sacfm

# Launch SA-CFM training. Flags mirror fields of SACFMConfig
# (config/config_sacfm.py); most repeat the dataclass defaults explicitly.
python scripts/run_sacfm.py \
    --d-model 128 \
    --nhead 8 \
    --nlayers 4 \
    --d-hid 512 \
    --batch-size 48 \
    --lr 5e-5 \
    --steps 200000 \
    --fusion-method differential_perceiver \
    --perturbation-function crisper \
    --data-name norman \
    --infer-top-gene 1000 \
    --n-top-genes 5000 \
    --noise-type Gaussian \
    --gamma 0.5 \
    --use-mmd-loss \
    --split-method additive \
    --fold 1 \
    --use-negative-edge \
    --topk 30 \
    --ema-decay 0.9999 \
    --warmup-steps 2000 \
    --ode-method rk4 \
    --ode-steps 20 \
    --sigma-scale 0.3 \
    --gene-weight-alpha 2.0 \
    --result-path /home/hp250092/ku50001222/qian/aivc/lfj/GRN/result/sacfm \
    --exp-name sacfm_ss03_gw2
GRN/grn_sacfm/scripts/run_sacfm.py ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Training and evaluation entry point for SA-CFM.

Source-Anchored Conditional Flow Matching:
- x_0 = source + data-driven sigma * eps (not pure noise)
- Standard affine flow matching (no bridge / SDE)
- Gene-weighted velocity loss
- ODE inference from clean source
"""

import sys
import os

# Make the grn_sacfm package root importable before anything else.
_PROJECT_ROOT = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, _PROJECT_ROOT)

# Side-effect import: aliases scDFM's 'src' package as 'scdfm_src' in
# sys.modules; must run before any `from src...` / `scdfm_src...` import.
import _bootstrap_scdfm  # noqa: F401

import copy
import csv
import torch
import tyro
import tqdm
import numpy as np
import pandas as pd
import anndata as ad
from torch.utils.data import DataLoader
from tqdm import trange
from accelerate import Accelerator, DistributedDataParallelKwargs
from torch.optim.lr_scheduler import LinearLR, CosineAnnealingLR, SequentialLR
from torch.utils.tensorboard import SummaryWriter

from config.config_sacfm import SACFMConfig as Config
from src.data.data import get_data_classes
from src._scdfm_imports import ScDFMModel, save_checkpoint, load_checkpoint, process_vocab, GeneVocab
from src.denoiser import SACFMDenoiser
from cell_eval import MetricsEvaluator

# Root of the sibling repo holding the scDFM checkout and its data.
_REPO_ROOT = os.path.normpath(os.path.join(_PROJECT_ROOT, "..", "..", "transfer", "code"))
40
+
41
+
42
def compute_gene_stats(train_sampler, config, device):
    """Pre-compute per-gene perturbation statistics from training data.

    Returns:
        sigma_aug: (G,) float32 tensor — per-gene augmentation noise scale,
            ``config.sigma_scale * std`` of the mean perturbation effect
            across perturbations, clipped to
            [config.sigma_min_clip, config.sigma_max_clip].
        gene_weight: (G,) float32 tensor — per-gene loss weight,
            ``1 + alpha * std / median(std)``; upweights genes whose
            expression responds strongly to perturbations.
    """
    # assumes train_sampler.adata is an AnnData with obs columns
    # "is_control" and "perturbation_covariates" — TODO confirm against
    # the scDFM data pipeline
    adata = train_sampler.adata
    ctrl_mask = adata.obs["is_control"].values.astype(bool)
    ctrl_X = adata[ctrl_mask].X
    ctrl_mean = np.asarray(ctrl_X.mean(axis=0)).flatten()

    # All perturbation labels except the control pseudo-label.
    pert_names = adata.obs["perturbation_covariates"].unique()
    pert_names = [p for p in pert_names if p != "control+control"]

    # Mean expression shift (perturbed mean - control mean) per perturbation.
    pert_effects = []
    for pn in pert_names:
        mask = adata.obs["perturbation_covariates"] == pn
        pert_mean = np.asarray(adata[mask].X.mean(axis=0)).flatten()
        pert_effects.append(pert_mean - ctrl_mean)

    pert_effects = np.stack(pert_effects)  # (n_pert, G)
    gene_pert_std = pert_effects.std(axis=0)  # (G,)

    # Sigma augmentation: scaled per-gene perturbation std
    sigma_aug = np.clip(
        config.sigma_scale * gene_pert_std,
        config.sigma_min_clip, config.sigma_max_clip,
    )
    sigma_aug = torch.tensor(sigma_aug, dtype=torch.float32, device=device)

    # Gene importance weight: upweight DE genes
    # NOTE(review): if every gene_pert_std is zero, the masked median is
    # taken over an empty array (NaN with a warning) — confirm training
    # data always has at least one responsive gene.
    median_std = np.median(gene_pert_std[gene_pert_std > 0])
    gene_weight = 1.0 + config.gene_weight_alpha * (gene_pert_std / max(median_std, 1e-8))
    gene_weight = torch.tensor(gene_weight, dtype=torch.float32, device=device)

    return sigma_aug, gene_weight
74
+
75
+
76
@torch.inference_mode()
def test(data_sampler, denoiser, accelerator, config, vocab, data_manager,
         batch_size=32, path_dir="./"):
    """Evaluate: generate predictions and compute cell-eval metrics.

    For each held-out perturbation, 128 randomly chosen control cells are
    pushed through the ODE sampler conditioned on that perturbation; the
    generated cells and the real perturbed cells are assembled into two
    AnnData objects and scored with cell_eval's MetricsEvaluator.

    Returns:
        First available aggregate metric (pearson_delta, then mse, then
        pr_auc) as a float on the main process; None on other ranks or if
        no metric column is populated.
    """
    device = accelerator.device
    gene_ids_test = vocab.encode(list(data_sampler.adata.var_names))
    gene_ids_test = torch.tensor(gene_ids_test, dtype=torch.long, device=device)

    perturbation_name_list = data_sampler._perturbation_covariates
    control_data = data_sampler.get_control_data()
    # perturbation_dict maps name -> id; invert it for id -> name lookups.
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # Control cells are included in both pred and real populations so the
    # evaluator has a shared "control" reference.
    all_pred = [control_data["src_cell_data"]]
    obs_pred = ["control"] * control_data["src_cell_data"].shape[0]
    all_real = [control_data["src_cell_data"]]
    obs_real = ["control"] * control_data["src_cell_data"].shape[0]

    for pert_name in perturbation_name_list:
        pert_data = data_sampler.get_perturbation_data(pert_name)
        target = pert_data["tgt_cell_data"]
        pert_id = pert_data["condition_id"].to(device)
        source = control_data["src_cell_data"].to(device)

        # For CRISPR-style conditioning the perturbation is encoded with
        # the gene vocab rather than a dedicated embedding table.
        if config.perturbation_function == "crisper":
            pert_name_crisper = [
                inverse_dict[int(p)] for p in pert_id[0].cpu().numpy()
            ]
            pert_id = torch.tensor(
                vocab.encode(pert_name_crisper), dtype=torch.long, device=device
            ).repeat(source.shape[0], 1)

        # Sample 128 random control cells as ODE starting points.
        idx = torch.randperm(source.shape[0])
        source = source[idx][:128]

        preds = []
        for i in trange(0, 128, batch_size, desc=pert_name):
            bs = source[i:i+batch_size]
            bp = pert_id[0].repeat(bs.shape[0], 1).to(device)
            # Unwrap DDP if the denoiser was prepared by accelerate.
            base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
            pred = base_denoiser.generate(
                bs, bp, gene_ids_test,
                steps=config.ode_steps,
                method=config.ode_method,
            )
            preds.append(pred)

        preds = torch.cat(preds, 0).cpu().numpy()
        all_pred.append(preds)
        all_real.append(target)
        obs_pred.extend([pert_name] * preds.shape[0])
        obs_real.extend([pert_name] * target.shape[0])

    all_pred = np.concatenate(all_pred, 0)
    all_real = np.concatenate(all_real, 0)
    pred_adata = ad.AnnData(X=all_pred, obs=pd.DataFrame({"perturbation": obs_pred}))
    real_adata = ad.AnnData(X=all_real, obs=pd.DataFrame({"perturbation": obs_real}))

    eval_score = None
    if accelerator.is_main_process:
        evaluator = MetricsEvaluator(
            adata_pred=pred_adata, adata_real=real_adata,
            control_pert="control", pert_col="perturbation", num_threads=32,
        )
        results, agg_results = evaluator.compute()
        results.write_csv(os.path.join(path_dir, "results.csv"))
        agg_results.write_csv(os.path.join(path_dir, "agg_results.csv"))
        pred_adata.write_h5ad(os.path.join(path_dir, "pred.h5ad"))
        real_adata.write_h5ad(os.path.join(path_dir, "real.h5ad"))
        df = agg_results.to_pandas()
        # Report the first metric that exists and has at least one value.
        for m in ("pearson_delta", "mse", "pr_auc"):
            if m in df.columns and df[m].notna().any():
                eval_score = float(df[m].iloc[0])
                break
        if eval_score is not None:
            print(f"Eval score: {eval_score:.4f}")

    return eval_score
153
+
154
+
155
if __name__ == "__main__":
    # Parse CLI flags directly into the SACFMConfig dataclass.
    config = tyro.cli(Config)

    # find_unused_parameters: not every fusion branch receives gradients
    # each step, which DDP must tolerate.
    ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
    accelerator = Accelerator(kwargs_handlers=[ddp_kwargs])
    if accelerator.is_main_process:
        print(config)
    save_path = config.make_path()
    os.makedirs(save_path, exist_ok=True)
    device = accelerator.device

    # === Data loading ===
    Data, PerturbationDataset, TrainSampler, TestDataset = get_data_classes()
    scdfm_data_path = os.path.join(_REPO_ROOT, "scDFM", "data")
    data_manager = Data(scdfm_data_path)
    data_manager.load_data(config.data_name)

    # If var_names are Ensembl IDs, remap to gene symbols so vocab lookups
    # (which are symbol-based) succeed.
    if "gene_name" in data_manager.adata.var.columns and data_manager.adata.var_names[0].startswith("ENSG"):
        data_manager.adata.var_names = data_manager.adata.var["gene_name"].values
        data_manager.adata.var_names_make_unique()

    data_manager.process_data(
        n_top_genes=config.n_top_genes,
        split_method=config.split_method,
        fold=config.fold,
        use_negative_edge=config.use_negative_edge,
        k=config.topk,
    )
    train_sampler, valid_sampler, _ = data_manager.load_flow_data(batch_size=config.batch_size)

    # === Pre-compute per-gene statistics ===
    if accelerator.is_main_process:
        print("Computing per-gene perturbation statistics...")
    sigma_aug, gene_weight = compute_gene_stats(train_sampler, config, device)
    if accelerator.is_main_process:
        print(f" sigma_aug: mean={sigma_aug.mean():.4f}, std={sigma_aug.std():.4f}, "
              f"min={sigma_aug.min():.4f}, max={sigma_aug.max():.4f}")
        print(f" gene_weight: mean={gene_weight.mean():.4f}, max={gene_weight.max():.4f}")
        n_active = (sigma_aug > config.sigma_min_clip + 1e-6).sum().item()
        print(f" Active genes (sigma > clip): {n_active}/{len(sigma_aug)}")

    # === Mask path ===
    # File name encodes fold/topk/split so different splits don't collide.
    if config.use_negative_edge:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}_negative_edge.pt",
        )
    else:
        mask_path = os.path.join(
            data_manager.data_path, data_manager.data_name,
            f"mask_fold_{config.fold}topk_{config.topk}{config.split_method}.pt",
        )

    # === Vocab ===
    # process_vocab resolves vocab files relative to the scDFM repo root,
    # so temporarily chdir there and restore afterwards.
    orig_cwd = os.getcwd()
    os.chdir(os.path.join(_REPO_ROOT, "scDFM"))
    vocab = process_vocab(data_manager, config)
    os.chdir(orig_cwd)

    gene_ids = vocab.encode(list(data_manager.adata.var_names))
    gene_ids = torch.tensor(gene_ids, dtype=torch.long, device=device)

    # === Build model (reuse scDFM model directly, no SBModel) ===
    vf = ScDFMModel(
        ntoken=len(vocab),
        d_model=config.d_model,
        nhead=config.nhead,
        d_hid=config.d_hid,
        nlayers=config.nlayers,
        fusion_method=config.fusion_method,
        perturbation_function=config.perturbation_function,
        mask_path=mask_path,
    )

    # === Build SA-CFM denoiser ===
    denoiser = SACFMDenoiser(
        model=vf,
        sigma_aug=sigma_aug,
        gene_weight=gene_weight,
        noise_type=config.noise_type,
        use_mmd_loss=config.use_mmd_loss,
        gamma=config.gamma,
    )

    # === Dataset & DataLoader ===
    # batch_size=1 because PerturbationDataset already yields pre-batched
    # samples; the loop squeezes the extra leading dimension off.
    base_dataset = PerturbationDataset(train_sampler, config.batch_size)
    dataloader = DataLoader(
        base_dataset, batch_size=1, shuffle=False,
        num_workers=4, pin_memory=True, persistent_workers=True,
    )

    # === EMA model ===
    # Frozen shadow copy; updated by lerp each step, used for checkpoints
    # and final evaluation.
    ema_model = copy.deepcopy(vf).to(device)
    ema_model.eval()
    ema_model.requires_grad_(False)

    # === Optimizer & Scheduler ===
    save_path = config.make_path()
    optimizer = torch.optim.Adam(vf.parameters(), lr=config.lr)
    # Linear warmup followed by cosine decay to eta_min.
    warmup_scheduler = LinearLR(optimizer, start_factor=1e-3, end_factor=1.0, total_iters=config.warmup_steps)
    cosine_scheduler = CosineAnnealingLR(optimizer, T_max=max(config.steps - config.warmup_steps, 1), eta_min=config.eta_min)
    scheduler = SequentialLR(optimizer, [warmup_scheduler, cosine_scheduler], milestones=[config.warmup_steps])

    start_iteration = 0
    if config.checkpoint_path != "":
        start_iteration, _ = load_checkpoint(config.checkpoint_path, vf, optimizer, scheduler)
        # Re-seed EMA from the restored weights (EMA state isn't saved).
        ema_model.load_state_dict(vf.state_dict())

    # === Prepare with accelerator ===
    denoiser = accelerator.prepare(denoiser)
    optimizer, scheduler, dataloader = accelerator.prepare(optimizer, scheduler, dataloader)

    # perturbation_dict maps name -> id; invert for id -> name lookups.
    inverse_dict = {v: str(k) for k, v in data_manager.perturbation_dict.items()}

    # === Test-only mode ===
    if config.test_only:
        eval_path = os.path.join(save_path, "eval_only")
        os.makedirs(eval_path, exist_ok=True)
        eval_score = test(
            valid_sampler, denoiser, accelerator, config, vocab, data_manager,
            batch_size=config.eval_batch_size, path_dir=eval_path,
        )
        sys.exit(0)

    # === Loss logging (main process only) ===
    if accelerator.is_main_process:
        os.makedirs(save_path, exist_ok=True)
        csv_path = os.path.join(save_path, 'loss_curve.csv')
        # Append when resuming from a checkpoint, otherwise start fresh.
        csv_file = open(csv_path, 'a' if start_iteration > 0 and os.path.exists(csv_path) else 'w', newline='')
        csv_writer = csv.writer(csv_file)
        if start_iteration == 0 or not os.path.exists(csv_path):
            csv_writer.writerow(['iteration', 'loss', 'loss_v', 'loss_mmd', 'lr'])
        tb_writer = SummaryWriter(log_dir=os.path.join(save_path, 'tb_logs'))

    # === Training loop ===
    pbar = tqdm.tqdm(total=config.steps, initial=start_iteration)
    iteration = start_iteration

    while iteration < config.steps:
        for batch_data in dataloader:
            # Drop the DataLoader's leading batch-of-1 dimension.
            source = batch_data["src_cell_data"].squeeze(0).to(device)
            target = batch_data["tgt_cell_data"].squeeze(0).to(device)
            perturbation_id = batch_data["condition_id"].squeeze(0).to(device)

            # Random gene subset (same as scDFM)
            G_full = source.shape[-1]
            input_gene_ids_pos = torch.randperm(G_full, device=device)[:config.infer_top_gene]
            source_sub = source[:, input_gene_ids_pos]
            target_sub = target[:, input_gene_ids_pos]
            gene_ids_sub = gene_ids[input_gene_ids_pos]

            # CRISPR conditioning: re-encode perturbation ids with the
            # gene vocab (see the same branch in test()).
            if config.perturbation_function == "crisper":
                pert_name = [inverse_dict[int(p)] for p in perturbation_id[0].cpu().numpy()]
                perturbation_id = torch.tensor(
                    vocab.encode(pert_name), dtype=torch.long, device=device
                ).repeat(source_sub.shape[0], 1)

            # Unwrap DDP if accelerate wrapped the denoiser.
            base_denoiser = denoiser.module if hasattr(denoiser, "module") else denoiser
            base_denoiser.model.train()

            B = source_sub.shape[0]
            gene_input = gene_ids_sub.unsqueeze(0).expand(B, -1)

            loss_dict = base_denoiser.train_step(
                source_sub, target_sub, perturbation_id, gene_input, input_gene_ids_pos,
            )

            loss = loss_dict["loss"]
            optimizer.zero_grad(set_to_none=True)
            accelerator.backward(loss)
            optimizer.step()
            scheduler.step()

            # EMA update: ema = decay * ema + (1 - decay) * model
            with torch.no_grad():
                for ema_p, model_p in zip(ema_model.parameters(), vf.parameters()):
                    ema_p.lerp_(model_p.data, 1 - config.ema_decay)

            # Checkpoint & eval
            if iteration % config.print_every == 0:
                save_path_ = os.path.join(save_path, f"iteration_{iteration}")
                os.makedirs(save_path_, exist_ok=True)
                if accelerator.is_main_process:
                    save_checkpoint(
                        model=ema_model, optimizer=optimizer, scheduler=scheduler,
                        iteration=iteration, eval_score=None,
                        save_path=save_path_, is_best=False,
                    )
                # The expensive eval only runs in the final checkpoint window.
                if iteration + config.print_every >= config.steps:
                    # Evaluate with EMA weights, then restore training weights.
                    orig_state = copy.deepcopy(vf.state_dict())
                    vf.load_state_dict(ema_model.state_dict())
                    eval_score = test(
                        valid_sampler, denoiser, accelerator, config, vocab, data_manager,
                        batch_size=config.eval_batch_size, path_dir=save_path_,
                    )
                    vf.load_state_dict(orig_state)
                    if accelerator.is_main_process and eval_score is not None:
                        tb_writer.add_scalar('eval/score', eval_score, iteration)

            # Logging
            if accelerator.is_main_process:
                lr = scheduler.get_last_lr()[0]
                csv_writer.writerow([
                    iteration, loss.item(),
                    loss_dict["loss_v"].item(), loss_dict["loss_mmd"].item(), lr,
                ])
                if iteration % 100 == 0:
                    csv_file.flush()
                tb_writer.add_scalar('loss/total', loss.item(), iteration)
                tb_writer.add_scalar('loss/velocity', loss_dict["loss_v"].item(), iteration)
                tb_writer.add_scalar('loss/mmd', loss_dict["loss_mmd"].item(), iteration)
                tb_writer.add_scalar('lr', lr, iteration)

            accelerator.wait_for_everyone()
            pbar.update(1)
            pbar.set_description(
                f"L={loss.item():.4f} v={loss_dict['loss_v'].item():.3f} "
                f"mmd={loss_dict['loss_mmd'].item():.3f}"
            )
            iteration += 1
            if iteration >= config.steps:
                break

    if accelerator.is_main_process:
        csv_file.close()
        tb_writer.close()
GRN/grn_sacfm/src/__init__.py ADDED
File without changes
GRN/grn_sacfm/src/_scdfm_imports.py ADDED
@@ -0,0 +1,50 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Central import hub for scDFM modules.
Requires _bootstrap_scdfm to have been imported first (at script entry point).

All scDFM symbols used by grn_sacfm are re-exported here under stable
names, so the rest of the code never touches the 'scdfm_src' alias
directly.
"""

import sys

# Ensure bootstrap has run: importing _bootstrap_scdfm registers the
# 'scdfm_src*' aliases in sys.modules as a side effect.
if "scdfm_src" not in sys.modules:
    import os
    sys.path.insert(0, os.path.normpath(os.path.join(os.path.dirname(__file__), "..")))
    import _bootstrap_scdfm

import scdfm_src.models.origin.layers as _layers
import scdfm_src.models.origin.model as _model
import scdfm_src.flow_matching.path as _fm_path
import scdfm_src.flow_matching.path.scheduler.scheduler as _scheduler
import scdfm_src.utils.utils as _utils
import scdfm_src.tokenizer.gene_tokenizer as _tokenizer

# === scDFM Layers ===
GeneadaLN = _layers.GeneadaLN
ContinuousValueEncoder = _layers.ContinuousValueEncoder
GeneEncoder = _layers.GeneEncoder
BatchLabelEncoder = _layers.BatchLabelEncoder
TimestepEmbedder = _layers.TimestepEmbedder
ExprDecoder = _layers.ExprDecoder

# === scDFM Blocks ===
DifferentialTransformerBlock = _model.DifferentialTransformerBlock
PerceiverBlock = _model.PerceiverBlock
DiffPerceiverBlock = _model.DiffPerceiverBlock

# === scDFM Model (direct reuse, no modifications) ===
# scDFM names its model class simply 'model'; alias it to something readable.
ScDFMModel = _model.model

# === scDFM Flow Matching ===
AffineProbPath = _fm_path.AffineProbPath
CondOTScheduler = _scheduler.CondOTScheduler

# === scDFM Utils ===
save_checkpoint = _utils.save_checkpoint
load_checkpoint = _utils.load_checkpoint
make_lognorm_poisson_noise = _utils.make_lognorm_poisson_noise
pick_eval_score = _utils.pick_eval_score
process_vocab = _utils.process_vocab
set_requires_grad_for_p_only = _utils.set_requires_grad_for_p_only
get_perturbation_emb = _utils.get_perturbation_emb

# === scDFM Tokenizer ===
GeneVocab = _tokenizer.GeneVocab
GRN/grn_sacfm/src/data/__init__.py ADDED
File without changes
GRN/grn_sacfm/src/data/data.py ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""
Data loading for grn_svd.
Imports scDFM Data/PerturbationDataset by temporarily swapping sys.modules
so that scDFM's 'src.*' packages are visible during import.
"""

import sys
import os

import torch
from torch.utils.data import Dataset

# Absolute path to the scDFM checkout: four levels up from this file,
# then transfer/code/scDFM.
_SCDFM_ROOT = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "..", "transfer", "code", "scDFM")
)

# Cache to avoid repeated imports (and repeated sys.modules swapping).
_cached_classes = {}
19
+
20
+
21
def get_data_classes():
    """Lazily import scDFM data classes with proper module isolation.

    scDFM ships its own top-level 'src' package, which collides with this
    project's 'src'. The import runs with our 'src.*' modules popped out
    of sys.modules and the scDFM root prepended to sys.path; both are
    restored in the ``finally`` block. Results are cached so the swap only
    happens once per process.

    Returns:
        Tuple of (Data, PerturbationDataset, TrainSampler, TestDataset).
    """
    if _cached_classes:
        return (
            _cached_classes["Data"],
            _cached_classes["PerturbationDataset"],
            _cached_classes["TrainSampler"],
            _cached_classes["TestDataset"],
        )

    # Save CCFM's src modules so they can be restored verbatim afterwards.
    saved = {}
    for key in list(sys.modules.keys()):
        if key == "src" or key.startswith("src."):
            saved[key] = sys.modules.pop(key)

    # Ensure __init__.py exists for scDFM data_process
    for d in ["src", "src/data_process", "src/utils", "src/tokenizer"]:
        init_path = os.path.join(_SCDFM_ROOT, d, "__init__.py")
        if not os.path.exists(init_path):
            os.makedirs(os.path.dirname(init_path), exist_ok=True)
            with open(init_path, "w") as f:
                f.write("# Auto-created by CCFM\n")

    sys.path.insert(0, _SCDFM_ROOT)
    try:
        from src.data_process.data import Data, PerturbationDataset, TrainSampler, TestDataset
        _cached_classes["Data"] = Data
        _cached_classes["PerturbationDataset"] = PerturbationDataset
        _cached_classes["TrainSampler"] = TrainSampler
        _cached_classes["TestDataset"] = TestDataset
    finally:
        # Remove scDFM's src.* entries so they never shadow ours.
        for key in list(sys.modules.keys()):
            if (key == "src" or key.startswith("src.")) and not key.startswith("scdfm_"):
                del sys.modules[key]

        # Restore CCFM's src modules
        for key, mod in saved.items():
            sys.modules[key] = mod

        if _SCDFM_ROOT in sys.path:
            sys.path.remove(_SCDFM_ROOT)

    return Data, PerturbationDataset, TrainSampler, TestDataset
66
+
67
+
68
class GRNDatasetWrapper(Dataset):
    """Wraps scDFM PerturbationDataset to produce sparse delta triplets.

    Each item carries delta_values (B, G_sub, K) and delta_indices
    (B, G_sub, K) instead of a dense z_target (B, G_sub, G_sub); the SVD
    projection itself happens later, on GPU, in denoiser.train_step().
    """

    def __init__(self, base_dataset, sparse_cache, gene_ids_cpu, infer_top_gene):
        # base_dataset: scDFM PerturbationDataset (yields pre-batched dicts)
        # sparse_cache: SparseDeltaCache (multi-process safe)
        # gene_ids_cpu: (G_full,) CPU tensor of vocab-encoded gene IDs
        self.base = base_dataset
        self.sparse_cache = sparse_cache
        self.gene_ids = gene_ids_cpu
        self.infer_top_gene = infer_top_gene

    def __len__(self):
        return len(self.base)

    def __getitem__(self, idx):
        sample = self.base[idx]

        # Draw a fresh random gene subset for this item.
        n_genes_total = sample["src_cell_data"].shape[-1]
        subset = torch.randperm(n_genes_total)[: self.infer_top_gene]

        # Cell-name lists may arrive as one-element tuples; unwrap them
        # before handing the names to the cache.
        src_names = sample["src_cell_id"]
        tgt_names = sample["tgt_cell_id"]
        if src_names and isinstance(src_names[0], (tuple, list)):
            src_names = [entry[0] for entry in src_names]
            tgt_names = [entry[0] for entry in tgt_names]

        # Sparse triplet lookup runs in the DataLoader worker process.
        delta_values, delta_indices = self.sparse_cache.lookup_delta(
            src_names, tgt_names, subset, device=torch.device("cpu")
        )  # (B, G_sub, K) values and (B, G_sub, K) int16 indices

        # Expression matrices are restricted to the same gene subset.
        return {
            "src_cell_data": sample["src_cell_data"][:, subset],  # (B, G_sub)
            "tgt_cell_data": sample["tgt_cell_data"][:, subset],  # (B, G_sub)
            "condition_id": sample["condition_id"],               # (B, 2)
            "delta_values": delta_values,                         # (B, G_sub, K)
            "delta_indices": delta_indices,                       # (B, G_sub, K) int16
            "gene_ids_sub": self.gene_ids[subset],                # (G_sub,)
            "input_gene_ids": subset,                             # (G_sub,)
        }
GRN/grn_sacfm/src/denoiser.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SACFMDenoiser — Source-Anchored Conditional Flow Matching.
3
+
4
+ Training: x_0 = source + sigma_aug * eps (noisy source, not pure noise).
5
+ Standard affine path: x_t = (1-t)*x_0 + t*target.
6
+ Velocity target: dx_t = target - x_0.
7
+ Gene-weighted MSE loss.
8
+
9
+ Inference: ODE from clean source (no noise, no SDE).
10
+ """
11
+
12
+ import torch
13
+ import torch.nn as nn
14
+ import torchdiffeq
15
+
16
+ from ._scdfm_imports import AffineProbPath, CondOTScheduler
17
+
18
+
19
+ def pairwise_sq_dists(X, Y):
20
+ return torch.cdist(X, Y, p=2) ** 2
21
+
22
+
23
+ @torch.no_grad()
24
+ def median_sigmas(X, scales=(0.5, 1.0, 2.0, 4.0)):
25
+ D2 = pairwise_sq_dists(X, X)
26
+ tri = D2[~torch.eye(D2.size(0), dtype=bool, device=D2.device)]
27
+ m = torch.median(tri).clamp_min(1e-12)
28
+ s2 = torch.tensor(scales, device=X.device) * m
29
+ return [float(s.item()) for s in torch.sqrt(s2)]
30
+
31
+
32
+ def mmd2_unbiased_multi_sigma(X, Y, sigmas):
33
+ m, n = X.size(0), Y.size(0)
34
+ Dxx = pairwise_sq_dists(X, X)
35
+ Dyy = pairwise_sq_dists(Y, Y)
36
+ Dxy = pairwise_sq_dists(X, Y)
37
+ vals = []
38
+ for sigma in sigmas:
39
+ beta = 1.0 / (2.0 * (sigma ** 2) + 1e-12)
40
+ Kxx = torch.exp(-beta * Dxx)
41
+ Kyy = torch.exp(-beta * Dyy)
42
+ Kxy = torch.exp(-beta * Dxy)
43
+ term_xx = (Kxx.sum() - Kxx.diag().sum()) / (m * (m - 1) + 1e-12)
44
+ term_yy = (Kyy.sum() - Kyy.diag().sum()) / (n * (n - 1) + 1e-12)
45
+ term_xy = Kxy.mean()
46
+ vals.append(term_xx + term_yy - 2.0 * term_xy)
47
+ return torch.stack(vals).mean()
48
+
49
+
50
class SACFMDenoiser(nn.Module):
    """
    Source-Anchored Conditional Flow Matching Denoiser.

    Key differences from scDFM baseline:
      - x_0 = source + sigma_aug * eps (not pure noise)
      - Gene-weighted velocity loss
      - Inference starts from clean source

    Key differences from SB:
      - No SigmaNet (sigma_aug is fixed, data-driven)
      - No ScoreDecoder
      - No SDE inference
      - No bridge formulation (standard affine path)
    """

    def __init__(
        self,
        model: nn.Module,
        sigma_aug: torch.Tensor,  # (G,) per-gene augmentation noise
        gene_weight: torch.Tensor,  # (G,) per-gene loss weight
        noise_type: str = "Gaussian",
        use_mmd_loss: bool = True,
        gamma: float = 0.5,
    ):
        """Wrap `model` with the SACFM training/inference logic.

        Args:
            model: velocity network; called with scDFM's 7-argument
                convention (see `train_step`).
            sigma_aug: per-gene std of the augmentation noise added to the
                source to form x_0 during training.
            gene_weight: per-gene weight applied to the velocity MSE loss.
            noise_type: stored but not read anywhere in this class —
                presumably consumed by callers; verify before removing.
            use_mmd_loss: when True, adds an MMD term between the one-step
                x1 prediction and the target batch.
            gamma: weight of the MMD term in the total loss.
        """
        super().__init__()
        self.model = model
        self.noise_type = noise_type
        self.use_mmd_loss = use_mmd_loss
        self.gamma = gamma

        # Fixed per-gene tensors (not learned); registered as buffers so
        # they follow the module across .to(device) and are checkpointed.
        self.register_buffer("sigma_aug", sigma_aug)
        self.register_buffer("gene_weight", gene_weight)

        # Standard affine flow matching path (same as scDFM):
        # x_t = (1-t)*x_0 + t*x_1 with the conditional-OT scheduler.
        self.flow_path = AffineProbPath(scheduler=CondOTScheduler())

    def train_step(
        self,
        source: torch.Tensor,  # (B, G_sub) control expression
        target: torch.Tensor,  # (B, G_sub) perturbed expression
        perturbation_id: torch.Tensor,  # (B, n_pert)
        gene_input: torch.Tensor,  # (B, G_sub) vocab-encoded gene IDs
        input_gene_ids: torch.Tensor,  # (G_sub,) indices into full gene set
    ) -> dict:
        """One training step; returns a dict with 'loss' (differentiable),
        'loss_v', and 'loss_mmd' (both detached, for logging)."""
        B = source.shape[0]
        device = source.device

        # 1. Sample time (uniform, clamped away from the t=0 / t=1 boundaries)
        t = torch.rand(B, device=device).clamp(1e-5, 1 - 1e-5)

        # 2. Look up per-gene sigma and weight for the current gene subset
        sigma_sub = self.sigma_aug[input_gene_ids]  # (G_sub,)
        weight_sub = self.gene_weight[input_gene_ids]  # (G_sub,)

        # 3. Source-anchored x_0: source plus data-driven augmentation noise
        eps = torch.randn_like(source)
        x_0 = source + sigma_sub.unsqueeze(0) * eps  # (B, G_sub)

        # 4. Standard affine flow matching (reuses scDFM AffineProbPath):
        #    x_t = (1-t)*x_0 + t*target
        #    dx_t = target - x_0 = (target - source) - sigma * eps
        path_sample = self.flow_path.sample(t=t, x_0=x_0, x_1=target)

        # 5. Model forward. scDFM convention: (gene ids, noisy cell x_t, t,
        #    conditioning cell = source, perturbation, gene ids again).
        #    NOTE(review): gene_input is deliberately passed twice —
        #    presumably once per cell argument; confirm against the model.
        pred_v = self.model(
            gene_input, path_sample.x_t, path_sample.t, source,
            perturbation_id, gene_input, mode="predict_y",
        )

        # 6. Gene-weighted velocity MSE against the path's target velocity
        loss_v = (weight_sub.unsqueeze(0) * (pred_v - path_sample.dx_t) ** 2).mean()

        # 7. Optional MMD loss (same recipe as scDFM baseline): compare the
        #    one-step extrapolation x1_hat = x_t + v*(1-t) against the target
        #    batch, with median-heuristic bandwidths computed on the target.
        loss_mmd = torch.tensor(0.0, device=device)
        if self.use_mmd_loss:
            t_col = t.unsqueeze(-1)
            x1_hat = path_sample.x_t + pred_v * (1 - t_col)
            sigmas_mmd = median_sigmas(target, scales=(0.5, 1.0, 2.0, 4.0))
            loss_mmd = mmd2_unbiased_multi_sigma(x1_hat, target, sigmas_mmd)

        loss = loss_v + self.gamma * loss_mmd

        return {
            "loss": loss,
            "loss_v": loss_v.detach(),
            "loss_mmd": loss_mmd.detach(),
        }

    @torch.no_grad()
    def generate(
        self,
        source: torch.Tensor,  # (B, G)
        perturbation_id: torch.Tensor,  # (B, n_pert)
        gene_ids: torch.Tensor,  # (B, G) or (G,)
        steps: int = 20,
        method: str = "rk4",
    ) -> torch.Tensor:
        """Generate perturbed expression via the PF-ODE from t=0 to t=1,
        starting from the clean source (no augmentation noise at inference).

        Returns the final ODE state clamped to be non-negative
        (expression values cannot be negative).
        """
        B, G = source.shape
        device = source.device

        # Broadcast a shared (G,) gene-id vector across the batch.
        if gene_ids.dim() == 1:
            gene_ids = gene_ids.unsqueeze(0).expand(B, -1)

        # Start from clean source (no augmentation noise at inference)
        x_0 = source.clone()

        def ode_func(t_scalar, x):
            # odeint passes a scalar t; replicate it per batch element for
            # the model's (B,)-shaped time input.
            t_batch = torch.full((B,), t_scalar.item(), device=device)
            pred_v = self.model(
                gene_ids, x, t_batch, source,
                perturbation_id, gene_ids, mode="predict_y",
            )
            return pred_v

        # Fixed evaluation grid on [0, 1]; with method="rk4" the solver
        # steps exactly through these points (atol/rtol only matter for
        # adaptive methods).
        t_span = torch.linspace(0, 1, steps, device=device)
        trajectory = torchdiffeq.odeint(
            ode_func, x_0, t_span,
            method=method, atol=1e-4, rtol=1e-4,
        )
        return torch.clamp(trajectory[-1], min=0)
GRN/grn_sacfm/src/utils.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Re-export scDFM utility functions from the central import module.
3
+ """
4
+
5
+ from ._scdfm_imports import (
6
+ save_checkpoint,
7
+ load_checkpoint,
8
+ make_lognorm_poisson_noise,
9
+ pick_eval_score,
10
+ process_vocab,
11
+ set_requires_grad_for_p_only,
12
+ get_perturbation_emb,
13
+ GeneVocab,
14
+ )