#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=24:00:00
#PJM -N ccfm_v2_cached
#PJM -j
#PJM -o logs/ccfm_v2_cached_%j.out
# NOTE(review): the logs/ directory must exist at submission time for the
# #PJM -o path above — the scheduler writes there, not this script.

# PJM batch job: CCFM v2 cascaded-flow training on the norman dataset
# (KV fix + loss fix + EMA + RK4 + logit-normal t-sampling + warmup + scGPT cache).
#
# Abort on unhandled command failures and mid-pipeline failures so a broken
# module load or missing venv does not silently fall through to training.
set -eo pipefail

# --- Environment setup -------------------------------------------------------
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

# Activate the project virtualenv. Done before 'set -u' because older
# virtualenv activate scripts reference $PS1 unguarded.
source /home/hp250092/ku50001222/qian/aivc/lfj/stack_env/bin/activate
set -u

# Fail fast if the project directory is missing; otherwise accelerate would
# launch from whatever directory the job happened to start in.
cd /home/hp250092/ku50001222/qian/aivc/lfj/transfer/code/CCFM || exit 1

# Cap the CUDA caching-allocator block size to reduce fragmentation OOMs
# on the single assigned GPU.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

# --- Job banner ---------------------------------------------------------------
echo "=========================================="
echo "Job ID: ${PJM_JOBID:-N/A}"
echo "Job Name: ${PJM_JOBNAME:-N/A}"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "Run: CCFM v2 cached (KV fix + loss fix + EMA + RK4 + logit-normal + warmup + scGPT cache)"
echo "=========================================="

# --- Training run -------------------------------------------------------------
accelerate launch --num_processes=1 scripts/run_cascaded.py \
  --data-name norman \
  --d-model 128 \
  --nhead 8 \
  --nlayers 4 \
  --batch-size 48 \
  --lr 5e-5 \
  --steps 200000 \
  --fusion-method differential_perceiver \
  --perturbation-function crisper \
  --noise-type Gaussian \
  --infer-top-gene 1000 \
  --n-top-genes 5000 \
  --use-mmd-loss \
  --gamma 0.5 \
  --split-method additive \
  --fold 1 \
  --scgpt-dim 512 \
  --bottleneck-dim 128 \
  --latent-weight 1.0 \
  --choose-latent-p 0.4 \
  --dh-depth 2 \
  --print-every 10000 \
  --topk 30 \
  --use-negative-edge \
  --ema-decay 0.9999 \
  --t-sample-mode logit_normal \
  --t-expr-mean 0.0 \
  --t-expr-std 1.0 \
  --t-latent-mean 0.0 \
  --t-latent-std 1.0 \
  --warmup-steps 2000 \
  --ode-method rk4 \
  --scgpt-cache-path scgpt_cache_norman.h5 \
  --result-path ./result

# --- Footer -------------------------------------------------------------------
echo "=========================================="
echo "Finished: $(date)"
echo "=========================================="