ethan1115 committed on
Commit
2bea32f
·
verified ·
1 Parent(s): dcce49b

Upload train/CCFM/pca_emb/run_precompute_sparse.sh with huggingface_hub

Browse files
train/CCFM/pca_emb/run_precompute_sparse.sh ADDED
@@ -0,0 +1,52 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
#!/bin/bash
#PJM -L rscgrp=b-batch
#PJM -L gpu=1
#PJM -L elapse=4:00:00
#PJM -N precompute_sparse
#PJM -j
#PJM -o logs/precompute_sparse_%j.out
#
# Precompute sparse attention (scGPT layer 11) for the norman dataset and
# write the result to grn_ccfm/cache/ for downstream pca_emb scripts.

# Toolchain modules required by the CUDA build of the Python environment.
module load cuda/12.2.2
module load cudnn/8.9.7
module load gcc-toolset/12

# Enable strict mode only after 'module load': module scripts frequently
# reference unset variables and would trip 'set -u'.
set -euo pipefail

source /home/pj24002027/ku50002536/Takoai/lfj/lfj/stack_env/bin/activate

# Run from the grn_ccfm directory (the precompute script depends on grn_ccfm's src/).
# Abort explicitly if the directory is missing so the python step never runs
# from the wrong working directory.
cd /home/pj24002027/ku50002536/Takoai/lfj/lfj/GRN/grn_ccfm || exit 1

# Limit CUDA allocator block splitting to reduce fragmentation on the single GPU.
export PYTORCH_CUDA_ALLOC_CONF=max_split_size_mb:256

echo "=========================================="
# Default to N/A so the banner also works (under set -u) outside a PJM job.
echo "Job ID: ${PJM_JOBID:-N/A}"
echo "Job Name: ${PJM_JOBNAME:-N/A}"
echo "Start: $(date)"
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader 2>/dev/null || echo 'N/A')"
echo "=========================================="

# Output goes to grn_ccfm/cache/ (both pca_emb shell scripts reference this path).
mkdir -p cache

python scripts/precompute_sparse_attn.py \
  --data-name norman \
  --n-top-genes 5000 \
  --fold 1 \
  --split-method additive \
  --topk 30 \
  --use-negative-edge \
  --scgpt-model-dir transfer/data/scGPT_pretrained \
  --max-seq-len 5000 \
  --attn-layer 11 \
  --attn-use-rank-norm \
  --batch-size 2 \
  --top-k 300 \
  --n-pca-pairs 1000 \
  --max-pca-components 64 \
  --output cache/norman_attn_L11_sparse.h5 \
  --device cuda

echo "=========================================="
echo "Finished: $(date)"
echo "Output: cache/norman_attn_L11_sparse.h5"
echo "=========================================="