File size: 2,227 Bytes
45b0ed8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
#!/bin/bash
# SLURM batch script: evaluate the FE2E baseline with MoGe's eval harness.
#SBATCH --job-name=moge-fe2e
# stdout and stderr share one log; %j is the SLURM job id, append on requeue.
#SBATCH --output=/home/ywan0794/MoGe/moge_fe2e_%j.log
#SBATCH --error=/home/ywan0794/MoGe/moge_fe2e_%j.log
#SBATCH --open-mode=append
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
# Request one H100 GPU for up to 1 day with 80 GB of host RAM.
#SBATCH --gres=gpu:H100:1
#SBATCH --time=1-00:00:00
#SBATCH --mem=80G
# Pin to a specific node — presumably where the data/env live; confirm.
#SBATCH --nodelist=erinyes

# Fail fast: abort on errors, unset variables, and failed pipeline stages,
# instead of silently launching a day-long eval in a broken environment.
set -euo pipefail

# Stream python output into the log immediately instead of block-buffering.
export PYTHONUNBUFFERED=1

cd /home/ywan0794/MoGe || exit 1

# FE2E pins torch 2.6 / diffusers 0.32.2 fork in its own env.
source /home/ywan0794/miniconda3/etc/profile.d/conda.sh
conda activate fe2e

# Point CUDA at the conda-provided toolkit.
export CUDA_HOME=$CONDA_PREFIX
export PATH="$CUDA_HOME/bin:$PATH"
# ${VAR:+...} keeps these safe under `set -u` when the variable starts unset,
# and avoids a dangling ':' (an empty PATH-like entry means CWD).
export LD_LIBRARY_PATH="$CUDA_HOME/lib64${LD_LIBRARY_PATH:+:$LD_LIBRARY_PATH}"
export LD_LIBRARY_PATH="$CONDA_PREFIX/lib/python3.10/site-packages/torch/lib:$LD_LIBRARY_PATH"
export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}$(pwd)"

echo "============================================"
echo "Activated conda environment: $CONDA_DEFAULT_ENV"
echo "CUDA_HOME: $CUDA_HOME"
echo "============================================"

echo "=== GPU Info ==="
nvidia-smi

# Sanity check that torch can see the GPU before the long eval starts.
python -c "import torch; print('CUDA:', torch.cuda.is_available(), torch.cuda.get_device_name(0) if torch.cuda.is_available() else '')"

# Timestamped output name so repeated runs never clobber each other's results.
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
REPO=/home/ywan0794/EvalMDE/FE2E
MODEL_PATH=$REPO/pretrain
LORA_PATH=$REPO/lora/LDRN.safetensors
CONFIG=/home/ywan0794/MoGe/configs/eval/fe2e_all_benchmarks.json
OUT_DIR=eval_output
mkdir -p "$OUT_DIR"

echo "============================================"
echo "Starting MoGe Eval for FE2E at $(date)"
echo "Repo: $REPO"
echo "model_path: $MODEL_PATH"
echo "lora_path: $LORA_PATH"
echo "Config: $CONFIG"
echo "============================================"

# Mirror README depth-eval call:
#   --single_denoise --prompt_type empty --task_name depth --cfg_guidance 6.0
# Default size_level matches README 768.
# Capture the exit status (rather than discarding it) so the completion
# banner still prints and SLURM records a failed eval as FAILED, not COMPLETED.
status=0
python moge/scripts/eval_baseline.py \
    --baseline baselines/fe2e.py \
    --config "$CONFIG" \
    --output "${OUT_DIR}/fe2e_${TIMESTAMP}.json" \
    --repo "$REPO" \
    --model_path "$MODEL_PATH" \
    --lora_path "$LORA_PATH" \
    --prompt_type empty \
    --single_denoise \
    --cfg_guidance 6.0 \
    --size_level 768 || status=$?

echo "============================================"
echo "Evaluation completed at $(date)"
echo "============================================"

# Propagate the eval's exit status as the job's exit status.
exit "$status"