#!/bin/bash
#SBATCH --job-name=eval-all
#SBATCH --output=/home/ywan0794/MoGe/eval_all_%j.log
#SBATCH --error=/home/ywan0794/MoGe/eval_all_%j.log
#SBATCH --open-mode=append
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:H100:1
#SBATCH --time=0-12:00:00
#SBATCH --mem=80G
#SBATCH --nodelist=erinyes
# Single sbatch — production run for 7 models on all 10 MoGe benchmarks, serial,
# one H100 held the whole time. Failures don't abort; we log & continue.
# Model order: cheap → expensive (FE2E last so it doesn't block others if it crashes).
#
# NOTE: deliberately NO `set -e` — a failing model must not abort the remaining
# runs; per-model failure handling lives in run_model.
export PYTHONUNBUFFERED=1

# Abort immediately if the repo checkout is missing: everything below (relative
# OUT_DIR, baselines/, configs) assumes this cwd. Previously a failed cd left
# the job running — and writing output — in whatever directory sbatch started in.
cd /home/ywan0794/MoGe || { echo "FATAL: cannot cd to /home/ywan0794/MoGe" >&2; exit 1; }
source /home/ywan0794/miniconda3/etc/profile.d/conda.sh

TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
CONFIG=/home/ywan0794/MoGe/configs/eval/all_benchmarks.json
CONFIG_FE2E=/home/ywan0794/MoGe/configs/eval/fe2e_all_benchmarks.json
OUT_DIR=eval_output
mkdir -p "$OUT_DIR"

# One summary file per submission: truncated here, appended to by run_model.
SUMMARY=$OUT_DIR/_eval_all_${TIMESTAMP}.summary.txt
: > "$SUMMARY"

echo "============================================"
echo "eval-all started at $(date)"
echo "Config (main): $CONFIG"
echo "Config (fe2e): $CONFIG_FE2E"
echo "TIMESTAMP: $TIMESTAMP"
echo "Summary file: $SUMMARY"
echo "============================================"
nvidia-smi
run_model() {
  # Run one baseline evaluation serially and record the outcome in $SUMMARY.
  # Usage: run_model <label> <env> <config> <python invocation ...>
  #   label - baseline name; selects baselines/<label>.py and the output JSON name
  #   env   - conda environment to activate for this model
  #   cfg   - benchmark config JSON passed via --config
  #   rest  - command prefix to execute (e.g. `python moge/scripts/eval_baseline.py ...`)
  # Globals read: OUT_DIR, TIMESTAMP, SUMMARY, CONDA_DEFAULT_ENV.
  # Never aborts the script: failures are logged to $SUMMARY and we fall through
  # so the next model still runs.
  local label=$1 env=$2 cfg=$3
  shift 3
  echo
  echo "============================================"
  echo "[$label] starting at $(date) (conda env: $env)"
  echo "============================================"
  conda deactivate 2>/dev/null || true
  conda activate "$env"
  echo "Active env: $CONDA_DEFAULT_ENV"
  # Add the repo root to PYTHONPATH exactly once. The old unconditional
  # `export PYTHONPATH=$PYTHONPATH:$(pwd)` appended another copy on every call
  # (7 duplicates by the last model) and produced a leading ':' when unset.
  case ":${PYTHONPATH:-}:" in
    *":$PWD:"*) ;;
    *) export PYTHONPATH="${PYTHONPATH:+$PYTHONPATH:}$PWD" ;;
  esac
  # Sanity-print CUDA availability from inside the freshly activated env.
  python -c "import torch; print('CUDA:', torch.cuda.is_available(), torch.cuda.get_device_name(0) if torch.cuda.is_available() else '')"
  local OUTFILE=$OUT_DIR/${label}_${TIMESTAMP}.json
  local rc=0
  if "$@" \
      --baseline "baselines/${label}.py" \
      --config "$cfg" \
      --output "$OUTFILE"; then
    if [ -f "$OUTFILE" ]; then
      # Declaration split from assignment so stat's exit code isn't masked.
      local SIZE
      SIZE=$(stat -c%s "$OUTFILE" 2>/dev/null)
      echo "[OK] $label -> $OUTFILE (${SIZE} bytes) at $(date)" | tee -a "$SUMMARY"
    else
      echo "[NO-OUTPUT] $label (exited 0 but no JSON) at $(date)" | tee -a "$SUMMARY"
    fi
  else
    rc=$?  # status of the eval command; local now, was leaking to global scope
    echo "[FAIL rc=$rc] $label at $(date)" | tee -a "$SUMMARY"
  fi
}
# ============================================
# 1) DA3-Mono — SKIPPED, already done in eval_output/da3_mono_20260514_010406.json
# ============================================
# REPO=/home/ywan0794/EvalMDE/Depth-Anything-3
# HF_ID=depth-anything/DA3MONO-LARGE
# run_model da3_mono da3 $CONFIG \
# python moge/scripts/eval_baseline.py \
# --repo $REPO --hf_id $HF_ID
# ============================================
# 2) Depth Pro — SKIPPED, already done in eval_output/depth_pro_20260514_010406.json
# ============================================
# REPO=/home/ywan0794/EvalMDE/ml-depth-pro
# CKPT=$REPO/checkpoints/depth_pro.pt
# run_model depth_pro depth-pro $CONFIG \
# python moge/scripts/eval_baseline.py \
# --repo $REPO --checkpoint $CKPT --precision fp32
# ============================================
# 3) Marigold v1.1 (env: marigold) — paper-canonical via
#    `script/depth/eval/11_infer_nyu.sh`: v1-1 + denoise=1 + ensemble=10 + seed=1234.
#    v1-1 retrained to match v1-0's denoise=50 quality at denoise=1.
# ============================================
REPO=/home/ywan0794/EvalMDE/Marigold
CHECKPOINT=prs-eth/marigold-depth-v1-1
# NOTE(review): the header above says paper-canonical is denoise=1 + ensemble=10,
# but the flags below pass --denoise_steps 4 --ensemble_size 1 — confirm which
# setting is intended before comparing against published numbers.
run_model marigold marigold $CONFIG \
python moge/scripts/eval_baseline.py \
--repo $REPO --checkpoint $CHECKPOINT \
--denoise_steps 4 --ensemble_size 1
# ============================================
# 4) Lotus (env: lotus) — paper-canonical eval.sh:
#    generative v2-1-disparity + half_precision + seed=42.
#    --disparity because the v2-1 checkpoint predicts disparity, not depth;
#    --timestep 999 presumably the single-step setting from the repo's eval.sh
#    — TODO confirm against that script.
# ============================================
REPO=/home/ywan0794/EvalMDE/Lotus
PRETRAINED=jingheya/lotus-depth-g-v2-1-disparity
run_model lotus lotus $CONFIG \
python moge/scripts/eval_baseline.py \
--repo $REPO --pretrained $PRETRAINED --mode generation \
--task_name depth --disparity --timestep 999 --fp16 --seed 42
# ============================================
# 5) DepthMaster (env: depthmaster)
#    CKPT points at the repo's ckpt/eval directory (not a single file).
#    NOTE(review): --processing_res 768 looks like the inference resolution —
#    confirm it matches the paper's evaluation setting.
# ============================================
REPO=/home/ywan0794/EvalMDE/DepthMaster
CKPT=$REPO/ckpt/eval
run_model depthmaster depthmaster $CONFIG \
python moge/scripts/eval_baseline.py \
--repo $REPO --checkpoint $CKPT --processing_res 768
# ============================================
# 6) PPD (env: ppd) — needs DA2 vitl semantics
#    NOTE(review): the "DA2 vitl" note above contradicts the flags below, which
#    pass --semantics_model MoGe2 per the paper-canonical eval.yaml — the DA2
#    remark looks stale; confirm and delete one of the two.
# ============================================
REPO=/home/ywan0794/EvalMDE/Pixel-Perfect-Depth
# Paper-canonical eval.yaml: semantics=MoGe2, ppd_moge.pth, sampling_steps=4
run_model ppd ppd $CONFIG \
python moge/scripts/eval_baseline.py \
--repo $REPO --semantics_model MoGe2 \
--semantics_pth checkpoints/moge2.pt \
--model_pth checkpoints/ppd_moge.pth --sampling_steps 4
# ============================================
# 7) FE2E (env: fe2e) — slowest model, deliberately last so a crash here
#    cannot block the cheaper runs above. Uses the FE2E-specific config
#    ($CONFIG_FE2E), unlike all other models which share $CONFIG.
#    NOTE(review): --cfg_guidance 6.0 / --size_level 768 / --single_denoise
#    presumably mirror the repo's recommended eval settings — TODO confirm.
# ============================================
REPO=/home/ywan0794/EvalMDE/FE2E
MODEL_PATH=$REPO/pretrain
LORA_PATH=$REPO/lora/LDRN.safetensors
run_model fe2e fe2e $CONFIG_FE2E \
python moge/scripts/eval_baseline.py \
--repo $REPO --model_path $MODEL_PATH --lora_path $LORA_PATH \
--prompt_type empty --single_denoise --cfg_guidance 6.0 --size_level 768
# ============================================
# Final banner + dump the per-model summary collected by run_model.
echo
echo "============================================"
echo "eval-all finished at $(date)"
echo "============================================"
echo "=== Summary ==="
# Quote $SUMMARY: unquoted, an empty/unset value would expand to nothing and
# make `cat` block on stdin — silently hanging the job until the SLURM time
# limit instead of exiting.
cat "$SUMMARY"