#!/bin/bash
#SBATCH --job-name=sanity-all
#SBATCH --output=/home/ywan0794/MoGe/sanity_all_%j.log
#SBATCH --error=/home/ywan0794/MoGe/sanity_all_%j.log
#SBATCH --open-mode=append
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:H100:1
#SBATCH --time=0-01:00:00
#SBATCH --mem=80G
#SBATCH --nodelist=erinyes
# Single sbatch that runs sanity for the 6 remaining models in serial,
# activating each model's conda env in turn. One GPU held the whole time.
# Phase 0 contract: failures do not abort; we log & continue.
# --- Environment & bookkeeping setup ---------------------------------
# Unbuffered Python output so the sbatch log streams in real time.
export PYTHONUNBUFFERED=1
# If we can't reach the repo root, nothing below makes sense — bail out
# rather than scattering outputs into the wrong directory.
cd /home/ywan0794/MoGe || { echo "FATAL: cannot cd to /home/ywan0794/MoGe" >&2; exit 1; }
# Makes `conda activate` work in this non-interactive shell.
source /home/ywan0794/miniconda3/etc/profile.d/conda.sh
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
CONFIG=/home/ywan0794/MoGe/configs/eval/sanity_benchmarks.json
OUT_DIR=sanity_output
mkdir -p "$OUT_DIR"
# One summary file per run; truncate/create it up front.
SUMMARY=$OUT_DIR/_sanity_all_${TIMESTAMP}.summary.txt
: > "$SUMMARY"
echo "============================================"
echo "sanity-all started at $(date)"
echo "Config: $CONFIG"
echo "TIMESTAMP: $TIMESTAMP"
echo "Summary file: $SUMMARY"
echo "============================================"
# Record which GPU this job was allocated.
nvidia-smi
#######################################
# Run one baseline's sanity eval inside its own conda env.
# Globals:   OUT_DIR, CONFIG, TIMESTAMP (read), SUMMARY (appended to)
# Arguments: $1 - label (also names baselines/<label>.py and the output JSON)
#            $2 - conda env to activate
#            $3… - the eval command to run (flags are appended)
# Phase 0 contract: a failing model is logged to $SUMMARY, never fatal.
#######################################
run_model() {
  local label=$1 env=$2
  shift 2
  echo
  echo "============================================"
  echo "[$label] starting at $(date) (conda env: $env)"
  echo "============================================"
  # Intentional best-effort: there may be no env active on the first call.
  conda deactivate 2>/dev/null || true
  conda activate "$env"
  echo "Active env: $CONDA_DEFAULT_ENV"
  # Append the repo root to PYTHONPATH exactly once (the original
  # re-appended it on every call, growing a duplicate entry per model).
  case ":${PYTHONPATH}:" in
    *":$PWD:"*) ;;
    *) export PYTHONPATH=${PYTHONPATH}:$PWD ;;
  esac
  python -c "import torch; print('CUDA:', torch.cuda.is_available(), torch.cuda.get_device_name(0) if torch.cuda.is_available() else '')"
  local OUTFILE=$OUT_DIR/sanity_${label}_${TIMESTAMP}.json
  local rc=0
  # Run; don't let failures kill the script.
  if "$@" \
      --baseline "baselines/${label}.py" \
      --config "$CONFIG" \
      --output "$OUTFILE"; then
    if [[ -f "$OUTFILE" ]]; then
      echo "[OK] $label -> $OUTFILE" | tee -a "$SUMMARY"
    else
      echo "[NO-OUTPUT] $label (exited 0 but no JSON)" | tee -a "$SUMMARY"
    fi
  else
    rc=$?  # status of the failed eval command (first statement in else)
    echo "[FAIL rc=$rc] $label" | tee -a "$SUMMARY"
  fi
}
# ============================================
# 0) Depth Pro (env: depth-pro) — metric depth, added for full 7-model coverage
# ============================================
REPO=/home/ywan0794/EvalMDE/ml-depth-pro
CKPT=$REPO/checkpoints/depth_pro.pt
run_model depth_pro depth-pro \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CKPT" --precision fp32
# ============================================
# 1) Marigold (env: marigold)
# ============================================
REPO=/home/ywan0794/EvalMDE/Marigold
CHECKPOINT=prs-eth/marigold-depth-v1-1
run_model marigold marigold \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CHECKPOINT" \
  --denoise_steps 4 --ensemble_size 1
# ============================================
# 2) Lotus (env: lotus) - paper-canonical eval.sh: g-v2-1-disparity, generation, fp16, seed=42
# ============================================
REPO=/home/ywan0794/EvalMDE/Lotus
PRETRAINED=jingheya/lotus-depth-g-v2-1-disparity
run_model lotus lotus \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --pretrained "$PRETRAINED" --mode generation \
  --task_name depth --disparity --timestep 999 --fp16 --seed 42
# ============================================
# 3) DepthMaster (env: depthmaster)
# ============================================
REPO=/home/ywan0794/EvalMDE/DepthMaster
CKPT=$REPO/ckpt/eval
run_model depthmaster depthmaster \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CKPT" --processing_res 768
# ============================================
# 4) PPD (env: ppd)
# ============================================
REPO=/home/ywan0794/EvalMDE/Pixel-Perfect-Depth
# Paper-canonical eval.yaml: semantics=MoGe2, ppd_moge.pth
run_model ppd ppd \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --semantics_model MoGe2 \
  --semantics_pth checkpoints/moge2.pt \
  --model_pth checkpoints/ppd_moge.pth --sampling_steps 4
# ============================================
# 5) DA3-Mono (env: da3)
# ============================================
REPO=/home/ywan0794/EvalMDE/Depth-Anything-3
HF_ID=depth-anything/DA3MONO-LARGE
run_model da3_mono da3 \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --hf_id "$HF_ID"
# ============================================
# 6) FE2E (env: fe2e)
# ============================================
REPO=/home/ywan0794/EvalMDE/FE2E
MODEL_PATH=$REPO/pretrain
LORA_PATH=$REPO/lora/LDRN.safetensors
run_model fe2e fe2e \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --model_path "$MODEL_PATH" --lora_path "$LORA_PATH" \
  --prompt_type empty --single_denoise --cfg_guidance 6.0 --size_level 768
# ============================================
# Final report: timestamped banner plus the accumulated per-model summary.
echo
echo "============================================"
echo "sanity-all finished at $(date)"
echo "============================================"
echo "=== Summary ==="
cat "$SUMMARY"