# MoGe / eval_scripts / sanity_all_slurm.sh
# Uploaded by zeyuren2002 via upload-large-folder tool (commit 45b0ed8, verified)
#!/bin/bash
#SBATCH --job-name=sanity-all
#SBATCH --output=/home/ywan0794/MoGe/sanity_all_%j.log
#SBATCH --error=/home/ywan0794/MoGe/sanity_all_%j.log
#SBATCH --open-mode=append
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:H100:1
#SBATCH --time=0-01:00:00
#SBATCH --mem=80G
#SBATCH --nodelist=erinyes
# Single sbatch that runs sanity for the 6 remaining models in serial,
# activating each model's conda env in turn. One GPU held the whole time.
# Phase 0 contract: failures do not abort; we log & continue.
export PYTHONUNBUFFERED=1
# Everything below assumes the repo root; bail out rather than run elsewhere.
cd /home/ywan0794/MoGe || exit 1
source /home/ywan0794/miniconda3/etc/profile.d/conda.sh
TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
CONFIG=/home/ywan0794/MoGe/configs/eval/sanity_benchmarks.json
OUT_DIR=sanity_output
mkdir -p "$OUT_DIR"
SUMMARY=$OUT_DIR/_sanity_all_${TIMESTAMP}.summary.txt
: > "$SUMMARY"  # create/truncate the per-run summary file
echo "============================================"
echo "sanity-all started at $(date)"
echo "Config: $CONFIG"
echo "TIMESTAMP: $TIMESTAMP"
echo "Summary file: $SUMMARY"
echo "============================================"
nvidia-smi
#######################################
# Run one model's sanity pass inside its own conda env.
# Globals:   OUT_DIR, CONFIG, TIMESTAMP, SUMMARY (read)
# Arguments: $1 - label (also picks baselines/<label>.py and output name)
#            $2 - conda env name
#            $3... - command to run (flags are appended)
# Phase 0 contract: a failure is logged to $SUMMARY and we continue;
# this function never aborts the job.
#######################################
run_model() {
  local label=$1 env=$2
  shift 2
  echo
  echo "============================================"
  echo "[$label] starting at $(date) (conda env: $env)"
  echo "============================================"
  # "nothing to deactivate" on the first call is expected; ignore it.
  conda deactivate 2>/dev/null || true
  conda activate "$env"
  echo "Active env: $CONDA_DEFAULT_ENV"
  # Append repo root exactly once; avoid a leading-colon (empty) entry when
  # PYTHONPATH starts out unset.
  case ":${PYTHONPATH:-}:" in
    *":$PWD:"*) ;;
    *) export PYTHONPATH=${PYTHONPATH:+$PYTHONPATH:}$PWD ;;
  esac
  python -c "import torch; print('CUDA:', torch.cuda.is_available(), torch.cuda.get_device_name(0) if torch.cuda.is_available() else '')"
  local OUTFILE=$OUT_DIR/sanity_${label}_${TIMESTAMP}.json
  local rc=0
  # Run; don't let failures kill the script.
  if "$@" \
      --baseline "baselines/${label}.py" \
      --config "$CONFIG" \
      --output "$OUTFILE"; then
    if [ -f "$OUTFILE" ]; then
      echo "[OK] $label -> $OUTFILE" | tee -a "$SUMMARY"
    else
      echo "[NO-OUTPUT] $label (exited 0 but no JSON)" | tee -a "$SUMMARY"
    fi
  else
    rc=$?  # exit status of the failed command above
    echo "[FAIL rc=$rc] $label" | tee -a "$SUMMARY"
  fi
}
# ============================================
# 0) Depth Pro (env: depth-pro) — metric depth, added for full 7-model coverage
# ============================================
REPO=/home/ywan0794/EvalMDE/ml-depth-pro
CKPT=$REPO/checkpoints/depth_pro.pt
run_model depth_pro depth-pro \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CKPT" --precision fp32
# ============================================
# 1) Marigold (env: marigold)
# ============================================
REPO=/home/ywan0794/EvalMDE/Marigold
CHECKPOINT=prs-eth/marigold-depth-v1-1
run_model marigold marigold \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CHECKPOINT" \
  --denoise_steps 4 --ensemble_size 1
# ============================================
# 2) Lotus (env: lotus) - paper-canonical eval.sh: g-v2-1-disparity, generation, fp16, seed=42
# ============================================
REPO=/home/ywan0794/EvalMDE/Lotus
PRETRAINED=jingheya/lotus-depth-g-v2-1-disparity
run_model lotus lotus \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --pretrained "$PRETRAINED" --mode generation \
  --task_name depth --disparity --timestep 999 --fp16 --seed 42
# ============================================
# 3) DepthMaster (env: depthmaster)
# ============================================
REPO=/home/ywan0794/EvalMDE/DepthMaster
CKPT=$REPO/ckpt/eval
run_model depthmaster depthmaster \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --checkpoint "$CKPT" --processing_res 768
# ============================================
# 4) PPD (env: ppd)
# ============================================
REPO=/home/ywan0794/EvalMDE/Pixel-Perfect-Depth
# Paper-canonical eval.yaml: semantics=MoGe2, ppd_moge.pth
run_model ppd ppd \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --semantics_model MoGe2 \
  --semantics_pth checkpoints/moge2.pt \
  --model_pth checkpoints/ppd_moge.pth --sampling_steps 4
# ============================================
# 5) DA3-Mono (env: da3)
# ============================================
REPO=/home/ywan0794/EvalMDE/Depth-Anything-3
HF_ID=depth-anything/DA3MONO-LARGE
run_model da3_mono da3 \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --hf_id "$HF_ID"
# ============================================
# 6) FE2E (env: fe2e)
# ============================================
REPO=/home/ywan0794/EvalMDE/FE2E
MODEL_PATH=$REPO/pretrain
LORA_PATH=$REPO/lora/LDRN.safetensors
run_model fe2e fe2e \
  python moge/scripts/eval_baseline.py \
  --repo "$REPO" --model_path "$MODEL_PATH" --lora_path "$LORA_PATH" \
  --prompt_type empty --single_denoise --cfg_guidance 6.0 --size_level 768
# ============================================
echo
echo "============================================"
echo "sanity-all finished at $(date)"
echo "============================================"
echo "=== Summary ==="
cat "$SUMMARY"