#!/bin/bash
#SBATCH --job-name=moge-lotus-v1
#SBATCH --output=/home/ywan0794/MoGe/moge_lotus_v1_%j.log
#SBATCH --error=/home/ywan0794/MoGe/moge_lotus_v1_%j.log
#SBATCH --open-mode=append
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=4
#SBATCH --gres=gpu:H100:1
#SBATCH --time=0-04:00:00
#SBATCH --mem=40G
#SBATCH --nodelist=erinyes

# MoGe protocol full eval on 10 benchmarks with Lotus depth ckpt v1-0 (depth output).
# Chosen over v2-1-disparity for uniform `depth_affine_invariant` output across 7 models.
# v1-0 is the original Lotus depth ckpt (Lotus paper, 2024-09); v2-1-disparity (2024-11)
# achieves better numbers per README, but emits disparity_affine_invariant — not directly
# comparable in depth space to the other 6 models.

# Fail fast: abort the job on any command or pipeline failure so a broken
# environment setup can't silently produce an eval run in the wrong env.
# -u is enabled only after conda activation, whose scripts reference unset vars.
set -eo pipefail

export PYTHONUNBUFFERED=1

cd /home/ywan0794/MoGe || exit 1

source /home/ywan0794/miniconda3/etc/profile.d/conda.sh
conda activate lotus
set -u  # safe now that conda's activation hooks have run

# Point CUDA at the conda-provided toolkit and make torch's bundled libs visible.
export CUDA_HOME=$CONDA_PREFIX
export PATH="$CUDA_HOME/bin:$PATH"
export LD_LIBRARY_PATH="$CUDA_HOME/lib64:${LD_LIBRARY_PATH:-}"
export LD_LIBRARY_PATH="$CONDA_PREFIX/lib/python3.10/site-packages/torch/lib:$LD_LIBRARY_PATH"
export PYTHONPATH="${PYTHONPATH:-}:$(pwd)"

echo "============================================"
echo "Activated conda environment: $CONDA_DEFAULT_ENV"
echo "Ckpt: jingheya/lotus-depth-g-v1-0 (depth output, generation mode)"
echo "============================================"
# Sanity-check GPU visibility before launching the (long) eval.
nvidia-smi
python -c "import torch; print('CUDA:', torch.cuda.is_available(), torch.cuda.get_device_name(0) if torch.cuda.is_available() else '')"

TIMESTAMP=$(date +"%Y%m%d_%H%M%S")
readonly REPO=/home/ywan0794/EvalMDE/Lotus
readonly PRETRAINED=jingheya/lotus-depth-g-v1-0
readonly CONFIG=/home/ywan0794/MoGe/configs/eval/all_benchmarks.json
readonly OUT_DIR=eval_output

mkdir -p "$OUT_DIR"

echo "============================================"
echo "Starting MoGe Eval for Lotus v1-0 (depth ckpt) at $(date)"
echo "Repo: $REPO"
echo "Checkpoint: $PRETRAINED"
echo "Config: $CONFIG"
echo "============================================"

# Paper-canonical from Lotus eval.sh: generation mode, fp16, seed=42, timestep=999.
# NO --disparity flag (v1-0 outputs depth, not disparity).
# Wrapper auto-emits `depth_affine_invariant` when --disparity is absent.
python moge/scripts/eval_baseline.py \
    --baseline baselines/lotus.py \
    --config "$CONFIG" \
    --output "${OUT_DIR}/lotus_v1_${TIMESTAMP}.json" \
    --repo "$REPO" \
    --pretrained "$PRETRAINED" \
    --mode generation \
    --task_name depth \
    --timestep 999 \
    --fp16 \
    --seed 42

echo "============================================"
echo "Evaluation completed at $(date)"
echo "============================================"