#!/bin/bash
#SBATCH --job-name=vllm-llama8b
#SBATCH --account=edu
#SBATCH --partition=short
#SBATCH --qos=short
#SBATCH --gres=gpu:A6000:1
#SBATCH --mem=64G
#SBATCH --cpus-per-task=4
#SBATCH --time=02:00:00
#SBATCH --output=logs/vllm_%j.out
#
# Launches vLLM serving Llama-3.1-8B-Instruct on a single A6000.
# After the server starts, runs a test prompt and keeps serving until the time limit.
#
# MUST be submitted from the repo root — `#SBATCH --output=logs/...` is
# resolved relative to $SLURM_SUBMIT_DIR, so running `sbatch` from elsewhere
# either writes logs to a surprising location or fails with "no such file".
# If you need to submit from a different directory, add
# `--chdir=/path/to/repo` to the sbatch invocation.
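# For example (repo path is illustrative):
#   sbatch --chdir=/path/to/smartgridbench scripts/vllm_serve.sh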
#
# Usage (with BEGIN/END/FAIL email notifications):
# cd /path/to/smartgridbench
# sbatch --mail-type=BEGIN,END,FAIL --mail-user=<email> scripts/vllm_serve.sh
#
# Or without notifications:
# sbatch scripts/vllm_serve.sh
#
# Tip: export MAIL_USER=<email> in your shell profile and run:
# sbatch --mail-type=BEGIN,END,FAIL --mail-user="$MAIL_USER" scripts/vllm_serve.sh
#
# --- Connecting to the running server ---
#
# vLLM binds to 127.0.0.1 on the compute node (not the compute node's external
# interface), so SSH-tunneling from the login node does NOT work. The tested
# path is to attach to the Slurm job from another shell via --overlap:
#
# srun --jobid=<JOB_ID> --overlap --pty bash
#
# Inside that shell, hit the server via localhost:
#
# bash scripts/test_inference.sh localhost 8000 Llama-3.1-8B-Instruct
# # or raw curl:
# curl -s http://127.0.0.1:8000/v1/completions \
# -H "Content-Type: application/json" \
# -d '{"model":"Llama-3.1-8B-Instruct","prompt":"hello","max_tokens":16}'
set -euo pipefail
REPO_ROOT="${SLURM_SUBMIT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
cd "$REPO_ROOT"
# Shared checkout on Insomnia: keep new logs group-writable for teammates.
umask 0002
mkdir -p logs
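# 2775 sets the setgid bit so files created under logs/ inherit the group.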
chmod 2775 logs 2>/dev/null || true
if command -v setfacl >/dev/null 2>&1; then
setfacl -m g::rwx logs 2>/dev/null || true
setfacl -d -m g::rwx logs 2>/dev/null || true
fi
if [ ! -f ".venv-insomnia/bin/activate" ]; then
echo "ERROR: missing .venv-insomnia. Run bash scripts/setup_insomnia.sh first." >&2
exit 1
fi
STARTUP_TIMEOUT="${STARTUP_TIMEOUT:-900}"
MODEL_PATH="${MODEL_PATH:-models/Llama-3.1-8B-Instruct}"
PORT="${PORT:-8000}"
MAX_MODEL_LEN="${MAX_MODEL_LEN:-32768}"
VLLM_GENERATION_CONFIG="${VLLM_GENERATION_CONFIG:-vllm}"
VLLM_SERVED_MODEL_NAME="${VLLM_SERVED_MODEL_NAME:-$(basename "${MODEL_PATH%/}")}"
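# All of the defaults above can be overridden at submission time. On most Slurm
# setups sbatch exports the submitting shell's environment (--export=ALL is the
# default), so for example (values are illustrative):
#   PORT=8001 MAX_MODEL_LEN=16384 sbatch scripts/vllm_serve.sh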
if [ ! -d "$MODEL_PATH" ]; then
echo "ERROR: missing model directory at $MODEL_PATH" >&2
echo "Run bash scripts/setup_insomnia.sh first, or set MODEL_PATH to the downloaded checkpoint." >&2
exit 1
fi
for cmd in curl nvidia-smi python3; do
if ! command -v "$cmd" >/dev/null 2>&1; then
echo "ERROR: required command not found: $cmd" >&2
exit 1
fi
done
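# Track the server PID so the trap below can shut vLLM down cleanly when the
# job ends, is cancelled, or this script exits on an error.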
VLLM_PID=""
trap 'if [ -n "$VLLM_PID" ]; then kill "$VLLM_PID" 2>/dev/null || true; wait "$VLLM_PID" 2>/dev/null || true; fi' EXIT INT TERM
# --- CUDA setup (don't use module load cuda, it's broken) ---
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH:-}
# Cluster-specific env (NCCL overrides for Insomnia Slingshot fabric, etc.)
# shellcheck source=scripts/insomnia_env.sh
source "$REPO_ROOT/scripts/insomnia_env.sh"
# --- Activate venv ---
source .venv-insomnia/bin/activate
# cuDNN from pip install
CUDNN_LIB="$(python3 -c 'import nvidia.cudnn; import os; print(os.path.join(os.path.dirname(nvidia.cudnn.__file__), "lib"))' 2>/dev/null || true)"
if [ -n "$CUDNN_LIB" ]; then
export LD_LIBRARY_PATH="$CUDNN_LIB:$LD_LIBRARY_PATH"
fi
echo "=== vLLM Serving Job ==="
echo "Node: $(hostname)"
echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)"
echo "Model: $MODEL_PATH"
echo "Served as: $VLLM_SERVED_MODEL_NAME"
echo "Port: $PORT"
echo "Job ID: ${SLURM_JOB_ID:-N/A}"
echo "Start: $(date)"
echo ""
# --- Record baseline GPU state ---
nvidia-smi
# Capture vLLM startup output in a per-job log so failures can be inspected later
VLLM_STARTUP_LOG="logs/vllm_startup_${SLURM_JOB_ID:-local}.log"
# --- Launch vLLM server in background ---
python3 -m vllm.entrypoints.openai.api_server \
--model "$MODEL_PATH" \
--served-model-name "$VLLM_SERVED_MODEL_NAME" \
--host 127.0.0.1 \
--port "$PORT" \
--max-model-len "$MAX_MODEL_LEN" \
--dtype float16 \
--generation-config "$VLLM_GENERATION_CONFIG" \
>"$VLLM_STARTUP_LOG" 2>&1 &
VLLM_PID=$!
# --- Wait for server to be ready ---
echo ""
echo "Waiting for vLLM server to start..."
for i in $(seq 1 "$STARTUP_TIMEOUT"); do
if curl -s http://127.0.0.1:$PORT/health > /dev/null 2>&1; then
echo "Server ready after ${i}s"
break
fi
if ! kill -0 "$VLLM_PID" 2>/dev/null; then
echo "ERROR: vLLM process died during startup"
tail -100 "$VLLM_STARTUP_LOG" || true
exit 1
fi
sleep 1
done
if ! curl -s http://127.0.0.1:$PORT/health > /dev/null 2>&1; then
echo "ERROR: Server did not start within ${STARTUP_TIMEOUT}s"
echo "=== Process state ==="
ps -fp "$VLLM_PID" || true
echo "=== Port state ==="
ss -ltnp | grep ":$PORT" || true
echo "=== Recent vLLM startup log ==="
tail -100 "$VLLM_STARTUP_LOG" || true
kill "$VLLM_PID" 2>/dev/null || true
exit 1
fi
# --- Run test inference ---
echo ""
echo "=== Test Inference ==="
TEST_RESPONSE="$(curl -s http://127.0.0.1:$PORT/v1/completions \
-H "Content-Type: application/json" \
-d "{
\"model\": \"$VLLM_SERVED_MODEL_NAME\",
\"prompt\": \"A power transformer's dissolved gas analysis shows elevated hydrogen and acetylene levels. This pattern indicates\",
\"max_tokens\": 100,
\"temperature\": 0.7
}")"
echo "$TEST_RESPONSE" | python3 -c '
import json
import sys
raw = sys.stdin.read()
if not raw.strip():
raise SystemExit("ERROR: inference returned an empty response.")
try:
payload = json.loads(raw)
except json.JSONDecodeError:
raise SystemExit(f"ERROR: inference returned non-JSON output: {raw[:500]}")
error = payload.get("error")
if error is not None:
raise SystemExit(f"ERROR: inference returned error payload: {error}")
choices = payload.get("choices") or []
if not choices:
raise SystemExit("ERROR: inference response had no choices.")
text = (choices[0].get("text") or "").strip()
if not text:
raise SystemExit("ERROR: inference response had an empty completion.")
print(json.dumps(payload, indent=2))
'
# --- Record GPU utilization after model load ---
echo ""
echo "=== GPU State After Model Load ==="
nvidia-smi
echo ""
echo "=== Server Running ==="
echo "vLLM is serving on localhost:$PORT on compute node $(hostname)"
echo "To run the standalone inference smoke test from another shell, attach to this allocation:"
echo " srun --jobid ${SLURM_JOB_ID:-<job-id>} --overlap --pty bash"
echo ""
echo "Then, inside that shell, run:"
echo " bash scripts/test_inference.sh localhost $PORT $VLLM_SERVED_MODEL_NAME"
echo ""
echo "Server will run until the SLURM time limit is hit (script default is 2 hours unless overridden at submission). Ctrl+C or scancel ${SLURM_JOB_ID:-<job-id>} to stop."
# --- Keep alive until time limit ---
wait "$VLLM_PID"