#!/bin/bash
set -euo pipefail
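# Resolve the repository root: prefer the SLURM submit directory,
# otherwise the parent of this script's directory.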
REPO_ROOT="${SLURM_SUBMIT_DIR:-$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)}"
cd "$REPO_ROOT"
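# Create a group-writable logs directory (setgid bit plus default ACLs where setfacl is available).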
umask 0002
mkdir -p logs
chmod 2775 logs 2>/dev/null || true
if command -v setfacl >/dev/null 2>&1; then
  setfacl -m g::rwx logs 2>/dev/null || true
  setfacl -d -m g::rwx logs 2>/dev/null || true
fi
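# The Python virtual environment must already exist.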
if [ ! -f ".venv-insomnia/bin/activate" ]; then
  echo "ERROR: missing .venv-insomnia. Run bash scripts/setup_insomnia.sh first." >&2
  exit 1
fi
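# Serving configuration; every value below can be overridden from the environment at submission,
# e.g. (illustrative invocation): PORT=8080 MAX_MODEL_LEN=16384 sbatch <this-script>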
STARTUP_TIMEOUT="${STARTUP_TIMEOUT:-900}"
MODEL_PATH="${MODEL_PATH:-models/Llama-3.1-8B-Instruct}"
PORT="${PORT:-8000}"
MAX_MODEL_LEN="${MAX_MODEL_LEN:-32768}"
VLLM_GENERATION_CONFIG="${VLLM_GENERATION_CONFIG:-vllm}"
VLLM_SERVED_MODEL_NAME="${VLLM_SERVED_MODEL_NAME:-$(basename "${MODEL_PATH%/}")}"
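# The model checkpoint must already be downloaded.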
if [ ! -d "$MODEL_PATH" ]; then
  echo "ERROR: missing model directory at $MODEL_PATH" >&2
  echo "Run bash scripts/setup_insomnia.sh first, or set MODEL_PATH to the downloaded checkpoint." >&2
  exit 1
fi
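# Fail early if a required tool is missing from PATH.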
for cmd in curl nvidia-smi python3; do
  if ! command -v "$cmd" >/dev/null 2>&1; then
    echo "ERROR: required command not found: $cmd" >&2
    exit 1
  fi
done
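# Ensure the background vLLM server is killed when the job exits or is cancelled.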
| VLLM_PID="" |
| trap 'if [ -n "$VLLM_PID" ]; then kill "$VLLM_PID" 2>/dev/null || true; wait "$VLLM_PID" 2>/dev/null || true; fi' EXIT INT TERM |
|
|
| |
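# Put the system CUDA toolkit on PATH and the loader path.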
export PATH=/usr/local/cuda/bin:$PATH
export LD_LIBRARY_PATH=/usr/local/cuda/lib64:${LD_LIBRARY_PATH:-}
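# Load the cluster-specific environment settings shipped with the repo.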
| source "$REPO_ROOT/scripts/insomnia_env.sh" |
|
|
| |
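# Activate the project virtual environment.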
source .venv-insomnia/bin/activate
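# Prefer the pip-installed cuDNN libraries from the virtual environment, if present.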
CUDNN_LIB="$(python3 -c 'import nvidia.cudnn; import os; print(os.path.join(os.path.dirname(nvidia.cudnn.__file__), "lib"))' 2>/dev/null || true)"
if [ -n "$CUDNN_LIB" ]; then
  export LD_LIBRARY_PATH="$CUDNN_LIB:$LD_LIBRARY_PATH"
fi
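# Print a job summary banner to the log.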
| echo "=== vLLM Serving Job ===" |
| echo "Node: $(hostname)" |
| echo "GPU: $(nvidia-smi --query-gpu=name,memory.total --format=csv,noheader)" |
| echo "Model: $MODEL_PATH" |
| echo "Served as: $VLLM_SERVED_MODEL_NAME" |
| echo "Port: $PORT" |
| echo "Job ID: ${SLURM_JOB_ID:-N/A}" |
| echo "Start: $(date)" |
| echo "" |
|
|
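# GPU state before the model is loaded.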
nvidia-smi
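# vLLM startup output goes to its own log so the main job log stays readable.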
VLLM_STARTUP_LOG="logs/vllm_startup_${SLURM_JOB_ID:-local}.log"
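# Launch the OpenAI-compatible vLLM API server in the background, bound to localhost only.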
python3 -m vllm.entrypoints.openai.api_server \
  --model "$MODEL_PATH" \
  --served-model-name "$VLLM_SERVED_MODEL_NAME" \
  --host 127.0.0.1 \
  --port "$PORT" \
  --max-model-len "$MAX_MODEL_LEN" \
  --dtype float16 \
  --generation-config "$VLLM_GENERATION_CONFIG" \
  >"$VLLM_STARTUP_LOG" 2>&1 &
VLLM_PID=$!
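# Poll the health endpoint until the server responds or the startup timeout elapses.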
| echo "" |
| echo "Waiting for vLLM server to start..." |
| for i in $(seq 1 "$STARTUP_TIMEOUT"); do |
| if curl -s http://127.0.0.1:$PORT/health > /dev/null 2>&1; then |
| echo "Server ready after ${i}s" |
| break |
| fi |
| if ! kill -0 "$VLLM_PID" 2>/dev/null; then |
| echo "ERROR: vLLM process died during startup" |
| tail -100 "$VLLM_STARTUP_LOG" || true |
| exit 1 |
| fi |
| sleep 1 |
| done |
|
|
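# If the server never came up, dump diagnostics and fail the job.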
if ! curl -s http://127.0.0.1:$PORT/health > /dev/null 2>&1; then
  echo "ERROR: Server did not start within ${STARTUP_TIMEOUT}s" >&2
  echo "=== Process state ==="
  ps -fp "$VLLM_PID" || true
  echo "=== Port state ==="
  ss -ltnp | grep ":$PORT" || true
  echo "=== Recent vLLM startup log ==="
  tail -100 "$VLLM_STARTUP_LOG" || true
  kill "$VLLM_PID" 2>/dev/null || true
  exit 1
fi
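# Send one completion request to confirm the server actually generates text.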
echo ""
echo "=== Test Inference ==="
TEST_RESPONSE="$(curl -s http://127.0.0.1:$PORT/v1/completions \
  -H "Content-Type: application/json" \
  -d "{
    \"model\": \"$VLLM_SERVED_MODEL_NAME\",
    \"prompt\": \"A power transformer's dissolved gas analysis shows elevated hydrogen and acetylene levels. This pattern indicates\",
    \"max_tokens\": 100,
    \"temperature\": 0.7
  }")"
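# Validate the response: it must be JSON, carry no error payload, and contain a non-empty completion.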
| echo "$TEST_RESPONSE" | python3 -c ' |
| import json |
| import sys |
| |
| raw = sys.stdin.read() |
| if not raw.strip(): |
| raise SystemExit("ERROR: inference returned an empty response.") |
| |
| try: |
| payload = json.loads(raw) |
| except json.JSONDecodeError: |
| raise SystemExit(f"ERROR: inference returned non-JSON output: {raw[:500]}") |
| |
| error = payload.get("error") |
| if error is not None: |
| raise SystemExit(f"ERROR: inference returned error payload: {error}") |
| |
| choices = payload.get("choices") or [] |
| if not choices: |
| raise SystemExit("ERROR: inference response had no choices.") |
| |
| text = (choices[0].get("text") or "").strip() |
| if not text: |
| raise SystemExit("ERROR: inference response had an empty completion.") |
| |
| print(json.dumps(payload, indent=2)) |
| ' |
|
|
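# GPU state with the model loaded (weights and KV cache resident).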
echo ""
echo "=== GPU State After Model Load ==="
nvidia-smi
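# Print instructions for interacting with the running server from another shell.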
| echo "" |
| echo "=== Server Running ===" |
| echo "vLLM is serving on localhost:$PORT on compute node $(hostname)" |
| echo "To run the standalone inference smoke test from another shell, attach to this allocation:" |
| echo " srun --jobid ${SLURM_JOB_ID:-<job-id>} --overlap --pty bash" |
| echo "" |
| echo "Then, inside that shell, run:" |
| echo " bash scripts/test_inference.sh localhost $PORT $VLLM_SERVED_MODEL_NAME" |
| echo "" |
| echo "Server will run until the SLURM time limit is hit (script default is 2 hours unless overridden at submission). Ctrl+C or scancel ${SLURM_JOB_ID:-<job-id>} to stop." |
|
|
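# Keep the job alive while the server runs in the background.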
wait "$VLLM_PID"