#!/bin/bash
# Test inference against a running vLLM server on Insomnia.
#
# Usage:
#   bash scripts/test_inference.sh <host> [port] [model]
#
# Example (run from within the allocated job context):
#   srun --jobid=<JOBID> --overlap --pty bash
#   bash scripts/test_inference.sh localhost 8000 [model]
set -euo pipefail

# --- Arguments ---
HOST="${1:?Usage: $0 <host> [port] [model]}"
PORT="${2:-8000}"
# Default model; override with the optional third positional argument.
MODEL="${3:-Llama-3.1-8B-Instruct}"
BASE_URL="http://$HOST:$PORT"

echo "=== vLLM Inference Test ==="
echo "Server: $BASE_URL"
echo ""

# --- Health check ---
# --max-time bounds the probe so the script fails fast instead of hanging
# forever when the server is unreachable.
echo "[1/4] Health check..."
if ! curl -s --max-time 10 "$BASE_URL/health" > /dev/null 2>&1; then
  echo " FAIL: Server not reachable at $BASE_URL"
  echo " Check that the job is running: squeue -u \$USER"
  exit 1
fi
echo " OK"
# Read a /v1/completions JSON response on stdin and print the (stripped)
# completion text of the first choice.
# Exits non-zero with a diagnostic on stderr when the body is empty,
# non-JSON, an error payload, has no choices, or has no completion text.
# (The empty-body and empty-text failures carry distinct messages so the
# caller can tell a dead connection from a blank generation.)
validate_completion_json() {
python3 -c '
import json
import sys
raw = sys.stdin.read()
if not raw.strip():
    raise SystemExit("completion response body was empty")
try:
    payload = json.loads(raw)
except json.JSONDecodeError:
    raise SystemExit(f"completion response was non-JSON: {raw[:500]}")
error = payload.get("error")
if error is not None:
    raise SystemExit(f"error payload returned: {error}")
choices = payload.get("choices") or []
if not choices:
    raise SystemExit("no choices present in completion response")
text = (choices[0].get("text") or "").strip()
if not text:
    raise SystemExit("completion response contained no completion text")
print(text)
'
}
# Read a /v1/chat/completions JSON response on stdin and print the (stripped)
# message content of the first choice.
# Exits non-zero with a diagnostic on stderr when the body is empty,
# non-JSON, an error payload, has no choices, or has no message content.
# (The empty-body and empty-content failures carry distinct messages so the
# caller can tell a dead connection from a blank generation.)
validate_chat_json() {
python3 -c '
import json
import sys
raw = sys.stdin.read()
if not raw.strip():
    raise SystemExit("chat response body was empty")
try:
    payload = json.loads(raw)
except json.JSONDecodeError:
    raise SystemExit(f"chat response was non-JSON: {raw[:500]}")
error = payload.get("error")
if error is not None:
    raise SystemExit(f"error payload returned: {error}")
choices = payload.get("choices") or []
if not choices:
    raise SystemExit("no choices present in chat response")
message = choices[0].get("message") or {}
content = (message.get("content") or "").strip()
if not content:
    raise SystemExit("chat response contained no message content")
print(content)
'
}
# --- List models ---
echo ""
echo "[2/4] Available models:"
# Explicit failure check: under `set -e` a bare failed assignment would kill
# the script with no message at all.
if ! MODELS_RESPONSE="$(curl -s --max-time 30 "$BASE_URL/v1/models")"; then
  echo " FAIL: could not fetch $BASE_URL/v1/models" >&2
  exit 1
fi
echo "$MODELS_RESPONSE" | python3 -m json.tool
# Verify the expected model is loaded. The model name travels through the
# environment (not string interpolation) so it needs no shell escaping, and
# the single-quoted script avoids the escaped-quote soup of `-c "...\"...\""`.
echo "$MODELS_RESPONSE" | MODEL_TO_CHECK="$MODEL" python3 -c '
import json
import os
import sys
model = os.environ["MODEL_TO_CHECK"]
payload = json.load(sys.stdin)
model_ids = [item.get("id") for item in payload.get("data", []) if item.get("id")]
if model not in model_ids:
    raise SystemExit(f"ERROR: expected model {model!r} not in loaded models: {model_ids}")
print(f" OK: {model} is loaded")
'
# --- Completions API test ---
echo ""
echo "[3/4] Completions API (transformer DGA prompt):"
# Build the request body with json.dumps so the prompt needs no shell
# escaping; the model name is passed via the environment.
COMP_PAYLOAD="$(MODEL_TO_USE="$MODEL" python3 - <<'PY'
import json
import os
print(
    json.dumps(
        {
            "model": os.environ["MODEL_TO_USE"],
            "prompt": "A power transformer's dissolved gas analysis shows elevated hydrogen and acetylene levels. This pattern indicates",
            "max_tokens": 100,
            "temperature": 0.7,
        }
    )
)
PY
)"
# Quoted assignment, matching the CHAT_RESPONSE style used below.
COMP_RESPONSE="$(curl -s "$BASE_URL/v1/completions" \
  -H "Content-Type: application/json" \
  -d "$COMP_PAYLOAD")"
echo "$COMP_RESPONSE" | python3 -m json.tool
COMP_TEXT="$(echo "$COMP_RESPONSE" | validate_completion_json)"
echo " Completion preview: $COMP_TEXT"
# Extract both token counts from the usage block in one python3 invocation
# (the original forked python3 twice for the same document); fall back to
# "?" when the response carries no usage information.
USAGE_LINE="$(echo "$COMP_RESPONSE" | python3 -c '
import json
import sys
usage = json.load(sys.stdin).get("usage") or {}
print(usage.get("prompt_tokens", "?"), usage.get("completion_tokens", "?"))
' 2>/dev/null)" || USAGE_LINE="? ?"
read -r PROMPT_TOKENS COMP_TOKENS <<<"$USAGE_LINE"
echo " Prompt tokens: $PROMPT_TOKENS, Completion tokens: $COMP_TOKENS"
# --- Chat API test ---
echo ""
echo "[4/4] Chat API (work order prompt):"
# Build the chat request body in Python so the message text needs no shell
# escaping; the model name is handed over through the environment.
CHAT_PAYLOAD="$(MODEL_TO_USE="$MODEL" python3 - <<'PYEOF'
import json
import os
messages = [
    {
        "role": "system",
        "content": "You are a Smart Grid maintenance assistant.",
    },
    {
        "role": "user",
        "content": "Transformer T-4021 shows DGA readings of H2=450ppm, C2H2=120ppm. What fault type does this indicate and what priority should the work order be?",
    },
]
body = {
    "model": os.environ["MODEL_TO_USE"],
    "messages": messages,
    "max_tokens": 200,
    "temperature": 0.7,
}
print(json.dumps(body))
PYEOF
)"
CHAT_RESPONSE="$(curl -s -H "Content-Type: application/json" -d "$CHAT_PAYLOAD" "$BASE_URL/v1/chat/completions")"
echo "$CHAT_RESPONSE" | python3 -m json.tool
CHAT_TEXT="$(echo "$CHAT_RESPONSE" | validate_chat_json)"
echo " Chat preview: $CHAT_TEXT"
echo ""
echo "=== All Tests Passed ==="
echo "Environment is ready for MCP experiments."