#!/usr/bin/env bash
#
# Set up the "insomnia" experiment environment:
#   - a pinned Python 3.11 virtual environment (via uv), and/or
#   - a pinned snapshot of the gated meta-llama/Llama-3.1-8B-Instruct model.
#
# Usage:
#   SETUP_MODE=all|venv|model ./scripts/<this-script>.sh   (default: all)
#
# Required env for the model phase: HF_TOKEN (Read-scope HuggingFace token,
# after accepting the Llama license). Optional: MODEL_REVISION to pin a
# different model commit.

set -euo pipefail

# Resolve everything relative to the repository root so the script works
# regardless of the caller's cwd. Declaration split from the command
# substitution so a failing `cd` is not masked by `readonly`'s exit status.
REPO_ROOT="$(cd "$(dirname "$0")/.." && pwd)"
readonly REPO_ROOT
readonly VENV_DIR="$REPO_ROOT/.venv-insomnia"
readonly MODEL_DIR="$REPO_ROOT/models"
readonly REQUIREMENTS_FILE="$REPO_ROOT/requirements-insomnia.txt"
readonly MODEL_NAME="meta-llama/Llama-3.1-8B-Instruct"
# Pinned model-repo commit for reproducibility; override via MODEL_REVISION.
readonly DEFAULT_MODEL_REVISION="0e9e39f249a16976918f6564b8830bc894c89659"
MODEL_REVISION="${MODEL_REVISION:-$DEFAULT_MODEL_REVISION}"
readonly MODEL_REVISION
SETUP_MODE="${SETUP_MODE:-all}"
readonly SETUP_MODE
|
|
# Validate SETUP_MODE and derive the phase flags from it in a single case:
# an unrecognized mode aborts before any work is attempted.
do_venv=0
do_model=0
case "$SETUP_MODE" in
  all)
    do_venv=1
    do_model=1
    ;;
  venv)
    do_venv=1
    ;;
  model)
    do_model=1
    ;;
  *)
    echo "ERROR: SETUP_MODE=$SETUP_MODE (expected all|venv|model)" >&2
    exit 1
    ;;
esac
|
|
# Announce the effective run configuration.
echo "=== Insomnia Environment Setup ==="
echo "Repo root: $REPO_ROOT"
echo "Mode: $SETUP_MODE"
echo "Model: $MODEL_NAME @ $MODEL_REVISION"

# Fail fast when a required tool is missing from PATH (guard-clause form).
for required in python3 uv; do
  command -v "$required" >/dev/null 2>&1 || {
    echo "ERROR: required command not found: $required" >&2
    exit 1
  }
done
|
|
# The model phase needs a HuggingFace token because the Llama repo is gated.
if [ "$do_model" = "1" ]; then
  if [ -z "${HF_TOKEN:-}" ]; then
    echo "ERROR: HF_TOKEN must be exported for model download." >&2
    echo "Accept the Llama license at https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct" >&2
    echo "and create a Read-scope token at https://huggingface.co/settings/tokens" >&2
    exit 1
  fi
fi

# The venv phase is driven entirely by the pinned requirements file.
if [ "$do_venv" = "1" ]; then
  if [ ! -f "$REQUIREMENTS_FILE" ]; then
    echo "ERROR: requirements file not found: $REQUIREMENTS_FILE" >&2
    exit 1
  fi
fi
|
|
| |
# Phase 1/2: create the venv (idempotent) and install pinned dependencies.
if [ "$do_venv" = "1" ]; then
  if [ ! -d "$VENV_DIR" ]; then
    echo ""
    echo "[1] Creating Python 3.11 virtual environment with uv..."
    uv venv "$VENV_DIR" --python 3.11
  else
    echo "[1] Virtual environment already exists at $VENV_DIR"
  fi

  # Safe to re-run: uv resolves quickly when requirements are already met.
  echo ""
  echo "[2] Installing pinned dependencies from requirements-insomnia.txt..."
  uv pip install --python "$VENV_DIR/bin/python" -r "$REQUIREMENTS_FILE"

  # Report installed versions from package metadata only — importing
  # torch/vllm here would be slow and may require a GPU environment.
  # Interpreter path unified on bin/python (the install step above used
  # bin/python while this step used bin/python3; both resolve to the same
  # interpreter in a uv venv, but one spelling is used throughout now).
  echo ""
  echo "Installed versions (from package metadata; no heavy imports):"
  "$VENV_DIR/bin/python" - <<'PY'
from importlib.metadata import PackageNotFoundError, version

for pkg in ("torch", "vllm", "transformers", "huggingface-hub", "nvidia-cudnn-cu12"):
    try:
        print(f"  {pkg:20s} {version(pkg)}")
    except PackageNotFoundError:
        print(f"  {pkg:20s} NOT INSTALLED")
PY
else
  echo "[1/2] Skipped venv creation (SETUP_MODE=$SETUP_MODE)."
fi
|
|
| |
# Phase 3/4: authenticate with HuggingFace and fetch the pinned model snapshot.
if [ "$do_model" = "1" ]; then
  # Default to the system interpreter, then prefer the venv one when present
  # (SETUP_MODE=model may legitimately run before any venv exists).
  PYTHON_BIN="python3"
  if [ -x "$VENV_DIR/bin/python3" ]; then
    PYTHON_BIN="$VENV_DIR/bin/python3"
  fi

  echo ""
  echo "[3] HuggingFace auth (needed for the gated Llama model)..."
  # NOTE(review): the token is passed on the command line, so it is briefly
  # visible in the process table (`ps`); consider stdin/env-based auth.
  "$PYTHON_BIN" -m huggingface_hub.commands.huggingface_cli login \
    --token "$HF_TOKEN"

  echo ""
  echo "[4] Downloading $MODEL_NAME @ $MODEL_REVISION (~16 GB)..."
  echo "    This may take 10-30 minutes on the first run."
  mkdir -p "$MODEL_DIR"
  "$PYTHON_BIN" -m huggingface_hub.commands.huggingface_cli download \
    "$MODEL_NAME" \
    --revision "$MODEL_REVISION" \
    --local-dir "$MODEL_DIR/Llama-3.1-8B-Instruct"

  # Print the commit SHA the requested revision resolved to, so future runs
  # can be pinned exactly via MODEL_REVISION.
  echo ""
  echo "Resolved revision for reproducibility:"
  "$PYTHON_BIN" - "$MODEL_NAME" "$MODEL_REVISION" <<'PY'
import sys
from huggingface_hub import HfApi

repo_id, revision = sys.argv[1], sys.argv[2]
info = HfApi().model_info(repo_id, revision=revision)
print(f"  repo: {repo_id}")
print(f"  revision: {info.sha} (requested: {revision})")
print(f"  pin with: export MODEL_REVISION={info.sha}")
PY
else
  echo "[3/4] Skipped model download (SETUP_MODE=$SETUP_MODE)."
fi
|
|
# Final summary plus pointers to the next workflow steps. A single here-doc
# replaces the echo-per-line form; \$MAIL_USER stays escaped so it is printed
# literally for the user to substitute, exactly as before.
cat <<EOF

=== Setup Complete ===
Venv: $VENV_DIR
Model dir: $MODEL_DIR/Llama-3.1-8B-Instruct
Pinned deps: requirements-insomnia.txt

Next steps:
  - Submit the vLLM serving job from this directory:
      sbatch --mail-type=BEGIN,END,FAIL --mail-user=\$MAIL_USER scripts/vllm_serve.sh
  - Or run a benchmark cell:
      sbatch scripts/run_experiment.sh configs/example_baseline.env
EOF
|