#!/usr/bin/env bash
set -euo pipefail

# Keep HuggingFace / Transformers download progress visible even when stdout
# is not a TTY (container logs would otherwise be silent during large pulls).
export HF_HUB_DISABLE_PROGRESS_BARS=0 \
       TRANSFORMERS_VERBOSITY=info \
       HF_HUB_VERBOSITY=info
# W&B Weave is off by default; callers may override via the environment.
export WANDB_DISABLE_WEAVE="${WANDB_DISABLE_WEAVE:-true}"
# Startup banner, plus a best-effort note on which platform we are running on.
printf '%s\n' "============================================"
printf '%s\n' " F13 Qwen 3.5 Fine-Tuning Container"
printf '%s\n' "============================================"
if [[ -n "${RUNPOD_POD_ID:-}" ]]; then
  echo "Running on RunPod pod: ${RUNPOD_POD_ID}"
elif [[ -n "${AWS_INSTANCE_ID:-}" ]]; then
  echo "Running on AWS instance: ${AWS_INSTANCE_ID}"
fi
# Single-node by default; orchestrators override these for multi-node runs.
NUM_NODES="${NUM_NODES:-1}"
NODE_RANK="${NODE_RANK:-0}"

# Multi-node runs get extra diagnostics: cluster topology, the local network
# interfaces, and a best-effort TCP probe toward the rendezvous master.
if (( NUM_NODES > 1 )); then
  echo ""
  echo "--- Multi-Node Cluster ---"
  echo " Nodes: ${NUM_NODES}"
  echo " This rank: ${NODE_RANK}"
  echo " Master: ${MASTER_ADDR:-unknown}:${MASTER_PORT:-29500}"
  echo " Node addr: ${NODE_ADDR:-unknown}"
  echo " World size: ${WORLD_SIZE:-unknown}"
  echo " GPUs/node: ${NUM_TRAINERS:-unknown}"
  # NOTE(review): this gate keys off PRIMARY_ADDR while the rest of the section
  # uses MASTER_ADDR — confirm PRIMARY_ADDR is the intended NCCL trigger.
  if [[ -n "${PRIMARY_ADDR:-}" ]]; then
    export NCCL_SOCKET_IFNAME="${NCCL_SOCKET_IFNAME:-ens1}"
    echo " NCCL iface: ${NCCL_SOCKET_IFNAME}"
  fi
  echo "--------------------------"
  echo ""
  echo "--- Network Interfaces ---"
  ip -4 addr show 2>/dev/null || echo "(ip command not available)"
  echo ""
  echo "--- Connectivity Test ---"
  if [[ -n "${MASTER_ADDR:-}" ]]; then
    rdzv_port="${MASTER_PORT:-29500}"
    # /dev/tcp probes are expected to fail before torchrun opens its ports;
    # the output is purely informational.
    echo -n " TCP to ${MASTER_ADDR}:${rdzv_port} -> "
    timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/${rdzv_port}" 2>/dev/null && echo "OK" || echo "UNREACHABLE (expected before torchrun)"
    echo -n " TCP to ${MASTER_ADDR}:8080 -> "
    timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/8080" 2>/dev/null && echo "OK" || echo "UNREACHABLE (master API may not be up yet)"
  fi
  echo "--------------------------"
fi
# GPU visibility check — non-fatal so CPU-only debug containers still boot.
if ! nvidia-smi; then
  echo "Warning: nvidia-smi not available"
fi
echo ""
# --- SSH Setup ---
# Configure and launch sshd for remote access into the container.
# Env: SSH_PORT (default 22), PUBLIC_KEY (optional), SSH_PASSWORD (optional).
SSH_PORT="${SSH_PORT:-22}"
mkdir -p /var/run/sshd /root/.ssh
chmod 700 /root/.ssh

# Install a caller-supplied public key. printf (not echo) so a key blob that
# happens to begin with '-' is never mistaken for an option.
if [ -n "${PUBLIC_KEY:-}" ]; then
  printf '%s\n' "${PUBLIC_KEY}" > /root/.ssh/authorized_keys
  chmod 600 /root/.ssh/authorized_keys
  echo "SSH public key installed."
fi

# Password auth is enabled only when SSH_PASSWORD is provided; otherwise it is
# explicitly disabled. The sed patterns are anchored to start-of-line so that
# comments merely mentioning a directive mid-line are left untouched.
if [ -n "${SSH_PASSWORD:-}" ]; then
  echo "root:${SSH_PASSWORD}" | chpasswd
  sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
  sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
  echo "SSH password auth enabled."
else
  sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config
fi
sed -i "s/^#*Port .*/Port ${SSH_PORT}/" /etc/ssh/sshd_config

# ssh-keygen -A only generates host keys that are missing, so it is safe to run
# unconditionally (the previous rsa-only check could skip a missing ed25519 key).
ssh-keygen -A

/usr/sbin/sshd
echo "SSH daemon started on port ${SSH_PORT}"
# --- HuggingFace / W&B ---
# Best-effort credential setup: login failures are deliberately swallowed
# (stderr discarded, `|| true`) so a bad token never blocks container startup.
if [ -z "${HF_TOKEN:-}" ]; then
  echo "HF_TOKEN not set — only public models/datasets available."
else
  echo "HF_TOKEN is set — logging into HuggingFace Hub..."
  hf auth login --token "${HF_TOKEN}" --add-to-git-credential 2>/dev/null || true
  echo "HuggingFace login complete."
fi

if [ -z "${WANDB_API_KEY:-}" ]; then
  echo "WANDB_API_KEY not set — W&B logging disabled by default."
else
  echo "WANDB_API_KEY is set — W&B logging available."
  # NOTE(review): the key is passed on argv and is briefly visible in `ps`;
  # `wandb login` with no argument would read it from the environment — confirm.
  wandb login "${WANDB_API_KEY}" 2>/dev/null || true
fi
echo ""
# --- Default dataset ---
# Seed /workspace/data with the bundled dataset unless the user mounted their
# own train.jsonl (in which case we leave /workspace/data entirely alone).
if [ -f "/workspace/data/train.jsonl" ]; then
  echo "Found custom dataset at /workspace/data/train.jsonl"
else
  echo "No custom train.jsonl found, copying bundled default dataset."
  mkdir -p /workspace/data
  cp /opt/f13/data/train.jsonl /workspace/data/train.jsonl
  cp /opt/f13/data/test.jsonl /workspace/data/test.jsonl
fi
# --- Pre-flight: verify key CUDA packages ---
# Import-probe each GPU kernel package and print an [OK]/[WARN] line per check;
# a missing package is reported but never fatal. The quoted heredoc delimiter
# keeps the Python source literal (no shell expansion, no escaped quotes).
echo "Verifying GPU-accelerated packages..."
python3 - <<'PY'
checks = []
try:
    import flash_attn
    checks.append(f'flash-attn {flash_attn.__version__}')
except Exception as e:
    checks.append(f'flash-attn MISSING: {e}')
try:
    import fla
    checks.append('flash-linear-attention OK')
    from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
    checks.append(' GatedDeltaNet FLA kernel OK')
except Exception as e:
    checks.append(f'flash-linear-attention MISSING: {e}')
try:
    import causal_conv1d
    checks.append('causal-conv1d OK')
except Exception as e:
    checks.append(f'causal-conv1d MISSING: {e}')
for c in checks:
    print(f' [{"OK" if "MISSING" not in c else "WARN"}] {c}')
PY
echo ""
# --- API Server ---
API_PORT="${API_PORT:-8080}"
echo ""
echo "Starting API server on port ${API_PORT}..."

# Without a shared secret every API call is rejected, so warn loudly up front.
if [[ -z "${API_SECRET:-}" ]]; then
  echo "WARNING: API_SECRET is not set. All API calls will be rejected."
  echo " Set API_SECRET env var to enable the training API."
fi

case "${AUTO_TRAIN:-false}" in
  true) echo "AUTO_TRAIN=true — training will start after API server is up." ;;
esac

# exec replaces this shell so uvicorn becomes PID 1 and receives signals directly.
exec uvicorn server.app:app \
  --host 0.0.0.0 \
  --port "${API_PORT}" \
  --app-dir /opt/f13