#!/usr/bin/env bash
# Container entrypoint: configure logging, SSH access, credentials and data,
# verify the GPU stack, then launch the training API server in the foreground.
set -euo pipefail

# Keep HuggingFace Hub progress bars visible (0 = do NOT disable) and make
# transformers/hub logging verbose for easier debugging of downloads.
export HF_HUB_DISABLE_PROGRESS_BARS=0
export TRANSFORMERS_VERBOSITY=info
export HF_HUB_VERBOSITY=info
# Weave is opt-in: default it off unless the caller explicitly overrides.
export WANDB_DISABLE_WEAVE="${WANDB_DISABLE_WEAVE:-true}"

printf '%s\n' \
  "============================================" \
  " F13 Qwen 3.5 Fine-Tuning Container" \
  "============================================"
|
|
# Best-effort report of the hosting provider (silent when neither marker
# variable is present).
if [[ -n "${RUNPOD_POD_ID:-}" ]]; then
  printf 'Running on RunPod pod: %s\n' "${RUNPOD_POD_ID}"
elif [[ -n "${AWS_INSTANCE_ID:-}" ]]; then
  printf 'Running on AWS instance: %s\n' "${AWS_INSTANCE_ID}"
fi
|
|
# --- Multi-node topology report --------------------------------------------
# NUM_NODES / NODE_RANK are injected by the cluster launcher; default to a
# single-node layout when absent. Everything below is purely diagnostic.
NUM_NODES="${NUM_NODES:-1}"
NODE_RANK="${NODE_RANK:-0}"
if (( NUM_NODES > 1 )); then
  printf '%s\n' \
    "" \
    "--- Multi-Node Cluster ---" \
    " Nodes: ${NUM_NODES}" \
    " This rank: ${NODE_RANK}" \
    " Master: ${MASTER_ADDR:-unknown}:${MASTER_PORT:-29500}" \
    " Node addr: ${NODE_ADDR:-unknown}" \
    " World size: ${WORLD_SIZE:-unknown}" \
    " GPUs/node: ${NUM_TRAINERS:-unknown}"

  # Pin the NCCL network interface only when the launcher exported a
  # primary address (default iface name "ens1" unless overridden).
  if [[ -n "${PRIMARY_ADDR:-}" ]]; then
    export NCCL_SOCKET_IFNAME="${NCCL_SOCKET_IFNAME:-ens1}"
    printf ' NCCL iface: %s\n' "${NCCL_SOCKET_IFNAME}"
  fi
  printf '%s\n' "--------------------------" "" "--- Network Interfaces ---"
  ip -4 addr show 2>/dev/null || echo "(ip command not available)"
  printf '%s\n' "" "--- Connectivity Test ---"

  # Probe the master's rendezvous and API ports; failures here are expected
  # until torchrun / the master API actually start listening.
  if [[ -n "${MASTER_ADDR:-}" ]]; then
    master_port="${MASTER_PORT:-29500}"
    echo -n " TCP to ${MASTER_ADDR}:${master_port} -> "
    if timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/${master_port}" 2>/dev/null; then
      echo "OK"
    else
      echo "UNREACHABLE (expected before torchrun)"
    fi
    echo -n " TCP to ${MASTER_ADDR}:8080 -> "
    if timeout 3 bash -c "echo > /dev/tcp/${MASTER_ADDR}/8080" 2>/dev/null; then
      echo "OK"
    else
      echo "UNREACHABLE (master API may not be up yet)"
    fi
  fi
  echo "--------------------------"
fi
|
|
# Show the GPU inventory; tolerate hosts where the NVIDIA driver is missing.
if ! nvidia-smi; then
  echo "Warning: nvidia-smi not available"
fi
echo ""
|
|
| |
# --- SSH daemon setup -------------------------------------------------------
# Gives operators shell access into the container. A public key from
# $PUBLIC_KEY enables key auth; password auth is enabled only when
# $SSH_PASSWORD is supplied, and is explicitly disabled otherwise.
SSH_PORT="${SSH_PORT:-22}"
mkdir -p /var/run/sshd /root/.ssh
chmod 700 /root/.ssh

if [ -n "${PUBLIC_KEY:-}" ]; then
  echo "${PUBLIC_KEY}" > /root/.ssh/authorized_keys
  chmod 600 /root/.ssh/authorized_keys
  echo "SSH public key installed."
fi

if [ -n "${SSH_PASSWORD:-}" ]; then
  echo "root:${SSH_PASSWORD}" | chpasswd
  # Anchor the patterns to the start of the line: the previous unanchored
  # form (s/#*PermitRootLogin.*/…/) could also rewrite a directive name that
  # appears mid-line, e.g. inside a descriptive comment sentence.
  sed -i 's/^#*PermitRootLogin.*/PermitRootLogin yes/' /etc/ssh/sshd_config
  sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication yes/' /etc/ssh/sshd_config
  echo "SSH password auth enabled."
else
  sed -i 's/^#*PasswordAuthentication.*/PasswordAuthentication no/' /etc/ssh/sshd_config
fi

# Rewrite the (possibly commented-out) Port directive, anchored as above.
sed -i "s/^#*Port .*/Port ${SSH_PORT}/" /etc/ssh/sshd_config

# Generate host keys on first boot; ssh-keygen -A only creates missing keys.
if [ ! -f /etc/ssh/ssh_host_rsa_key ]; then
  ssh-keygen -A
fi

/usr/sbin/sshd
echo "SSH daemon started on port ${SSH_PORT}"
|
|
| |
# --- Hub credentials --------------------------------------------------------
# Best-effort logins: stderr is suppressed and failures are swallowed so an
# expired or malformed token never blocks container startup.
if [[ -n "${HF_TOKEN:-}" ]]; then
  echo "HF_TOKEN is set — logging into HuggingFace Hub..."
  hf auth login --token "${HF_TOKEN}" --add-to-git-credential 2>/dev/null || true
  echo "HuggingFace login complete."
else
  echo "HF_TOKEN not set — only public models/datasets available."
fi

if [[ -n "${WANDB_API_KEY:-}" ]]; then
  echo "WANDB_API_KEY is set — W&B logging available."
  wandb login "${WANDB_API_KEY}" 2>/dev/null || true
else
  echo "WANDB_API_KEY not set — W&B logging disabled by default."
fi
echo ""
|
|
| |
# --- Dataset provisioning ---------------------------------------------------
# Prefer user-mounted data under /workspace/data, falling back to the bundled
# defaults. train.jsonl and test.jsonl are handled independently: previously
# test.jsonl was only copied when train.jsonl was missing, so a user who
# mounted just a custom train split silently lost the default eval split.
mkdir -p /workspace/data
if [ ! -f "/workspace/data/train.jsonl" ]; then
  echo "No custom train.jsonl found, copying bundled default dataset."
  cp /opt/f13/data/train.jsonl /workspace/data/train.jsonl
else
  echo "Found custom dataset at /workspace/data/train.jsonl"
fi
if [ ! -f "/workspace/data/test.jsonl" ]; then
  cp /opt/f13/data/test.jsonl /workspace/data/test.jsonl
fi
|
|
| |
# --- GPU package self-check -------------------------------------------------
# Import each CUDA-accelerated extension and print an [OK]/[WARN] line per
# package without ever failing the boot sequence. The quoted heredoc keeps
# the Python source literal: the previous inline double-quoted `python3 -c`
# string needed fragile \" escaping and was exposed to shell $-expansion.
echo "Verifying GPU-accelerated packages..."
python3 <<'PY'
checks = []
try:
    import flash_attn; checks.append(f'flash-attn {flash_attn.__version__}')
except Exception as e: checks.append(f'flash-attn MISSING: {e}')
try:
    import fla; checks.append('flash-linear-attention OK')
    from fla.ops.gated_delta_rule import fused_recurrent_gated_delta_rule
    checks.append(' GatedDeltaNet FLA kernel OK')
except Exception as e: checks.append(f'flash-linear-attention MISSING: {e}')
try:
    import causal_conv1d; checks.append('causal-conv1d OK')
except Exception as e: checks.append(f'causal-conv1d MISSING: {e}')
for c in checks: print(f' [{"OK" if "MISSING" not in c else "WARN"}] {c}')
PY
echo ""
|
|
| |
# --- API server (final foreground process) ----------------------------------
# exec replaces this shell with uvicorn so the server inherits PID 1 signal
# handling from the container runtime.
API_PORT="${API_PORT:-8080}"
printf '\nStarting API server on port %s...\n' "${API_PORT}"

# Without a shared secret the API rejects every request; warn loudly.
if [[ -z "${API_SECRET:-}" ]]; then
  echo "WARNING: API_SECRET is not set. All API calls will be rejected."
  echo " Set API_SECRET env var to enable the training API."
fi

if [[ "${AUTO_TRAIN:-false}" == "true" ]]; then
  echo "AUTO_TRAIN=true — training will start after API server is up."
fi

exec uvicorn server.app:app --host 0.0.0.0 --port "${API_PORT}" --app-dir /opt/f13
|
|