victor/qwen35-test-scripts / entrypoint-iq4ks.sh
download
raw
10.1 kB
#!/usr/bin/env bash
set -euo pipefail

# ── Quant name ──────────────────────────────────────────────────────────────
# Identifier for this quantization run; namespaces the /output directories.
readonly QUANT_NAME="iq4ks-pawellll"

# ── Logging ─────────────────────────────────────────────────────────────────
# Mirror everything written to stdout/stderr into a persistent job log.
readonly LOG_DIR="/tmp/logs"
mkdir -p -- "$LOG_DIR"
exec > >(tee -a "$LOG_DIR/job.log") 2>&1
log() { echo "[$(date -u +%H:%M:%S)] $*"; }
die() { log "FATAL: $*"; sync_logs; exit 1; }
# Best-effort copy of everything under $LOG_DIR into the /output bucket.
# Failures are logged but never abort the script (the bucket may be
# missing or read-only in some environments).
sync_logs() {
  local dest="/output/${QUANT_NAME}/logs"
  log "Syncing logs to ${dest}/..."
  mkdir -p "$dest" 2>/dev/null || true
  cp -r "$LOG_DIR"/* "${dest}/" 2>/dev/null || log "Warning: could not copy logs to /output"
}
log "=== Job started: $(date -u) ==="
log "ACCELERATOR=${ACCELERATOR:-unknown} | CPU_CORES=${CPU_CORES:-?} | MEMORY=${MEMORY:-?}"

# ── Verify model volume ────────────────────────────────────────────────────
# The GGUF must already exist on the mounted /model volume; abort otherwise.
MODEL_PATH="/model/Qwen3.5-27B-IQ4_KS.gguf"
log "Checking model volume..."
log "Looking for: $MODEL_PATH"
if ! ls -lh "$MODEL_PATH" 2>/dev/null; then
  log "File not found, listing /model/:"
  ls -lh /model/*.gguf 2>/dev/null | head -5
fi
if [[ ! -f "$MODEL_PATH" ]]; then
  die "Model file not found at $MODEL_PATH"
fi
log "Model file found: $(ls -lh "$MODEL_PATH" | awk '{print $5}')"
# ── Build ik_llama.cpp from source ─────────────────────────────────────────
log "Installing build dependencies..."
apt-get update -qq 2>&1 | tail -3
apt-get install -y -qq build-essential git curl ca-certificates gnupg wget 2>&1 | tail -3
log "Installing cmake 3.31 (ik_llama.cpp requires CUDA20 dialect)..."
wget -qO /tmp/cmake.sh https://github.com/Kitware/CMake/releases/download/v3.31.6/cmake-3.31.6-linux-x86_64.sh
bash /tmp/cmake.sh --skip-license --prefix=/usr/local 2>&1 | tail -3
cmake --version | head -1
log "Cloning ik_llama.cpp..."
git clone --depth 1 https://github.com/ikawrakow/ik_llama.cpp /tmp/ik_llama.cpp
cd /tmp/ik_llama.cpp
log "Building ik_llama.cpp with CUDA support..."
# FIX: pass CMAKE_BUILD_TYPE at configure time. With single-config
# generators (Makefiles/Ninja) `--build --config Release` is a no-op, so
# without this the optimization level is whatever the project defaults to.
cmake -B build -DGGML_CUDA=ON -DCMAKE_BUILD_TYPE=Release 2>&1 | tail -10
# FIX: size the parallel build to the machine (CPU_CORES env if provided,
# else nproc) instead of a hard-coded -j8.
BUILD_JOBS="${CPU_CORES:-$(nproc)}"
cmake --build build --config Release -j"$BUILD_JOBS" 2>&1 | tail -20
log "Build complete"
LLAMA_SERVER="/tmp/ik_llama.cpp/build/bin/llama-server"
[[ -f "$LLAMA_SERVER" ]] || die "llama-server binary not found at $LLAMA_SERVER"
log "llama-server binary: $(ls -lh "$LLAMA_SERVER" | awk '{print $5}')"
# ── Start llama-server ──────────────────────────────────────────────────────
log "Starting llama-server..."
# Collect the flags in an array so the launch line stays readable and each
# option can be commented or toggled independently.
server_args=(
  --model "$MODEL_PATH"
  --host 127.0.0.1
  --port 8080
  --ctx-size 8192
  --gpu-layers 999
  --threads 8
  --batch-size 512
  --ubatch-size 512
  --parallel 1
  --cont-batching
  --flash-attn on
  --cache-type-k q4_0
  --cache-type-v q4_0
  --jinja
  --chat-template-kwargs '{"enable_thinking":true}'
)
# Run in the background so Node/pi installation can proceed while the
# model loads; all server output goes to its own log file.
"$LLAMA_SERVER" "${server_args[@]}" > "$LOG_DIR/llama-server.log" 2>&1 &
LLAMA_PID=$!
log "llama-server PID: $LLAMA_PID"
# ── Install Node.js + pi while llama-server loads ──────────────────────────
# Deliberately done in parallel with the background model load above to
# hide apt/npm latency.
log "Installing Node.js 22 (while model loads in background)..."
curl -fsSL https://deb.nodesource.com/setup_22.x | bash - 2>&1 | tail -3
apt-get install -y -qq nodejs 2>&1 | tail -3
log "Node.js $(node --version) installed, npm $(npm --version)"
log "Installing pi coding agent..."
npm install -g @mariozechner/pi-coding-agent 2>&1 | tail -5
# `pi --version` may exit non-zero on some builds; the fallback keeps the
# command substitution from failing under set -e.
log "pi installed: $(pi --version 2>&1 || echo 'version check done')"
# ── Health check ────────────────────────────────────────────────────────────
# Poll /health until the server answers, the process dies, or MAX_WAIT
# seconds elapse. Loading a ~27B quant can take several minutes.
log "Waiting for llama-server to be ready..."
MAX_WAIT=600
ELAPSED=0
while true; do
# Ready when /health returns a JSON body containing "status"; curl -f makes
# HTTP error responses fail, so a still-loading 5xx does not match.
if curl -sf http://127.0.0.1:8080/health 2>/dev/null | grep -q '"status"'; then
break
fi
sleep 3
ELAPSED=$((ELAPSED + 3))
if [[ $ELAPSED -ge $MAX_WAIT ]]; then
log "llama-server failed to start within ${MAX_WAIT}s. Last logs:"
tail -30 "$LOG_DIR/llama-server.log" || true
die "Health check timeout"
fi
# Bail out early if the server process itself has exited.
kill -0 $LLAMA_PID 2>/dev/null || {
log "llama-server died. Last logs:"
tail -30 "$LOG_DIR/llama-server.log" || true
die "llama-server process exited"
}
# Progress heartbeat every 15s; safe under set -e because the failing
# [[ ]] sits on the left side of &&.
[[ $((ELAPSED % 15)) -eq 0 ]] && log " ...still waiting (${ELAPSED}s elapsed)"
done
log "llama-server is ready!"
# Quick sanity check: one tiny completion proves end-to-end inference works.
log "Testing inference..."
SANITY_BODY='{"model":"test","messages":[{"role":"user","content":"Say hello in one word."}],"max_tokens":128}'
if ! SANITY=$(curl -sf http://127.0.0.1:8080/v1/chat/completions \
    -H 'Content-Type: application/json' \
    -d "$SANITY_BODY" 2>/dev/null); then
  SANITY="FAILED"
fi
# Keep only the first 500 bytes of the response for the log bundle.
printf '%s\n' "$SANITY" | head -c 500 > "$LOG_DIR/sanity-check.json"
log "Sanity check response saved"
# ── Configure pi ────────────────────────────────────────────────────────────
log "Writing pi configuration..."
mkdir -p ~/.pi/agent
# Unquoted EOF delimiter: ${QUANT_NAME} below is expanded into the JSON on
# purpose. Points pi at the local llama-server OpenAI-compatible endpoint.
cat > ~/.pi/agent/models.json << EOF
{
"providers": {
"llamacpp-local": {
"baseUrl": "http://127.0.0.1:8080/v1",
"api": "openai-completions",
"apiKey": "none",
"compat": {
"supportsDeveloperRole": false,
"supportsReasoningEffort": false,
"supportsUsageInStreaming": false,
"supportsStrictMode": false,
"thinkingFormat": "qwen-chat-template"
},
"models": [
{
"id": "Qwen3.5-27B-${QUANT_NAME}",
"name": "Qwen3.5-27B-${QUANT_NAME}",
"reasoning": true,
"input": ["text"],
"contextWindow": 8192,
"maxTokens": 4096,
"cost": { "input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0 }
}
]
}
}
}
EOF
# Make the local model the default so pi needs no extra flags at runtime.
cat > ~/.pi/agent/settings.json << EOF
{
"defaultProvider": "llamacpp-local",
"defaultModel": "Qwen3.5-27B-${QUANT_NAME}",
"defaultThinkingLevel": "medium",
"hideThinkingBlock": false
}
EOF
log "pi config written"
# ── Clone test repo ─────────────────────────────────────────────────────────
WORKDIR="/workspace"
mkdir -p "$WORKDIR"
log "Cloning test repository..."
git clone --depth 1 https://github.com/sindresorhus/slugify "$WORKDIR/slugify"
cd "$WORKDIR/slugify"
log "Repo cloned: $(git log --oneline -1)"
log "Installing repo dependencies..."
npm install 2>&1 | tail -5
log "Dependencies installed"
# ── Run pi agentic task ────────────────────────────────────────────────────
# The quoted 'TASK_EOF' delimiter keeps this heredoc literal — nothing in
# the prompt is expanded by the shell.
TASK=$(cat << 'TASK_EOF'
You are working in a Node.js project called @sindresorhus/slugify — a string slugification library.
Your task: Add a new `--interactive` mode to the CLI (cli.js) that reads lines from stdin, slugifies each line, and prints the result. Requirements:
1. First, read and understand the existing codebase — look at the main module (index.js), the CLI (cli.js), and the test files.
2. Modify cli.js to accept a `--interactive` / `-i` flag. When set, the CLI should:
- Read lines from stdin (one string per line)
- Slugify each line using the library
- Print each slugified result to stdout
- Exit cleanly when stdin closes (EOF)
3. The existing CLI behavior (passing a string as an argument) must continue to work unchanged.
4. Add tests for the new interactive mode in the test file. The tests should:
- Test that piping multiple lines produces correct slugified output
- Test that existing argument-based usage still works
5. Run the existing test suite with `npm test` to make sure nothing is broken.
6. If tests fail, debug and fix until they pass.
Report what you did and whether tests pass.
TASK_EOF
)
log "Starting pi coding task..."
log "Task: Add --interactive stdin mode to slugify CLI"
# FIX: the previous `pi ... || true` followed by
# PI_EXIT=${PIPESTATUS[0]:-$?} always reported 0 — after `|| true`,
# PIPESTATUS reflects the `true` command, not pi. Capture the real exit
# code with `|| PI_EXIT=$?`, which also keeps set -e from aborting here.
PI_EXIT=0
pi \
--print \
--no-session \
--provider llamacpp-local \
--model "Qwen3.5-27B-${QUANT_NAME}" \
--thinking medium \
"$TASK" \
> "$LOG_DIR/pi-output.txt" 2>&1 || PI_EXIT=$?
log "pi exited with code: $PI_EXIT"
# ── Capture results ─────────────────────────────────────────────────────────
log "Collecting results..."
cd "$WORKDIR/slugify"
# Save the agent's diff and repo status; each step is best-effort so an
# odd repo state never aborts the run this late.
git diff > "$LOG_DIR/pi-changes.patch" 2>/dev/null || true
git diff --stat > "$LOG_DIR/pi-changes-stat.txt" 2>/dev/null || true
git status > "$LOG_DIR/git-status.txt" 2>/dev/null || true
# Try running tests one final time to get clean output
npm test > "$LOG_DIR/test-output.txt" 2>&1 || true
# ── Summary ─────────────────────────────────────────────────────────────────
log "=== RESULTS SUMMARY ==="
log "Pi exit code: $PI_EXIT"
log ""
log "--- Files changed ---"
cat "$LOG_DIR/pi-changes-stat.txt" 2>/dev/null || log "(no changes)"
log ""
log "--- Test results ---"
tail -20 "$LOG_DIR/test-output.txt" 2>/dev/null || log "(no test output)"
log ""
log "--- Log files ---"
ls -lh "$LOG_DIR/"
log ""
# ── Sync logs to output bucket ─────────────────────────────────────────────
sync_logs
log "=== Job complete: $(date -u) ==="
# Clean up llama-server
# TERM then reap; `|| true` because the server may already have exited.
kill $LLAMA_PID 2>/dev/null || true
wait $LLAMA_PID 2>/dev/null || true

Xet Storage Details

Size:
10.1 kB
·
Xet hash:
60847ed0963cf01149d8873e9a3cd127f60051c051cc37e1e7efe776d5449bf4

Xet efficiently stores files, intelligently splitting them into unique chunks and accelerating uploads and downloads. More info.