#!/usr/bin/env bash
set -euo pipefail

# Quantization variant under test; override via the QUANT_NAME env var.
QUANT_NAME="${QUANT_NAME:-Q3_K_M}"

# ── Logging ──────────────────────────────────────────────────────────────────
# Write logs locally under /tmp and mirror them into /output/${QUANT_NAME}/logs/
# when the job finishes (see sync_logs below).
LOG_DIR="/tmp/logs"
mkdir -p "$LOG_DIR"
exec > >(tee -a "$LOG_DIR/job.log") 2>&1

# UTC-timestamped logger used by every step of the job.
log() {
  echo "[$(date -u +%H:%M:%S)] $*"
}

# Best-effort copy of the local log directory into the output volume.
# Failures are downgraded to a warning so log syncing never kills the job.
sync_logs() {
  log "Syncing logs to /output/${QUANT_NAME}/logs/..."
  mkdir -p "/output/${QUANT_NAME}/logs" 2>/dev/null || true
  cp -r "$LOG_DIR"/* "/output/${QUANT_NAME}/logs/" 2>/dev/null || log "Warning: could not copy logs to /output"
}

# Log a fatal message, flush logs to the output volume, then abort the job.
die() {
  log "FATAL: $*"
  sync_logs
  exit 1
}
log "=== Job started: $(date -u) ==="
log "ACCELERATOR=${ACCELERATOR:-unknown} | CPU_CORES=${CPU_CORES:-?} | MEMORY=${MEMORY:-?}"

# ── Verify model volume ─────────────────────────────────────────────────────
# MODEL_FILE env var overrides the default filename pattern.
MODEL_PATH="/model/${MODEL_FILE:-Qwen3.5-27B-${QUANT_NAME}.gguf}"
log "Checking model volume..."
log "Looking for: $MODEL_PATH"
if ! ls -lh "$MODEL_PATH" 2>/dev/null; then
  log "File not found, listing /model/:"
  # Guard the diagnostic listing: with `set -o pipefail`, an empty /model/
  # makes `ls | head` fail, and as the final command of the original `||`
  # fallback that aborted the script here — before the die below could
  # ever report the real problem.
  ls -lh /model/*.gguf 2>/dev/null | head -5 || true
fi
[[ -f "$MODEL_PATH" ]] || die "Model file not found at $MODEL_PATH"
log "Model file found: $(ls -lh "$MODEL_PATH" | awk '{print $5}')"
# ── Start llama-server ───────────────────────────────────────────────────────
log "Starting llama-server..."
export LD_LIBRARY_PATH="/app:${LD_LIBRARY_PATH:-}"
/app/llama-server \
  --model "$MODEL_PATH" \
  --host 127.0.0.1 \
  --port 8080 \
  --ctx-size 32768 \
  --gpu-layers 999 \
  --threads 8 \
  --batch-size 512 \
  --ubatch-size 512 \
  --parallel 1 \
  --cont-batching \
  --flash-attn on \
  --cache-type-k q8_0 \
  --cache-type-v q8_0 \
  --chat-template-kwargs '{"enable_thinking":true}' \
  > "$LOG_DIR/llama-server.log" 2>&1 &
LLAMA_PID=$!
# Ensure the server never outlives the script: without this trap, an early
# `die` (or any set -e exit) would leave llama-server running in the
# background. The explicit kill at the end of the job remains harmless —
# killing an already-dead PID is a no-op here.
trap 'kill "${LLAMA_PID:-}" 2>/dev/null || true' EXIT
log "llama-server PID: $LLAMA_PID"
# ── Install Node.js + pi while llama-server loads ───────────────────────────
# Grouped into a helper so the install sequence reads as one unit; runs
# immediately. Heavy apt/npm output is trimmed to the last few lines.
install_agent_toolchain() {
  log "Installing Node.js 22 (while model loads in background)..."
  apt-get update -qq 2>&1 | tail -3
  apt-get install -y -qq curl git ca-certificates gnupg 2>&1 | tail -3
  # NOTE(review): pipes the remote NodeSource setup script straight into
  # bash — acceptable inside a throwaway job container, but worth knowing.
  curl -fsSL https://deb.nodesource.com/setup_22.x | bash - 2>&1 | tail -3
  apt-get install -y -qq nodejs 2>&1 | tail -3
  log "Node.js $(node --version) installed, npm $(npm --version)"

  log "Installing pi coding agent..."
  npm install -g @mariozechner/pi-coding-agent 2>&1 | tail -5
  log "pi installed: $(pi --version 2>&1 || echo 'version check done')"
}
install_agent_toolchain
# ── Health check ─────────────────────────────────────────────────────────────
# Poll /health every 3s until the server answers, the process dies, or the
# timeout expires. The death check runs BEFORE the sleep so a crashed server
# is reported immediately instead of up to 3s later.
log "Waiting for llama-server to be ready..."
MAX_WAIT=300
ELAPSED=0
while true; do
  if curl -sf http://127.0.0.1:8080/health 2>/dev/null | grep -q '"status"'; then
    break
  fi
  kill -0 "$LLAMA_PID" 2>/dev/null || {
    log "llama-server died. Last logs:"
    tail -30 "$LOG_DIR/llama-server.log" || true
    die "llama-server process exited"
  }
  sleep 3
  ELAPSED=$((ELAPSED + 3))
  if [[ $ELAPSED -ge $MAX_WAIT ]]; then
    log "llama-server failed to start within ${MAX_WAIT}s. Last logs:"
    tail -30 "$LOG_DIR/llama-server.log" || true
    die "Health check timeout"
  fi
  # Plain `if` instead of a trailing `&&` list: keeps the loop body's exit
  # status clean and avoids set -e edge cases.
  if [[ $((ELAPSED % 15)) -eq 0 ]]; then
    log " ...still waiting (${ELAPSED}s elapsed)"
  fi
done
log "llama-server is ready!"

# Quick sanity check: one tiny completion request; response (truncated to
# 500 bytes) is stashed for post-mortem inspection.
log "Testing inference..."
SANITY=$(curl -sf http://127.0.0.1:8080/v1/chat/completions \
  -H 'Content-Type: application/json' \
  -d '{"model":"test","messages":[{"role":"user","content":"Say hello in one word."}],"max_tokens":128}' 2>/dev/null || echo "FAILED")
echo "$SANITY" | head -c 500 > "$LOG_DIR/sanity-check.json"
log "Sanity check response saved"
# ── Configure pi ─────────────────────────────────────────────────────────────
# Writes the two config files the pi agent reads: a provider/model registry
# (models.json) pointing at the local llama-server, and default settings
# (settings.json) selecting that provider/model.
log "Writing pi configuration..."
mkdir -p ~/.pi/agent
# Unquoted EOF delimiter on purpose: ${QUANT_NAME} below is expanded by the
# shell, so the generated JSON embeds the concrete model id.
cat > ~/.pi/agent/models.json << EOF
{
  "providers": {
    "llamacpp-local": {
      "baseUrl": "http://127.0.0.1:8080/v1",
      "api": "openai-completions",
      "apiKey": "none",
      "compat": {
        "supportsDeveloperRole": false,
        "supportsReasoningEffort": false,
        "supportsUsageInStreaming": false,
        "supportsStrictMode": false,
        "thinkingFormat": "qwen-chat-template"
      },
      "models": [
        {
          "id": "Qwen3.5-27B-${QUANT_NAME}",
          "name": "Qwen3.5-27B-${QUANT_NAME}",
          "reasoning": true,
          "input": ["text"],
          "contextWindow": 32768,
          "maxTokens": 8192,
          "cost": { "input": 0, "output": 0, "cacheRead": 0, "cacheWrite": 0 }
        }
      ]
    }
  }
}
EOF
# Defaults so `pi` can be invoked without repeating provider/model flags.
cat > ~/.pi/agent/settings.json << EOF
{
  "defaultProvider": "llamacpp-local",
  "defaultModel": "Qwen3.5-27B-${QUANT_NAME}",
  "defaultThinkingLevel": "medium",
  "hideThinkingBlock": false
}
EOF
log "pi config written"
# ── Clone test repo ──────────────────────────────────────────────────────────
# Shallow-clone the target project the agent will modify, then install its
# dev dependencies so `npm test` works. set -e aborts the job on failure.
WORKDIR="/workspace"
REPO_DIR="$WORKDIR/slugify"
mkdir -p "$WORKDIR"

log "Cloning test repository..."
git clone --depth 1 https://github.com/sindresorhus/slugify "$REPO_DIR"
cd "$REPO_DIR"
log "Repo cloned: $(git log --oneline -1)"

log "Installing repo dependencies..."
npm install 2>&1 | tail -5
log "Dependencies installed"
# ── Run pi agentic task ──────────────────────────────────────────────────────
# Quoted 'TASK_EOF' delimiter: the prompt is passed to the model verbatim,
# with no shell expansion.
TASK=$(cat << 'TASK_EOF'
You are working in a Node.js project called @sindresorhus/slugify — a string slugification library.
Your task: Add a new `--interactive` mode to the CLI (cli.js) that reads lines from stdin, slugifies each line, and prints the result. Requirements:
1. First, read and understand the existing codebase — look at the main module (index.js), the CLI (cli.js), and the test files.
2. Modify cli.js to accept a `--interactive` / `-i` flag. When set, the CLI should:
   - Read lines from stdin (one string per line)
   - Slugify each line using the library
   - Print each slugified result to stdout
   - Exit cleanly when stdin closes (EOF)
3. The existing CLI behavior (passing a string as an argument) must continue to work unchanged.
4. Add tests for the new interactive mode in the test file. The tests should:
   - Test that piping multiple lines produces correct slugified output
   - Test that existing argument-based usage still works
5. Run the existing test suite with `npm test` to make sure nothing is broken.
6. If tests fail, debug and fix until they pass.
Report what you did and whether tests pass.
TASK_EOF
)
log "Starting pi coding task..."
log "Task: Add --interactive stdin mode to slugify CLI"
# Capture pi's real exit code. The previous `|| true` + `${PIPESTATUS[0]}`
# pattern always reported 0 on failure: PIPESTATUS is reset by the `true`
# that runs when pi fails (and there is no pipeline here anyway).
PI_EXIT=0
pi \
  --print \
  --no-session \
  --provider llamacpp-local \
  --model "Qwen3.5-27B-${QUANT_NAME}" \
  --thinking medium \
  "$TASK" \
  > "$LOG_DIR/pi-output.txt" 2>&1 || PI_EXIT=$?
log "pi exited with code: $PI_EXIT"
# ── Capture results ──────────────────────────────────────────────────────────
# Snapshot whatever the agent changed (diff, stat, status) plus a final test
# run; every capture is best-effort so a broken repo state cannot kill the job.
log "Collecting results..."
cd "$WORKDIR/slugify"
git diff > "$LOG_DIR/pi-changes.patch" 2>/dev/null || true
git diff --stat > "$LOG_DIR/pi-changes-stat.txt" 2>/dev/null || true
git status > "$LOG_DIR/git-status.txt" 2>/dev/null || true

# Try running tests one final time to get clean output
npm test > "$LOG_DIR/test-output.txt" 2>&1 || true

# ── Summary ──────────────────────────────────────────────────────────────────
# Human-readable recap written through the normal logger (and thus into
# job.log) before everything is synced to the output volume.
print_summary() {
  log "=== RESULTS SUMMARY ==="
  log "Pi exit code: $PI_EXIT"
  log ""
  log "--- Files changed ---"
  cat "$LOG_DIR/pi-changes-stat.txt" 2>/dev/null || log "(no changes)"
  log ""
  log "--- Test results ---"
  tail -20 "$LOG_DIR/test-output.txt" 2>/dev/null || log "(no test output)"
  log ""
  log "--- Log files ---"
  ls -lh "$LOG_DIR/"
  log ""
}
print_summary

# ── Sync logs to output bucket ──────────────────────────────────────────────
sync_logs
log "=== Job complete: $(date -u) ==="

# Clean up llama-server
kill "$LLAMA_PID" 2>/dev/null || true
wait "$LLAMA_PID" 2>/dev/null || true
# ── File metadata (from hosting page; kept as comments so the script parses) ─
#   Size: 9.2 kB
#   Xet hash: e66e7d20d0d00279fc209e8c42e605b4400d076b2af1c0e0fb7583f7bda492fc