#!/bin/bash
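# llama-prompt.sh: run a prompt file through llama.cpp using settings from
# ~/.config/llama/llama-prompt.conf, with optional per-run overrides.
# Usage: llama-prompt.sh <prompt_file> [model] [context_size]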
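# Write a default configuration file on first run.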
if [ ! -f "$HOME/.config/llama/llama-prompt.conf" ]; then
    mkdir -p "$HOME/.config/llama"
    cat <<EOF > "$HOME/.config/llama/llama-prompt.conf"
LLAMA_MODEL_NAME=$HOME/.ai/models/llama/teknium/OpenHermes-2.5-Mistral-7B/openhermes-2.5-mistral-7b-f16.gguf
LLAMA_CONTEXT_SIZE=8192
LLAMA_TEMPERATURE=1.0
LLAMA_TOP_P=1.0
LLAMA_MIN_P=0.3
LLAMA_TOP_K=0
EOF
fi
|
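# Load the model path, context size, and sampling parameters from the config.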
source "$HOME/.config/llama/llama-prompt.conf"
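# A prompt file is required; it is passed to llama's --file flag below.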
if [ -z "$1" ]; then
    echo "Usage: llama-prompt.sh <prompt_file> [model] [context_size]"
    exit 1
fi
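# Optional second argument overrides the configured model path.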
if [ -n "$2" ]; then
    LLAMA_MODEL_NAME="$2"
fi
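# Optional third argument overrides the configured context size.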
if [ -n "$3" ]; then
    LLAMA_CONTEXT_SIZE="$3"
fi
|
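# Run llama.cpp with the configured sampling settings, silence its stderr
# logging, and wrap the output at 80 columns for readability.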
llama \
    --file "$1" \
    --model "$LLAMA_MODEL_NAME" \
    --ctx-size "$LLAMA_CONTEXT_SIZE" \
    --temp "$LLAMA_TEMPERATURE" \
    --min-p "$LLAMA_MIN_P" \
    --top-p "$LLAMA_TOP_P" \
    --top-k "$LLAMA_TOP_K" \
    --threads 6 \
    --log-disable 2> /dev/null | fmt -w 80