Spaces:
Sleeping
Sleeping
Upload folder using huggingface_hub
Browse files- .gitattributes +36 -21
- .github/workflows/sync_to_hf.yml +19 -0
- .hfignore +9 -0
- Dockerfile +82 -25
- README.md +6 -20
- api_app.py +123 -0
- build_logs.txt +3 -0
- check_secrets.py +35 -0
- full_build_logs.txt +5 -0
- initialize.py +166 -0
- knowledge/custom/TED Podcasts.pdf +3 -0
- models.py +632 -0
- preload.py +58 -0
- python/api/chat.py +88 -0
- python/api/csrf_token.py +23 -0
- python/api/docs.py +82 -0
- python/api/get.py +23 -0
- python/api/health.py +17 -0
- python/api/set.py +52 -0
- python/api/stream.py +111 -0
- python/extensions/reasoning_stream/_30_api_stream.py +3 -0
- python/extensions/response_stream/_30_api_stream.py +3 -0
- python/extensions/system_prompt/_10_system_prompt.py +43 -0
- python/helpers/csrf.py +28 -0
- python/helpers/mcp_server.py +433 -0
- python/helpers/searxng.py +36 -0
- python/helpers/settings.py +1623 -0
- python/tools/search_engine.py +81 -0
- requirements.txt +44 -0
- run_ui.py +322 -0
- searxng/settings.yml +0 -0
- start.sh +11 -0
- test_api.py +68 -0
- test_deployed_app.py +18 -0
- webui/index.html +0 -0
- webui/js/api.js +90 -0
- webui/js/index.js +1275 -0
- whisper.py +5 -0
.gitattributes
CHANGED
|
@@ -1,21 +1,36 @@
|
|
| 1 |
-
*
|
| 2 |
-
|
| 3 |
-
|
| 4 |
-
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
| 8 |
-
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 36 |
+
knowledge/custom/TED[[:space:]]Podcasts.pdf filter=lfs diff=lfs merge=lfs -text
|
.github/workflows/sync_to_hf.yml
ADDED
|
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
name: Sync to Hugging Face hub
|
| 2 |
+
on:
|
| 3 |
+
push:
|
| 4 |
+
branches: [main]
|
| 5 |
+
workflow_dispatch:
|
| 6 |
+
|
| 7 |
+
jobs:
|
| 8 |
+
sync-to-hub:
|
| 9 |
+
runs-on: ubuntu-latest
|
| 10 |
+
steps:
|
| 11 |
+
- uses: actions/checkout@v3
|
| 12 |
+
with:
|
| 13 |
+
fetch-depth: 0
|
| 14 |
+
lfs: true
|
| 15 |
+
- name: Push to hub
|
| 16 |
+
env:
|
| 17 |
+
HF_TOKEN: ${{ secrets.HF_TOKEN }}
|
| 18 |
+
run: |
|
| 19 |
+
git push --force https://AUXteam:$HF_TOKEN@huggingface.co/spaces/AUXteam/Agent-skillset HEAD:main
|
.hfignore
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git/
|
| 2 |
+
.github/
|
| 3 |
+
agent-zero-repo/
|
| 4 |
+
knowledge/default/
|
| 5 |
+
knowledge/custom/*.pdf
|
| 6 |
+
tmp/
|
| 7 |
+
*.log
|
| 8 |
+
__pycache__/
|
| 9 |
+
.env
|
Dockerfile
CHANGED
|
@@ -1,39 +1,96 @@
|
|
| 1 |
-
|
|
|
|
| 2 |
|
| 3 |
-
|
| 4 |
-
PYTHONDONTWRITEBYTECODE=1 \
|
| 5 |
-
DEBIAN_FRONTEND=noninteractive \
|
| 6 |
-
HOME=/home/user \
|
| 7 |
-
APP_HOME=/home/user/app
|
| 8 |
|
| 9 |
-
#
|
| 10 |
-
|
|
|
|
|
|
|
|
|
|
| 11 |
git \
|
| 12 |
curl \
|
|
|
|
|
|
|
|
|
|
| 13 |
build-essential \
|
| 14 |
-
|
| 15 |
-
libssl-dev \
|
| 16 |
-
pkg-config \
|
| 17 |
-
python3-dev \
|
| 18 |
-
gfortran \
|
| 19 |
-
libopenblas-dev \
|
| 20 |
-
liblapack-dev \
|
| 21 |
-
cmake \
|
| 22 |
-
ninja-build \
|
| 23 |
-
libgomp1 \
|
| 24 |
-
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
| 25 |
|
|
|
|
| 26 |
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 27 |
ENV PATH="/root/.local/bin:$PATH"
|
| 28 |
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
|
|
|
|
| 34 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
USER user
|
| 36 |
-
RUN chmod +x /home/user/app/start_hf.sh
|
| 37 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 38 |
EXPOSE 7860
|
| 39 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
# Use Python 3.11 image for better compatibility
|
| 2 |
+
FROM python:3.11-slim-bookworm
|
| 3 |
|
| 4 |
+
LABEL description="Dockerfile for Agent-Zero on Hugging Face Spaces"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
+
# Avoid prompts during package installation
|
| 7 |
+
ENV DEBIAN_FRONTEND=noninteractive
|
| 8 |
+
|
| 9 |
+
# Install system dependencies
|
| 10 |
+
RUN apt-get update && apt-get install -y \
|
| 11 |
git \
|
| 12 |
curl \
|
| 13 |
+
openssl \
|
| 14 |
+
procps \
|
| 15 |
+
zstd \
|
| 16 |
build-essential \
|
| 17 |
+
&& rm -rf /var/lib/apt/lists/*
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
|
| 19 |
+
# Install uv
|
| 20 |
RUN curl -LsSf https://astral.sh/uv/install.sh | sh
|
| 21 |
ENV PATH="/root/.local/bin:$PATH"
|
| 22 |
|
| 23 |
+
# Use the official Ollama installation script
|
| 24 |
+
RUN curl -fsSL https://ollama.com/install.sh | sh
|
| 25 |
+
|
| 26 |
+
# Clone the agent-zero repository
|
| 27 |
+
RUN git clone --branch fix-initialize-mcp-nameerror https://github.com/JsonLord/agent-zero.git /app
|
| 28 |
+
|
| 29 |
+
# Copy the local files to overwrite or add to the repository
|
| 30 |
+
COPY requirements.txt /app/requirements.txt
|
| 31 |
+
COPY run_ui.py /app/run_ui.py
|
| 32 |
+
COPY models.py /app/models.py
|
| 33 |
+
COPY whisper.py /app/python/helpers/whisper.py
|
| 34 |
+
COPY webui/js/api.js /app/webui/js/api.js
|
| 35 |
+
COPY webui/index.html /app/webui/index.html
|
| 36 |
+
COPY webui/js/index.js /app/webui/js/index.js
|
| 37 |
+
COPY preload.py /app/preload.py
|
| 38 |
+
COPY python/extensions/system_prompt/_10_system_prompt.py /app/python/extensions/system_prompt/_10_system_prompt.py
|
| 39 |
+
COPY python/helpers/searxng.py /app/python/helpers/searxng.py
|
| 40 |
+
COPY python/helpers/settings.py /app/python/helpers/settings.py
|
| 41 |
+
COPY python/helpers/csrf.py /app/python/helpers/csrf.py
|
| 42 |
+
COPY python/api/csrf_token.py /app/python/api/csrf_token.py
|
| 43 |
+
COPY start.sh /app/start.sh
|
| 44 |
+
COPY python/tools/search_engine.py /app/python/tools/search_engine.py
|
| 45 |
+
COPY initialize.py /app/initialize.py
|
| 46 |
+
|
| 47 |
+
# New API handlers
|
| 48 |
+
COPY python/api/health.py /app/python/api/health.py
|
| 49 |
+
COPY python/api/chat.py /app/python/api/chat.py
|
| 50 |
+
COPY python/api/stream.py /app/python/api/stream.py
|
| 51 |
+
COPY python/api/set.py /app/python/api/set.py
|
| 52 |
+
COPY python/api/get.py /app/python/api/get.py
|
| 53 |
+
COPY python/api/docs.py /app/python/api/docs.py
|
| 54 |
+
|
| 55 |
+
# New extensions
|
| 56 |
+
COPY python/extensions/response_stream/_30_api_stream.py /app/python/extensions/response_stream/_30_api_stream.py
|
| 57 |
+
COPY python/extensions/reasoning_stream/_30_api_stream.py /app/python/extensions/reasoning_stream/_30_api_stream.py
|
| 58 |
+
|
| 59 |
+
# Set the working directory for the next steps
|
| 60 |
+
WORKDIR /app
|
| 61 |
+
|
| 62 |
+
# --- DEFINITIVE FIX: GENERATE KEY AT BUILD TIME ---
|
| 63 |
+
RUN echo "FLASK_SECRET_KEY=$(openssl rand -hex 32)" > .env
|
| 64 |
+
|
| 65 |
+
# Install Python dependencies from requirements.txt using uv
|
| 66 |
+
RUN uv pip install --system --no-cache -r requirements.txt
|
| 67 |
|
| 68 |
+
# Pre-download the required spaCy model during the build
|
| 69 |
+
RUN python -m spacy download en_core_web_sm
|
| 70 |
|
| 71 |
+
# Manually create the 'ollama' group
|
| 72 |
+
RUN groupadd -r ollama
|
| 73 |
+
|
| 74 |
+
# Create a non-root user for security
|
| 75 |
+
RUN useradd --create-home --shell /bin/bash user
|
| 76 |
+
|
| 77 |
+
# Add the user to the 'ollama' group so it can use the service
|
| 78 |
+
RUN usermod -aG ollama user
|
| 79 |
+
|
| 80 |
+
# Grant the non-root user ownership of the application directory
|
| 81 |
+
RUN chown -R user:user /app
|
| 82 |
+
|
| 83 |
+
# Make start.sh executable
|
| 84 |
+
RUN chmod +x /app/start.sh
|
| 85 |
+
|
| 86 |
+
# Switch to the non-root user
|
| 87 |
USER user
|
|
|
|
| 88 |
|
| 89 |
+
# Set the final working directory
|
| 90 |
+
WORKDIR /app
|
| 91 |
+
|
| 92 |
+
# Expose the application port (Hugging Face standard is 7860)
|
| 93 |
EXPOSE 7860
|
| 94 |
+
|
| 95 |
+
# Command to start the services
|
| 96 |
+
CMD ["/app/start.sh"]
|
README.md
CHANGED
|
@@ -1,26 +1,12 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
-
colorTo:
|
| 6 |
sdk: docker
|
| 7 |
app_port: 7860
|
| 8 |
pinned: false
|
|
|
|
| 9 |
---
|
| 10 |
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
This space runs a modified version of **Agent-Zero**, adapted for Hugging Face Spaces. It automatically pulls the latest version from the main branch on startup while maintaining custom adaptations for API accessibility and non-root environment compatibility.
|
| 14 |
-
|
| 15 |
-
## Key Adaptations
|
| 16 |
-
|
| 17 |
-
1. **Authentication & CSRF Bypass**: Security is adjusted for Hugging Face's environment to allow public API access via `https://Leon4gr45-openoperator.hf.space`.
|
| 18 |
-
2. **Health Check & API Documentation**: Mandatory endpoints `/health` and `/api-docs` are available at the root.
|
| 19 |
-
3. **Port Mapping**: The application is configured to listen on port 7860, as required by the Space.
|
| 20 |
-
4. **Runtime Logic**: The system automatically identifies the Dockerized environment and adjusts paths and settings accordingly.
|
| 21 |
-
|
| 22 |
-
## API Endpoints
|
| 23 |
-
|
| 24 |
-
- **Health**: `/health`
|
| 25 |
-
- **API Documentation**: `/api-docs`
|
| 26 |
-
- **Core API**: `/api/message` (requires JSON payload)
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Agent safe - internal data only
|
| 3 |
+
emoji: 😻
|
| 4 |
+
colorFrom: gray
|
| 5 |
+
colorTo: yellow
|
| 6 |
sdk: docker
|
| 7 |
app_port: 7860
|
| 8 |
pinned: false
|
| 9 |
+
short_description: 'First edits for agent-0 backend '
|
| 10 |
---
|
| 11 |
|
| 12 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
api_app.py
ADDED
|
@@ -0,0 +1,123 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import secrets
|
| 4 |
+
import hmac
|
| 5 |
+
import hashlib
|
| 6 |
+
import time
|
| 7 |
+
from fastapi import FastAPI, HTTPException, Request
|
| 8 |
+
from fastapi.responses import HTMLResponse, Response
|
| 9 |
+
from fastapi.staticfiles import StaticFiles
|
| 10 |
+
from pydantic import BaseModel
|
| 11 |
+
from typing import Optional, List
|
| 12 |
+
|
| 13 |
+
sys.path.append(os.path.dirname(os.path.abspath(__file__)))
|
| 14 |
+
|
| 15 |
+
from agent import AgentContext, AgentContextType, UserMessage
|
| 16 |
+
import initialize
|
| 17 |
+
from python.helpers import runtime, dotenv, files, git
|
| 18 |
+
from python.helpers.print_style import PrintStyle
|
| 19 |
+
|
| 20 |
+
app = FastAPI(title="Skilled-Agent API")
|
| 21 |
+
|
| 22 |
+
# CSRF Logic from run_ui.py
|
| 23 |
+
CSRF_SECRET = secrets.token_bytes(32)
|
| 24 |
+
TOKEN_TTL = 3600
|
| 25 |
+
|
| 26 |
+
def generate_csrf_token():
|
| 27 |
+
nonce = secrets.token_hex(16)
|
| 28 |
+
timestamp = str(int(time.time()))
|
| 29 |
+
data = f"{nonce}:{timestamp}"
|
| 30 |
+
sig = hmac.new(CSRF_SECRET, data.encode(), hashlib.sha256).hexdigest()
|
| 31 |
+
return f"{data}.{sig}"
|
| 32 |
+
|
| 33 |
+
class ChatRequest(BaseModel):
|
| 34 |
+
message: str
|
| 35 |
+
chat_id: Optional[str] = None
|
| 36 |
+
attachments: Optional[List[str]] = None
|
| 37 |
+
|
| 38 |
+
class ChatResponse(BaseModel):
|
| 39 |
+
response: str
|
| 40 |
+
chat_id: str
|
| 41 |
+
|
| 42 |
+
@app.on_event("startup")
|
| 43 |
+
async def startup_event():
|
| 44 |
+
PrintStyle().print("Initializing Skilled-Agent API...")
|
| 45 |
+
runtime.initialize()
|
| 46 |
+
dotenv.load_dotenv()
|
| 47 |
+
|
| 48 |
+
# Run migrations if necessary
|
| 49 |
+
if hasattr(initialize, "initialize_migration"):
|
| 50 |
+
initialize.initialize_migration()
|
| 51 |
+
|
| 52 |
+
# Initialize chats
|
| 53 |
+
init_chats = initialize.initialize_chats()
|
| 54 |
+
init_chats.result_sync()
|
| 55 |
+
|
| 56 |
+
# Initialize MCP
|
| 57 |
+
initialize.initialize_mcp()
|
| 58 |
+
|
| 59 |
+
# Start job loop
|
| 60 |
+
initialize.initialize_job_loop()
|
| 61 |
+
|
| 62 |
+
# Preload
|
| 63 |
+
initialize.initialize_preload()
|
| 64 |
+
|
| 65 |
+
PrintStyle().print("Skilled-Agent API started.")
|
| 66 |
+
|
| 67 |
+
@app.get("/", response_class=HTMLResponse)
|
| 68 |
+
async def serve_index():
|
| 69 |
+
PrintStyle().print("Serving index.html")
|
| 70 |
+
gitinfo = None
|
| 71 |
+
try:
|
| 72 |
+
gitinfo = git.get_git_info()
|
| 73 |
+
except Exception as e:
|
| 74 |
+
gitinfo = {"version": "unknown", "commit_time": "unknown"}
|
| 75 |
+
|
| 76 |
+
index_content = files.read_file("webui/index.html")
|
| 77 |
+
index_content = files.replace_placeholders_text(
|
| 78 |
+
_content=index_content,
|
| 79 |
+
version_no=gitinfo["version"],
|
| 80 |
+
version_time=gitinfo["commit_time"]
|
| 81 |
+
)
|
| 82 |
+
|
| 83 |
+
csrf_token = generate_csrf_token()
|
| 84 |
+
runtime_id = runtime.get_runtime_id()
|
| 85 |
+
meta_tags = f'''<meta name="csrf-token" content="{csrf_token}">
|
| 86 |
+
<meta name="runtime-id" content="{runtime_id}">'''
|
| 87 |
+
index_content = index_content.replace("</head>", f"{meta_tags}</head>")
|
| 88 |
+
return index_content
|
| 89 |
+
|
| 90 |
+
@app.post("/chat", response_model=ChatResponse)
|
| 91 |
+
async def chat(request: ChatRequest):
|
| 92 |
+
context = None
|
| 93 |
+
if request.chat_id:
|
| 94 |
+
context = AgentContext.get(request.chat_id)
|
| 95 |
+
if not context:
|
| 96 |
+
raise HTTPException(status_code=404, detail=f"Chat session {request.chat_id} not found")
|
| 97 |
+
else:
|
| 98 |
+
config = initialize.initialize_agent()
|
| 99 |
+
context = AgentContext(config=config, type=AgentContextType.BACKGROUND)
|
| 100 |
+
|
| 101 |
+
if not request.message:
|
| 102 |
+
raise HTTPException(status_code=400, detail="Message is required")
|
| 103 |
+
|
| 104 |
+
try:
|
| 105 |
+
PrintStyle().print(f"Processing message for chat {context.id}...")
|
| 106 |
+
task = context.communicate(
|
| 107 |
+
UserMessage(
|
| 108 |
+
message=request.message,
|
| 109 |
+
attachments=request.attachments or []
|
| 110 |
+
)
|
| 111 |
+
)
|
| 112 |
+
result = await task.result()
|
| 113 |
+
return ChatResponse(response=result, chat_id=context.id)
|
| 114 |
+
except Exception as e:
|
| 115 |
+
PrintStyle().error(f"Error in chat: {e}")
|
| 116 |
+
raise HTTPException(status_code=500, detail=str(e))
|
| 117 |
+
|
| 118 |
+
@app.get("/health")
|
| 119 |
+
async def health():
|
| 120 |
+
return {"status": "healthy"}
|
| 121 |
+
|
| 122 |
+
# Mount static files
|
| 123 |
+
app.mount("/", StaticFiles(directory="webui"), name="static")
|
build_logs.txt
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
|
| 1 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0
|
| 2 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0
|
| 3 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0
|
| 4 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0
|
| 5 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0
|
| 6 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0
|
| 7 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0
|
| 8 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:08 --:--:-- 0
|
| 9 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:09 --:--:-- 0
|
|
|
|
| 1 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
| 2 |
+
Dload Upload Total Spent Left Speed
|
| 3 |
+
|
| 4 |
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
|
| 5 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0
|
| 6 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0
|
| 7 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0
|
| 8 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0
|
| 9 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0
|
| 10 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0
|
| 11 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0
|
| 12 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:08 --:--:-- 0
|
| 13 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:09 --:--:-- 0
|
check_secrets.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
# List of secrets that are commonly used by Agent-Zero and integrated skills
|
| 4 |
+
COMMON_SECRETS = [
|
| 5 |
+
"ANTHROPIC_API_KEY",
|
| 6 |
+
"OPENAI_API_KEY",
|
| 7 |
+
"GITHUB_PERSONAL_ACCESS_TOKEN",
|
| 8 |
+
"HUGGINGFACE_API_KEY",
|
| 9 |
+
"GOOGLE_API_KEY",
|
| 10 |
+
"SERPAPI_API_KEY",
|
| 11 |
+
"SEARXNG_URL"
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
def check_secrets():
|
| 15 |
+
print("=== Skilled-Agent Secret Check ===")
|
| 16 |
+
missing = []
|
| 17 |
+
available = []
|
| 18 |
+
|
| 19 |
+
for secret in COMMON_SECRETS:
|
| 20 |
+
if os.getenv(secret):
|
| 21 |
+
available.append(secret)
|
| 22 |
+
else:
|
| 23 |
+
missing.append(secret)
|
| 24 |
+
|
| 25 |
+
if available:
|
| 26 |
+
print(f"INFO: Available secrets in environment: {', '.join(available)}")
|
| 27 |
+
|
| 28 |
+
if missing:
|
| 29 |
+
print(f"WARNING: The following secrets are NOT set: {', '.join(missing)}")
|
| 30 |
+
print("Please configure them in your Hugging Face Space Settings -> Secrets if your tasks require them.")
|
| 31 |
+
|
| 32 |
+
print("==================================")
|
| 33 |
+
|
| 34 |
+
if __name__ == "__main__":
|
| 35 |
+
check_secrets()
|
full_build_logs.txt
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
| 0 |
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
|
| 1 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0
|
| 2 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0
|
| 3 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0
|
| 4 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0
|
| 5 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0
|
| 6 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0
|
| 7 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0
|
| 8 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:08 --:--:-- 0
|
| 9 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:09 --:--:-- 0
|
| 10 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:10 --:--:-- 0
|
| 11 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:11 --:--:-- 0
|
| 12 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:12 --:--:-- 0
|
| 13 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:13 --:--:-- 0
|
| 14 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:14 --:--:-- 0
|
| 15 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:15 --:--:-- 0
|
| 16 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:16 --:--:-- 0
|
| 17 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:17 --:--:-- 0
|
| 18 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:18 --:--:-- 0
|
| 19 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:19 --:--:-- 0
|
| 20 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:20 --:--:-- 0
|
| 21 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:21 --:--:-- 0
|
| 22 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:22 --:--:-- 0
|
| 23 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:23 --:--:-- 0
|
| 24 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:24 --:--:-- 0
|
| 25 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:25 --:--:-- 0
|
| 26 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:26 --:--:-- 0
|
| 27 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:27 --:--:-- 0
|
| 28 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:28 --:--:-- 0
|
| 29 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:29 --:--:-- 0: keep-alive
|
|
|
|
|
|
|
|
|
| 1 |
+
% Total % Received % Xferd Average Speed Time Time Time Current
|
| 2 |
+
Dload Upload Total Spent Left Speed
|
| 3 |
+
|
| 4 |
0 0 0 0 0 0 0 0 --:--:-- --:--:-- --:--:-- 0
|
| 5 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:01 --:--:-- 0
|
| 6 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:02 --:--:-- 0
|
| 7 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:03 --:--:-- 0
|
| 8 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:04 --:--:-- 0
|
| 9 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:05 --:--:-- 0
|
| 10 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:06 --:--:-- 0
|
| 11 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:07 --:--:-- 0
|
| 12 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:08 --:--:-- 0
|
| 13 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:09 --:--:-- 0
|
| 14 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:10 --:--:-- 0
|
| 15 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:11 --:--:-- 0
|
| 16 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:12 --:--:-- 0
|
| 17 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:13 --:--:-- 0
|
| 18 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:14 --:--:-- 0
|
| 19 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:15 --:--:-- 0
|
| 20 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:16 --:--:-- 0
|
| 21 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:17 --:--:-- 0
|
| 22 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:18 --:--:-- 0
|
| 23 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:19 --:--:-- 0
|
| 24 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:20 --:--:-- 0
|
| 25 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:21 --:--:-- 0
|
| 26 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:22 --:--:-- 0
|
| 27 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:23 --:--:-- 0
|
| 28 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:24 --:--:-- 0
|
| 29 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:25 --:--:-- 0
|
| 30 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:26 --:--:-- 0
|
| 31 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:27 --:--:-- 0
|
| 32 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:28 --:--:-- 0
|
| 33 |
0 0 0 0 0 0 0 0 --:--:-- 0:00:29 --:--:-- 0: keep-alive
|
| 34 |
+
|
| 35 |
+
|
initialize.py
ADDED
|
@@ -0,0 +1,166 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import AgentConfig
|
| 2 |
+
import models
|
| 3 |
+
from python.helpers import runtime, settings, defer
|
| 4 |
+
from python.helpers.print_style import PrintStyle
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
def initialize_agent():
|
| 8 |
+
current_settings = settings.get_settings()
|
| 9 |
+
|
| 10 |
+
def _normalize_model_kwargs(kwargs: dict) -> dict:
|
| 11 |
+
# convert string values that represent valid Python numbers to numeric types
|
| 12 |
+
result = {}
|
| 13 |
+
for key, value in kwargs.items():
|
| 14 |
+
if isinstance(value, str):
|
| 15 |
+
# try to convert string to number if it's a valid Python number
|
| 16 |
+
try:
|
| 17 |
+
# try int first, then float
|
| 18 |
+
result[key] = int(value)
|
| 19 |
+
except ValueError:
|
| 20 |
+
try:
|
| 21 |
+
result[key] = float(value)
|
| 22 |
+
except ValueError:
|
| 23 |
+
result[key] = value
|
| 24 |
+
else:
|
| 25 |
+
result[key] = value
|
| 26 |
+
return result
|
| 27 |
+
|
| 28 |
+
# chat model from user settings
|
| 29 |
+
chat_llm = models.ModelConfig(
|
| 30 |
+
type=models.ModelType.CHAT,
|
| 31 |
+
provider=current_settings["chat_model_provider"],
|
| 32 |
+
name=current_settings["chat_model_name"],
|
| 33 |
+
api_base=current_settings["chat_model_api_base"],
|
| 34 |
+
ctx_length=current_settings["chat_model_ctx_length"],
|
| 35 |
+
vision=current_settings["chat_model_vision"],
|
| 36 |
+
limit_requests=current_settings["chat_model_rl_requests"],
|
| 37 |
+
limit_input=current_settings["chat_model_rl_input"],
|
| 38 |
+
limit_output=current_settings["chat_model_rl_output"],
|
| 39 |
+
kwargs=_normalize_model_kwargs(current_settings["chat_model_kwargs"]),
|
| 40 |
+
)
|
| 41 |
+
|
| 42 |
+
# utility model from user settings
|
| 43 |
+
utility_llm = models.ModelConfig(
|
| 44 |
+
type=models.ModelType.CHAT,
|
| 45 |
+
provider=current_settings["util_model_provider"],
|
| 46 |
+
name=current_settings["util_model_name"],
|
| 47 |
+
api_base=current_settings["util_model_api_base"],
|
| 48 |
+
ctx_length=current_settings["util_model_ctx_length"],
|
| 49 |
+
limit_requests=current_settings["util_model_rl_requests"],
|
| 50 |
+
limit_input=current_settings["util_model_rl_input"],
|
| 51 |
+
limit_output=current_settings["util_model_rl_output"],
|
| 52 |
+
kwargs=_normalize_model_kwargs(current_settings["util_model_kwargs"]),
|
| 53 |
+
)
|
| 54 |
+
# embedding model from user settings
|
| 55 |
+
embedding_llm = models.ModelConfig(
|
| 56 |
+
type=models.ModelType.EMBEDDING,
|
| 57 |
+
provider=current_settings["embed_model_provider"],
|
| 58 |
+
name=current_settings["embed_model_name"],
|
| 59 |
+
api_base=current_settings["embed_model_api_base"],
|
| 60 |
+
limit_requests=current_settings["embed_model_rl_requests"],
|
| 61 |
+
kwargs=_normalize_model_kwargs(current_settings["embed_model_kwargs"]),
|
| 62 |
+
)
|
| 63 |
+
# browser model from user settings
|
| 64 |
+
browser_llm = models.ModelConfig(
|
| 65 |
+
type=models.ModelType.CHAT,
|
| 66 |
+
provider=current_settings["browser_model_provider"],
|
| 67 |
+
name=current_settings["browser_model_name"],
|
| 68 |
+
api_base=current_settings["browser_model_api_base"],
|
| 69 |
+
vision=current_settings["browser_model_vision"],
|
| 70 |
+
kwargs=_normalize_model_kwargs(current_settings["browser_model_kwargs"]),
|
| 71 |
+
)
|
| 72 |
+
# agent configuration
|
| 73 |
+
config = AgentConfig(
|
| 74 |
+
chat_model=chat_llm,
|
| 75 |
+
utility_model=utility_llm,
|
| 76 |
+
embeddings_model=embedding_llm,
|
| 77 |
+
browser_model=browser_llm,
|
| 78 |
+
profile=current_settings["agent_profile"],
|
| 79 |
+
memory_subdir=current_settings["agent_memory_subdir"],
|
| 80 |
+
knowledge_subdirs=[current_settings["agent_knowledge_subdir"], "default"],
|
| 81 |
+
mcp_servers=current_settings.get("mcp_servers"),
|
| 82 |
+
# code_exec params get initialized in _set_runtime_config
|
| 83 |
+
# additional = {},
|
| 84 |
+
)
|
| 85 |
+
|
| 86 |
+
# update SSH and docker settings
|
| 87 |
+
_set_runtime_config(config, current_settings)
|
| 88 |
+
|
| 89 |
+
# update config with runtime args
|
| 90 |
+
_args_override(config)
|
| 91 |
+
|
| 92 |
+
# initialize MCP in deferred task to prevent blocking the main thread
|
| 93 |
+
# async def initialize_mcp_async(mcp_servers_config: str):
|
| 94 |
+
# return initialize_mcp(mcp_servers_config)
|
| 95 |
+
# defer.DeferredTask(thread_name="mcp-initializer").start_task(initialize_mcp_async, config.mcp_servers)
|
| 96 |
+
# initialize_mcp(config.mcp_servers)
|
| 97 |
+
|
| 98 |
+
# import python.helpers.mcp_handler as mcp_helper
|
| 99 |
+
# import agent as agent_helper
|
| 100 |
+
# import python.helpers.print_style as print_style_helper
|
| 101 |
+
# if not mcp_helper.MCPConfig.get_instance().is_initialized():
|
| 102 |
+
# try:
|
| 103 |
+
# mcp_helper.MCPConfig.update(config.mcp_servers)
|
| 104 |
+
# except Exception as e:
|
| 105 |
+
# first_context = agent_helper.AgentContext.first()
|
| 106 |
+
# if first_context:
|
| 107 |
+
# (
|
| 108 |
+
# first_context.log
|
| 109 |
+
# .log(type="warning", content=f"Failed to update MCP settings: {e}", temp=False)
|
| 110 |
+
# )
|
| 111 |
+
# (
|
| 112 |
+
# print_style_helper.PrintStyle(background_color="black", font_color="red", padding=True)
|
| 113 |
+
# .print(f"Failed to update MCP settings: {e}")
|
| 114 |
+
# )
|
| 115 |
+
|
| 116 |
+
# return config object
|
| 117 |
+
return config
|
| 118 |
+
|
| 119 |
+
def initialize_chats():
|
| 120 |
+
from python.helpers import persist_chat
|
| 121 |
+
async def initialize_chats_async():
|
| 122 |
+
persist_chat.load_tmp_chats()
|
| 123 |
+
return defer.DeferredTask().start_task(initialize_chats_async)
|
| 124 |
+
|
| 125 |
+
def initialize_mcp():
|
| 126 |
+
set = settings.get_settings()
|
| 127 |
+
async def initialize_mcp_async():
|
| 128 |
+
from python.helpers.mcp_handler import initialize_mcp as _initialize_mcp
|
| 129 |
+
return _initialize_mcp(set["mcp_servers"])
|
| 130 |
+
return defer.DeferredTask().start_task(initialize_mcp_async)
|
| 131 |
+
|
| 132 |
+
def initialize_job_loop():
|
| 133 |
+
from python.helpers.job_loop import run_loop
|
| 134 |
+
return defer.DeferredTask("JobLoop").start_task(run_loop)
|
| 135 |
+
|
| 136 |
+
def initialize_preload():
|
| 137 |
+
import preload
|
| 138 |
+
return defer.DeferredTask().start_task(preload.preload)
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
def _args_override(config):
|
| 142 |
+
# update config with runtime args
|
| 143 |
+
for key, value in runtime.args.items():
|
| 144 |
+
if hasattr(config, key):
|
| 145 |
+
# conversion based on type of config[key]
|
| 146 |
+
if isinstance(getattr(config, key), bool):
|
| 147 |
+
value = value.lower().strip() == "true"
|
| 148 |
+
elif isinstance(getattr(config, key), int):
|
| 149 |
+
value = int(value)
|
| 150 |
+
elif isinstance(getattr(config, key), float):
|
| 151 |
+
value = float(value)
|
| 152 |
+
elif isinstance(getattr(config, key), str):
|
| 153 |
+
value = str(value)
|
| 154 |
+
else:
|
| 155 |
+
raise Exception(
|
| 156 |
+
f"Unsupported argument type of '{key}': {type(getattr(config, key))}"
|
| 157 |
+
)
|
| 158 |
+
|
| 159 |
+
setattr(config, key, value)
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
def _set_runtime_config(config: AgentConfig, set: settings.Settings):
|
| 163 |
+
ssh_conf = settings.get_runtime_config(set)
|
| 164 |
+
for key, value in ssh_conf.items():
|
| 165 |
+
if hasattr(config, key):
|
| 166 |
+
setattr(config, key, value)
|
knowledge/custom/TED Podcasts.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ffc5a579932dc6ea1e6c50b386f81c8fd0e9bd2cb4501a4a6856d11623a5bf11
|
| 3 |
+
size 237793
|
models.py
ADDED
|
@@ -0,0 +1,632 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from dataclasses import dataclass, field
|
| 2 |
+
from enum import Enum
|
| 3 |
+
import logging
|
| 4 |
+
import os
|
| 5 |
+
from typing import (
|
| 6 |
+
Any,
|
| 7 |
+
Awaitable,
|
| 8 |
+
Callable,
|
| 9 |
+
List,
|
| 10 |
+
Optional,
|
| 11 |
+
Iterator,
|
| 12 |
+
AsyncIterator,
|
| 13 |
+
Tuple,
|
| 14 |
+
TypedDict,
|
| 15 |
+
)
|
| 16 |
+
|
| 17 |
+
from litellm import completion, acompletion, embedding
|
| 18 |
+
import litellm
|
| 19 |
+
|
| 20 |
+
from python.helpers import dotenv
|
| 21 |
+
from python.helpers.dotenv import load_dotenv
|
| 22 |
+
from python.helpers.providers import get_provider_config
|
| 23 |
+
from python.helpers.rate_limiter import RateLimiter
|
| 24 |
+
from python.helpers.tokens import approximate_tokens
|
| 25 |
+
|
| 26 |
+
from langchain_core.language_models.chat_models import SimpleChatModel
|
| 27 |
+
from langchain_core.outputs.chat_generation import ChatGenerationChunk
|
| 28 |
+
from langchain_core.callbacks.manager import (
|
| 29 |
+
CallbackManagerForLLMRun,
|
| 30 |
+
AsyncCallbackManagerForLLMRun,
|
| 31 |
+
)
|
| 32 |
+
from langchain_core.messages import (
|
| 33 |
+
BaseMessage,
|
| 34 |
+
AIMessageChunk,
|
| 35 |
+
HumanMessage,
|
| 36 |
+
SystemMessage,
|
| 37 |
+
)
|
| 38 |
+
from langchain.embeddings.base import Embeddings
|
| 39 |
+
from sentence_transformers import SentenceTransformer
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
# disable extra logging, must be done repeatedly, otherwise browser-use will turn it back on for some reason
|
| 43 |
+
def turn_off_logging():
    """Silence LiteLLM's verbose output: env flag, debug info and sub-loggers."""
    os.environ["LITELLM_LOG"] = "ERROR"  # only errors
    litellm.suppress_debug_info = True
    # Silence **all** LiteLLM sub-loggers (utils, cost_calculator…)
    litellm_logger_names = [
        logger_name
        for logger_name in logging.Logger.manager.loggerDict
        if logger_name.lower().startswith("litellm")
    ]
    for logger_name in litellm_logger_names:
        logging.getLogger(logger_name).setLevel(logging.ERROR)
|
| 50 |
+
|
| 51 |
+
|
| 52 |
+
# init
|
| 53 |
+
load_dotenv()
|
| 54 |
+
turn_off_logging()
|
| 55 |
+
print("DEBUG: models.py loaded")
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
class ModelType(Enum):
    """Kind of model a ModelConfig describes."""

    CHAT = "Chat"
    EMBEDDING = "Embedding"
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
@dataclass
class ModelConfig:
    """Static configuration for one model slot (chat or embedding)."""

    type: ModelType  # CHAT or EMBEDDING
    provider: str  # provider id/label, e.g. "openai", "ollama"
    name: str  # model name within the provider
    api_base: str = ""  # custom API endpoint; injected into kwargs by build_kwargs
    ctx_length: int = 0  # context window in tokens (0 = not set)
    limit_requests: int = 0  # rate limit: requests per 60s window (0 = off)
    limit_input: int = 0  # rate limit: input tokens per 60s window (0 = off)
    limit_output: int = 0  # rate limit: output tokens per 60s window (0 = off)
    vision: bool = False  # whether the model accepts image input
    kwargs: dict = field(default_factory=dict)  # extra args passed to litellm calls

    def build_kwargs(self):
        """Return a copy of kwargs with api_base injected when configured."""
        kwargs = self.kwargs.copy() or {}
        if self.api_base and "api_base" not in kwargs:
            kwargs["api_base"] = self.api_base
        return kwargs
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class ChatChunk(TypedDict):
    """Simplified response chunk for chat models."""

    response_delta: str  # new visible answer text carried by this chunk
    reasoning_delta: str  # new reasoning/"thinking" text carried by this chunk
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
# shared RateLimiter instances, keyed by "provider\name" (see get_rate_limiter)
rate_limiters: dict[str, RateLimiter] = {}
# per-service counter used to rotate through comma-separated API key pools
api_keys_round_robin: dict[str, int] = {}
|
| 92 |
+
|
| 93 |
+
def get_api_key(service: str) -> str:
    """Look up the API key for a provider service from the environment.

    Checks API_KEY_<SERVICE>, <SERVICE>_API_KEY and <SERVICE>_API_TOKEN in
    that order. A comma-separated value is treated as a pool of keys served
    round-robin across calls. Returns the literal string "None" when no
    key is configured (callers compare against that placeholder).
    """
    upper = service.upper()
    candidates = (f"API_KEY_{upper}", f"{upper}_API_KEY", f"{upper}_API_TOKEN")
    key = next(
        (v for v in (dotenv.get_dotenv_value(name) for name in candidates) if v),
        "None",
    )
    # if the key contains a comma, use round-robin over the pool
    if "," in key:
        pool = [part.strip() for part in key.split(",") if part.strip()]
        api_keys_round_robin[service] = api_keys_round_robin.get(service, -1) + 1
        key = pool[api_keys_round_robin[service] % len(pool)]
    return key
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def get_rate_limiter(
    provider: str, name: str, requests: int, input: int, output: int
) -> RateLimiter:
    """Return the shared per-model RateLimiter, refreshing its limits in place."""
    key = f"{provider}\\{name}"
    if key not in rate_limiters:
        rate_limiters[key] = RateLimiter(seconds=60)
    limiter = rate_limiters[key]
    # limits are updated on every call so settings changes take effect
    limiter.limits["requests"] = requests or 0
    limiter.limits["input"] = input or 0
    limiter.limits["output"] = output or 0
    return limiter
|
| 118 |
+
|
| 119 |
+
async def apply_rate_limiter(
    model_config: ModelConfig | None,
    input_text: str,
    rate_limiter_callback: Callable[[str, str, int, int], Awaitable[bool]] | None = None,
):
    """Register this call with the model's rate limiter and wait if throttled.

    Returns the limiter (so callers can add output tokens later), or None
    when no model_config is supplied.
    """
    if not model_config:
        return
    limiter = get_rate_limiter(
        model_config.provider,
        model_config.name,
        model_config.limit_requests,
        model_config.limit_input,
        model_config.limit_output,
    )
    # count the approximate input tokens plus one request, then block until allowed
    limiter.add(input=approximate_tokens(input_text))
    limiter.add(requests=1)
    await limiter.wait(rate_limiter_callback)
    return limiter
|
| 133 |
+
|
| 134 |
+
def apply_rate_limiter_sync(
    model_config: ModelConfig | None,
    input_text: str,
    rate_limiter_callback: Callable[[str, str, int, int], Awaitable[bool]] | None = None,
):
    """Blocking wrapper around apply_rate_limiter for synchronous call sites."""
    if not model_config:
        return
    import asyncio
    import nest_asyncio

    # allow asyncio.run even when a loop is already running (sync LangChain paths)
    nest_asyncio.apply()
    coro = apply_rate_limiter(model_config, input_text, rate_limiter_callback)
    return asyncio.run(coro)
|
| 140 |
+
|
| 141 |
+
|
| 142 |
+
class LiteLLMChatWrapper(SimpleChatModel):
    """LangChain-compatible chat model backed by LiteLLM.

    Routes calls through litellm.completion/acompletion and applies Agent
    Zero rate limiting when an a0_model_conf (ModelConfig) is attached.
    Fixes vs. previous revision: removed dead `import asyncio` statements in
    `_call`/`_stream`, and `unified_call` no longer mutates the caller's
    `messages` list.
    """

    model_name: str  # litellm model id: "<provider>/<model>"
    provider: str  # provider id used to build model_name
    kwargs: dict = {}  # extra args forwarded to every litellm call

    class Config:
        arbitrary_types_allowed = True
        extra = "allow"  # Allow extra attributes (e.g. a0_model_conf)
        validate_assignment = False  # Don't validate on assignment

    def __init__(
        self,
        model: str,
        provider: str,
        model_config: Optional[ModelConfig] = None,
        **kwargs: Any,
    ):
        model_value = f"{provider}/{model}"
        super().__init__(model_name=model_value, provider=provider, kwargs=kwargs)  # type: ignore
        # Set A0 model config as instance attribute after parent init
        self.a0_model_conf = model_config

    @property
    def _llm_type(self) -> str:
        return "litellm-chat"

    def _convert_messages(self, messages: List[BaseMessage]) -> List[dict]:
        """Convert LangChain messages into LiteLLM/OpenAI-style dicts."""
        result = []
        # Map LangChain message types to LiteLLM roles
        role_mapping = {
            "human": "user",
            "ai": "assistant",
            "system": "system",
            "tool": "tool",
        }
        for m in messages:
            role = role_mapping.get(m.type, m.type)
            message_dict = {"role": role, "content": m.content}

            # Handle tool calls for AI messages
            tool_calls = getattr(m, "tool_calls", None)
            if tool_calls:
                # Convert LangChain tool calls to LiteLLM format
                new_tool_calls = []
                for tool_call in tool_calls:
                    # Ensure arguments is a JSON string
                    args = tool_call["args"]
                    if isinstance(args, dict):
                        import json

                        args_str = json.dumps(args)
                    else:
                        args_str = str(args)

                    new_tool_calls.append(
                        {
                            "id": tool_call.get("id", ""),
                            "type": "function",
                            "function": {
                                "name": tool_call["name"],
                                "arguments": args_str,
                            },
                        }
                    )
                message_dict["tool_calls"] = new_tool_calls

            # Handle tool call ID for ToolMessage
            tool_call_id = getattr(m, "tool_call_id", None)
            if tool_call_id:
                message_dict["tool_call_id"] = tool_call_id

            result.append(message_dict)
        return result

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        """Synchronous non-streaming completion; returns the response text."""
        msgs = self._convert_messages(messages)

        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, str(msgs))

        # Call the model
        resp = completion(
            model=self.model_name, messages=msgs, stop=stop, **{**self.kwargs, **kwargs}
        )

        # Parse output
        parsed = _parse_chunk(resp)
        return parsed["response_delta"]

    def _stream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> Iterator[ChatGenerationChunk]:
        """Synchronous streaming; yields chunks carrying response text only."""
        msgs = self._convert_messages(messages)

        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, str(msgs))

        for chunk in completion(
            model=self.model_name,
            messages=msgs,
            stream=True,
            stop=stop,
            **{**self.kwargs, **kwargs},
        ):
            parsed = _parse_chunk(chunk)
            # Only yield chunks with non-empty content
            if parsed["response_delta"]:
                yield ChatGenerationChunk(
                    message=AIMessageChunk(content=parsed["response_delta"])
                )

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        """Async streaming; yields chunks carrying response text only."""
        msgs = self._convert_messages(messages)

        # Apply rate limiting if configured
        await apply_rate_limiter(self.a0_model_conf, str(msgs))

        response = await acompletion(
            model=self.model_name,
            messages=msgs,
            stream=True,
            stop=stop,
            **{**self.kwargs, **kwargs},
        )
        async for chunk in response:  # type: ignore
            parsed = _parse_chunk(chunk)
            # Only yield chunks with non-empty content
            if parsed["response_delta"]:
                yield ChatGenerationChunk(
                    message=AIMessageChunk(content=parsed["response_delta"])
                )

    async def unified_call(
        self,
        system_message="",
        user_message="",
        messages: List[BaseMessage] | None = None,
        response_callback: Callable[[str, str], Awaitable[None]] | None = None,
        reasoning_callback: Callable[[str, str], Awaitable[None]] | None = None,
        tokens_callback: Callable[[str, int], Awaitable[None]] | None = None,
        rate_limiter_callback: Callable[[str, str, int, int], Awaitable[bool]] | None = None,
        **kwargs: Any,
    ) -> Tuple[str, str]:
        """Stream a completion and return (response_text, reasoning_text).

        Optional callbacks receive each delta plus the accumulated text,
        approximate token counts, and rate-limiter status updates.
        """
        turn_off_logging()

        # Bug fix: copy caller-provided messages so their list is never mutated
        messages = list(messages) if messages else []
        # construct messages
        if system_message:
            messages.insert(0, SystemMessage(content=system_message))
        if user_message:
            messages.append(HumanMessage(content=user_message))

        # convert to litellm format
        msgs_conv = self._convert_messages(messages)

        # Apply rate limiting if configured
        limiter = await apply_rate_limiter(self.a0_model_conf, str(msgs_conv), rate_limiter_callback)

        # call model
        print(f"DEBUG: calling acompletion with model={self.model_name}")
        _completion = await acompletion(
            model=self.model_name,
            messages=msgs_conv,
            stream=True,
            **{**self.kwargs, **kwargs},
        )

        # results
        reasoning = ""
        response = ""

        # iterate over chunks
        async for chunk in _completion:  # type: ignore
            parsed = _parse_chunk(chunk)
            # collect reasoning delta and call callbacks
            if parsed["reasoning_delta"]:
                reasoning += parsed["reasoning_delta"]
                if reasoning_callback:
                    await reasoning_callback(parsed["reasoning_delta"], reasoning)
                if tokens_callback:
                    await tokens_callback(
                        parsed["reasoning_delta"],
                        approximate_tokens(parsed["reasoning_delta"]),
                    )
                # Add output tokens to rate limiter if configured
                if limiter:
                    limiter.add(output=approximate_tokens(parsed["reasoning_delta"]))
            # collect response delta and call callbacks
            if parsed["response_delta"]:
                response += parsed["response_delta"]
                if response_callback:
                    await response_callback(parsed["response_delta"], response)
                if tokens_callback:
                    await tokens_callback(
                        parsed["response_delta"],
                        approximate_tokens(parsed["response_delta"]),
                    )
                # Add output tokens to rate limiter if configured
                if limiter:
                    limiter.add(output=approximate_tokens(parsed["response_delta"]))

        # return complete results
        return response, reasoning
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
class BrowserCompatibleChatWrapper(LiteLLMChatWrapper):
    """
    A wrapper for browser agent that can filter/sanitize messages
    before sending them to the LLM.
    """

    def __init__(self, *args, **kwargs):
        # re-silence LiteLLM: browser-use re-enables its logging (see turn_off_logging)
        turn_off_logging()
        super().__init__(*args, **kwargs)
        # Browser-use may expect a 'model' attribute
        self.model = self.model_name

    def _call(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> str:
        # synchronous single-shot completion; delegates to the parent wrapper
        turn_off_logging()
        result = super()._call(messages, stop, run_manager, **kwargs)
        return result

    async def _astream(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> AsyncIterator[ChatGenerationChunk]:
        # async streaming; re-silences logging, then forwards parent chunks unchanged
        turn_off_logging()
        async for chunk in super()._astream(messages, stop, run_manager, **kwargs):
            yield chunk
|
| 397 |
+
|
| 398 |
+
|
| 399 |
+
class LiteLLMEmbeddingWrapper(Embeddings):
    # LangChain Embeddings implementation backed by litellm.embedding.
    model_name: str  # litellm model id ("<provider>/<model>"; bare name for openai)
    kwargs: dict = {}  # extra args forwarded to litellm.embedding
    a0_model_conf: Optional[ModelConfig] = None  # Agent Zero config for rate limiting

    def __init__(self, model: str, provider: str, model_config: Optional[ModelConfig] = None, **kwargs: Any):
        """Store the litellm model id (openai models are passed without a prefix)."""
        self.model_name = f"{provider}/{model}" if provider != "openai" else model
        self.kwargs = kwargs
        self.a0_model_conf = model_config

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of documents; returns one vector per input text."""
        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, " ".join(texts))

        resp = embedding(model=self.model_name, input=texts, **self.kwargs)
        # response items may be dicts or objects depending on the provider
        return [
            item.get("embedding") if isinstance(item, dict) else item.embedding  # type: ignore
            for item in resp.data  # type: ignore
        ]

    def embed_query(self, text: str) -> List[float]:
        """Embed a single query string into one vector."""
        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, text)

        resp = embedding(model=self.model_name, input=[text], **self.kwargs)
        item = resp.data[0]  # type: ignore
        return item.get("embedding") if isinstance(item, dict) else item.embedding  # type: ignore
|
| 426 |
+
|
| 427 |
+
|
| 428 |
+
class LocalSentenceTransformerWrapper(Embeddings):
    """Local wrapper for sentence-transformers models to avoid HuggingFace API calls"""

    def __init__(self, provider: str, model: str, model_config: Optional[ModelConfig] = None, **kwargs: Any):
        """Load a local SentenceTransformer.

        'provider' is accepted for interface parity with other wrappers but is
        not used locally.
        """
        # Clean common user-input mistakes
        model = model.strip().strip('"').strip("'")

        # Remove the "sentence-transformers/" prefix if present
        prefix = "sentence-transformers/"
        if model.startswith(prefix):
            model = model[len(prefix):]

        self.model = SentenceTransformer(model, **kwargs)
        self.model_name = model
        self.a0_model_conf = model_config

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Encode a batch of documents into embedding vectors."""
        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, " ".join(texts))

        vectors = self.model.encode(texts, convert_to_tensor=False)  # type: ignore
        return vectors.tolist() if hasattr(vectors, "tolist") else vectors  # type: ignore

    def embed_query(self, text: str) -> List[float]:
        """Encode a single query string into an embedding vector."""
        # Apply rate limiting if configured
        apply_rate_limiter_sync(self.a0_model_conf, text)

        # fix: local renamed from 'embedding' to avoid shadowing the
        # module-level litellm 'embedding' import
        vectors = self.model.encode([text], convert_to_tensor=False)  # type: ignore
        first = vectors[0]
        result = first.tolist() if hasattr(first, "tolist") else first
        return result  # type: ignore
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def _get_litellm_chat(
    cls: type = LiteLLMChatWrapper,
    model_name: str = "",
    provider_name: str = "",
    model_config: Optional[ModelConfig] = None,
    **kwargs: Any,
):
    """Instantiate a chat wrapper, injecting the provider API key when real."""
    # use api key from kwargs or env
    api_key = kwargs.pop("api_key", None) or get_api_key(provider_name)

    # Only pass API key if key is not a placeholder
    placeholders = ("None", "NA")
    if api_key and api_key not in placeholders:
        kwargs["api_key"] = api_key

    provider_name, model_name, kwargs = _adjust_call_args(
        provider_name, model_name, kwargs
    )
    print(
        f"DEBUG: Creating {cls.__name__} with provider={provider_name}, "
        f"model={model_name}, api_base={kwargs.get('api_base')}"
    )
    return cls(provider=provider_name, model=model_name, model_config=model_config, **kwargs)
|
| 480 |
+
|
| 481 |
+
|
| 482 |
+
def _get_litellm_embedding(model_name: str, provider_name: str, model_config: Optional[ModelConfig] = None, **kwargs: Any):
    """Instantiate an embedding wrapper; local sentence-transformers bypass LiteLLM."""
    # Check if this is a local sentence-transformers model
    is_local_st = provider_name == "huggingface" and model_name.startswith(
        "sentence-transformers/"
    )
    if is_local_st:
        # Use local sentence-transformers instead of LiteLLM for local models
        provider_name, model_name, kwargs = _adjust_call_args(
            provider_name, model_name, kwargs
        )
        return LocalSentenceTransformerWrapper(
            provider=provider_name, model=model_name, model_config=model_config, **kwargs
        )

    # use api key from kwargs or env
    api_key = kwargs.pop("api_key", None) or get_api_key(provider_name)

    # Only pass API key if key is not a placeholder
    if api_key and api_key not in ("None", "NA"):
        kwargs["api_key"] = api_key

    provider_name, model_name, kwargs = _adjust_call_args(
        provider_name, model_name, kwargs
    )
    return LiteLLMEmbeddingWrapper(model=model_name, provider=provider_name, model_config=model_config, **kwargs)
|
| 506 |
+
|
| 507 |
+
|
| 508 |
+
def _parse_chunk(chunk: Any) -> ChatChunk:
|
| 509 |
+
delta = chunk["choices"][0].get("delta", {})
|
| 510 |
+
message = chunk["choices"][0].get("message", {}) or chunk["choices"][0].get(
|
| 511 |
+
"model_extra", {}
|
| 512 |
+
).get("message", {})
|
| 513 |
+
response_delta = (
|
| 514 |
+
delta.get("content", "")
|
| 515 |
+
if isinstance(delta, dict)
|
| 516 |
+
else getattr(delta, "content", "")
|
| 517 |
+
) or (
|
| 518 |
+
message.get("content", "")
|
| 519 |
+
if isinstance(message, dict)
|
| 520 |
+
else getattr(message, "content", "")
|
| 521 |
+
)
|
| 522 |
+
reasoning_delta = (
|
| 523 |
+
delta.get("reasoning_content", "")
|
| 524 |
+
if isinstance(delta, dict)
|
| 525 |
+
else getattr(delta, "reasoning_content", "")
|
| 526 |
+
)
|
| 527 |
+
return ChatChunk(reasoning_delta=reasoning_delta, response_delta=response_delta)
|
| 528 |
+
|
| 529 |
+
|
| 530 |
+
def _adjust_call_args(provider_name: str, model_name: str, kwargs: dict):
|
| 531 |
+
# Robustly handle provider name if it's the label instead of ID
|
| 532 |
+
label_to_id = {
|
| 533 |
+
"other openai compatible": "other",
|
| 534 |
+
"openai": "openai",
|
| 535 |
+
"anthropic": "anthropic",
|
| 536 |
+
"google": "google",
|
| 537 |
+
"deepseek": "deepseek",
|
| 538 |
+
"groq": "groq",
|
| 539 |
+
"huggingface": "huggingface",
|
| 540 |
+
"lm studio": "lm_studio",
|
| 541 |
+
"mistral ai": "mistral",
|
| 542 |
+
"ollama": "ollama",
|
| 543 |
+
"openrouter": "openrouter",
|
| 544 |
+
"sambanova": "sambanova",
|
| 545 |
+
"venice": "venice"
|
| 546 |
+
}
|
| 547 |
+
|
| 548 |
+
provider_name_low = str(provider_name).lower()
|
| 549 |
+
if provider_name_low in label_to_id:
|
| 550 |
+
provider_name = label_to_id[provider_name_low]
|
| 551 |
+
|
| 552 |
+
# for openrouter add app reference
|
| 553 |
+
if provider_name == "openrouter":
|
| 554 |
+
kwargs["extra_headers"] = {
|
| 555 |
+
"HTTP-Referer": "https://agent-zero.ai",
|
| 556 |
+
"X-Title": "Agent Zero",
|
| 557 |
+
}
|
| 558 |
+
|
| 559 |
+
# remap other to openai for litellm
|
| 560 |
+
if provider_name == "other":
|
| 561 |
+
provider_name = "openai"
|
| 562 |
+
|
| 563 |
+
return provider_name, model_name, kwargs
|
| 564 |
+
|
| 565 |
+
|
| 566 |
+
def _merge_provider_defaults(
    provider_type: str, original_provider: str, kwargs: dict
) -> tuple[str, dict]:
    """Resolve a provider label/id and merge its configured call defaults.

    Fills kwargs with provider defaults (without overwriting caller values)
    and injects an API key for the original provider when none is present.
    Returns (litellm_provider_name, kwargs).
    """
    # Robustly handle provider name if it's the label instead of ID
    label_to_id = {
        "other openai compatible": "other",
        "openai": "openai",
        "anthropic": "anthropic",
        "google": "google",
        "deepseek": "deepseek",
        "groq": "groq",
        "huggingface": "huggingface",
        "lm studio": "lm_studio",
        "mistral ai": "mistral",
        "ollama": "ollama",
        "openrouter": "openrouter",
        "sambanova": "sambanova",
        "venice": "venice",
    }
    lowered = str(original_provider).lower()
    if lowered in label_to_id:
        original_provider = label_to_id[lowered]

    provider_name = original_provider  # default: unchanged

    cfg = get_provider_config(provider_type, original_provider)
    if cfg:
        provider_name = cfg.get("litellm_provider", original_provider).lower()

        # Extra arguments nested under `kwargs` for readability
        extra_kwargs = cfg.get("kwargs") if isinstance(cfg, dict) else None  # type: ignore[arg-type]
        if isinstance(extra_kwargs, dict):
            for arg_name, default_value in extra_kwargs.items():
                kwargs.setdefault(arg_name, default_value)

    # Inject API key based on the *original* provider id if still missing
    if "api_key" not in kwargs:
        key = get_api_key(original_provider)
        if key and key not in ("None", "NA"):
            kwargs["api_key"] = key

    return provider_name, kwargs
|
| 609 |
+
|
| 610 |
+
|
| 611 |
+
def get_chat_model(provider: str, name: str, model_config: Optional[ModelConfig] = None, **kwargs: Any) -> LiteLLMChatWrapper:
    """Create a standard chat model wrapper for the given provider/model."""
    provider_id = str(provider).lower()
    provider_name, kwargs = _merge_provider_defaults("chat", provider_id, kwargs)
    return _get_litellm_chat(LiteLLMChatWrapper, name, provider_name, model_config, **kwargs)
|
| 615 |
+
|
| 616 |
+
|
| 617 |
+
def get_browser_model(
    provider: str, name: str, model_config: Optional[ModelConfig] = None, **kwargs: Any
) -> BrowserCompatibleChatWrapper:
    """Create a browser-agent-compatible chat wrapper for the given provider/model."""
    provider_id = str(provider).lower()
    provider_name, kwargs = _merge_provider_defaults("chat", provider_id, kwargs)
    return _get_litellm_chat(
        BrowserCompatibleChatWrapper, name, provider_name, model_config, **kwargs
    )
|
| 625 |
+
|
| 626 |
+
|
| 627 |
+
def get_embedding_model(
    provider: str, name: str, model_config: Optional[ModelConfig] = None, **kwargs: Any
) -> LiteLLMEmbeddingWrapper | LocalSentenceTransformerWrapper:
    """Create an embedding wrapper (LiteLLM-backed or local sentence-transformers)."""
    provider_id = str(provider).lower()
    provider_name, kwargs = _merge_provider_defaults("embedding", provider_id, kwargs)
    return _get_litellm_embedding(name, provider_name, model_config, **kwargs)
|
preload.py
ADDED
|
@@ -0,0 +1,58 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import asyncio
|
| 2 |
+
from python.helpers import runtime, whisper, settings
|
| 3 |
+
from python.helpers.print_style import PrintStyle
|
| 4 |
+
from python.helpers import kokoro_tts
|
| 5 |
+
import models
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
async def preload():
    """Preload heavyweight models at startup (embedding; whisper/kokoro disabled).

    Each sub-preload swallows and logs its own errors so one failing model
    never blocks the others.
    """
    PrintStyle().print("Preloading models...")
    try:
        # 'cfg' instead of 'set' to avoid shadowing the builtin
        cfg = settings.get_default_settings()

        # preload whisper model (currently disabled below)
        async def preload_whisper():
            try:
                return await whisper.preload(cfg["stt_model_size"])
            except Exception as e:
                PrintStyle().error(f"Error in preload_whisper: {e}")

        # preload embedding model
        async def preload_embedding():
            if cfg["embed_model_provider"].lower() == "huggingface":
                try:
                    # Use the new LiteLLM-based model system
                    emb_mod = models.get_embedding_model(
                        "huggingface", cfg["embed_model_name"]
                    )
                    emb_txt = await emb_mod.aembed_query("test")
                    return emb_txt
                except Exception as e:
                    PrintStyle().error(f"Error in preload_embedding: {e}")

        # preload kokoro tts model if enabled (currently disabled below)
        async def preload_kokoro():
            if cfg["tts_kokoro"]:
                try:
                    return await kokoro_tts.preload()
                except Exception as e:
                    PrintStyle().error(f"Error in preload_kokoro: {e}")

        # async tasks to preload
        tasks = [
            preload_embedding(),
            # preload_whisper(),
            # preload_kokoro()
        ]

        await asyncio.gather(*tasks, return_exceptions=True)
        PrintStyle().print("Preload completed")
    except Exception as e:
        PrintStyle().error(f"Error in preload: {e}")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# preload transcription model
if __name__ == "__main__":
    # standalone entry point: initialize runtime args, then run the preload once
    PrintStyle().print("Running preload...")
    runtime.initialize()
    asyncio.run(preload())
|
python/api/chat.py
ADDED
|
@@ -0,0 +1,88 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import AgentContext, UserMessage
|
| 2 |
+
from python.helpers.api import ApiHandler, Request, Response
|
| 3 |
+
from python.helpers import files, dotenv
|
| 4 |
+
from initialize import initialize_agent
|
| 5 |
+
import os
|
| 6 |
+
import base64
|
| 7 |
+
|
| 8 |
+
class Chat(ApiHandler):
    """Synchronous chat endpoint.

    Accepts a user message (optionally with a base64-encoded file attachment),
    applies the latest settings to the chat context, runs the agent chain to
    completion and returns the agent's final answer plus the context id.
    """

    @classmethod
    def requires_auth(cls) -> bool:
        # Protected by the API key check below instead of session auth.
        return False

    @classmethod
    def requires_csrf(cls) -> bool:
        # Pure machine-to-machine endpoint, not called from the browser UI.
        return False

    @classmethod
    def requires_api_key(cls) -> bool:
        return True

    @staticmethod
    def _save_uploaded_file(file_data, file_name: str) -> str:
        """Persist an uploaded file under knowledge/custom and return its path.

        `file_data` may be a base64 payload, optionally prefixed with a
        data-URL header ("data:...;base64,<payload>"). If decoding fails the
        raw value is written as text (deliberate best-effort fallback).
        """
        # Sanitize file name to prevent path traversal
        file_name = os.path.basename(file_name)
        knowledge_dir = files.get_abs_path("knowledge/custom")
        os.makedirs(knowledge_dir, exist_ok=True)
        save_path = os.path.join(knowledge_dir, file_name)
        try:
            if isinstance(file_data, str) and "," in file_data:
                # Strip the data-URL header; keep only the base64 payload.
                _, file_data = file_data.split(",", 1)
            decoded_data = base64.b64decode(file_data)
            with open(save_path, "wb") as f:
                f.write(decoded_data)
        except Exception:
            # Best-effort fallback: store whatever we received as plain text.
            with open(save_path, "w") as f:
                f.write(str(file_data))
        return save_path

    async def process(self, input: dict, request: Request) -> dict:
        text = input.get("message") or input.get("text") or ""
        ctxid = input.get("context")
        subagent = input.get("subagent") or input.get("profile")
        file_data = input.get("file")
        file_name = input.get("file_name", "uploaded_file")

        # Ensure latest environment variables are loaded
        dotenv.load_dotenv()

        # Automatically use BLABLADOR_API_KEY for 'other' provider if available
        blablador_key = os.getenv("BLABLADOR_API_KEY")
        if blablador_key:
            os.environ.setdefault("OTHER_API_KEY", blablador_key)
            os.environ.setdefault("API_KEY_OTHER", blablador_key)

        context = self.get_context(ctxid)

        # Dynamically use the latest settings for every request
        config = initialize_agent()

        # Robustly handle provider name if it's the UI label instead of the ID
        if config.chat_model.provider == "Other OpenAI compatible":
            config.chat_model.provider = "other"
        if config.utility_model.provider == "Other OpenAI compatible":
            config.utility_model.provider = "other"

        # Override profile if provided via subagent parameter
        if subagent:
            config.profile = subagent
            if subagent not in config.knowledge_subdirs:
                config.knowledge_subdirs.append(subagent)

        context.config = config

        # Apply config to all agents in the subordinate chain
        curr_agent = context.agent0
        while curr_agent:
            curr_agent.config = config
            curr_agent = curr_agent.data.get(curr_agent.DATA_NAME_SUBORDINATE)

        attachment_paths = []
        if file_data:
            # Integrate the upload into the file-system knowledge base.
            attachment_paths.append(self._save_uploaded_file(file_data, file_name))

        msg = UserMessage(text, attachment_paths)
        task = context.communicate(msg)
        result = await task.result()

        return {"message": result, "context": context.id}
|
python/api/csrf_token.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import (
|
| 2 |
+
ApiHandler,
|
| 3 |
+
Input,
|
| 4 |
+
Output,
|
| 5 |
+
Request,
|
| 6 |
+
Response,
|
| 7 |
+
session,
|
| 8 |
+
)
|
| 9 |
+
from python.helpers import runtime, csrf
|
| 10 |
+
|
| 11 |
+
class GetCsrfToken(ApiHandler):
    """Issue a fresh CSRF token together with the current runtime id."""

    @classmethod
    def get_methods(cls) -> list[str]:
        # Token retrieval is read-only, so only GET is exposed.
        return ["GET"]

    @classmethod
    def requires_csrf(cls) -> bool:
        # This endpoint bootstraps CSRF protection, so it cannot demand a token itself.
        return False

    async def process(self, input: Input, request: Request) -> Output:
        return {
            "token": csrf.generate_csrf_token(),
            "runtime_id": runtime.get_runtime_id(),
        }
|
python/api/docs.py
ADDED
|
@@ -0,0 +1,82 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import ApiHandler, Request, Output
|
| 2 |
+
import os
|
| 3 |
+
|
| 4 |
+
class Docs(ApiHandler):
    """Self-describing API documentation endpoint (GET, no auth required)."""

    @classmethod
    def requires_auth(cls) -> bool:
        return False

    @classmethod
    def requires_csrf(cls) -> bool:
        return False

    @classmethod
    def get_methods(cls) -> list[str]:
        return ["GET"]

    async def process(self, input: dict, request: Request) -> dict:
        # Advertise bearer-token auth only when a token is actually configured.
        auth_token = os.getenv("AUTHENTICATION_TOKEN")
        auth_type = "Bearer token required" if auth_token else "Basic Auth or API Key required"

        # Shared fragments; copied per endpoint so each entry owns its dicts.
        bearer_headers = {"Authorization": "Bearer YOUR_AUTHENTICATION_TOKEN"}
        message_params = {
            "message": "The text message to send (required).",
            "subagent": "The name of the agent profile to use (optional).",
            "file": "Base64 encoded file content for context (optional).",
            "file_name": "The name of the file being uploaded (optional).",
            "context": "The ID of an existing chat context (optional).",
        }

        endpoints = [
            {
                "path": "/health",
                "method": "GET",
                "description": "Check if the service is running.",
                "auth_required": False,
            },
            {
                "path": "/chat",
                "method": "POST",
                "description": "Send a message and receive the final response.",
                "parameters": dict(message_params),
                "headers": dict(bearer_headers),
            },
            {
                "path": "/stream",
                "method": "POST",
                "description": "Stream the agent's response using Server-Sent Events (SSE).",
                "parameters": dict(message_params),
                "headers": dict(bearer_headers),
            },
            {
                "path": "/set",
                "method": "POST",
                "description": "Dynamically update application settings and API keys.",
                "parameters": {
                    "any_setting_key": "The value to set for that setting.",
                    "api_key_PROVIDER": "Set the API key for a specific provider (e.g., api_key_openai).",
                },
                "headers": dict(bearer_headers),
            },
            {
                "path": "/get",
                "method": "GET",
                "description": "Retrieve the current application settings.",
                "headers": dict(bearer_headers),
            },
        ]

        return {
            "title": "Agent-skillset API Documentation",
            "authentication": auth_type,
            "endpoints": endpoints,
        }
|
python/api/get.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import ApiHandler, Request, Response
|
| 2 |
+
from python.helpers import settings
|
| 3 |
+
|
| 4 |
+
class Get(ApiHandler):
    """Return the current application settings (read-only, API-key gated)."""

    @classmethod
    def requires_auth(cls) -> bool:
        return False

    @classmethod
    def requires_csrf(cls) -> bool:
        return False

    @classmethod
    def requires_api_key(cls) -> bool:
        return True

    @classmethod
    def get_methods(cls) -> list[str]:
        return ["GET"]

    async def process(self, input: dict, request: Request) -> dict:
        return {"settings": settings.get_settings()}
|
python/api/health.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import ApiHandler, Request, Response
|
| 2 |
+
|
| 3 |
+
class Health(ApiHandler):
    """Unauthenticated liveness probe for deployment health checks."""

    @classmethod
    def requires_auth(cls) -> bool:
        return False

    @classmethod
    def requires_csrf(cls) -> bool:
        return False

    @classmethod
    def get_methods(cls) -> list[str]:
        return ["GET"]

    async def process(self, input: dict, request: Request) -> dict:
        payload = {"status": "ok", "message": "Service is running"}
        return payload
|
python/api/set.py
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from python.helpers.api import ApiHandler, Request, Response
|
| 2 |
+
from python.helpers import settings, dotenv
|
| 3 |
+
import os
|
| 4 |
+
|
| 5 |
+
class Set(ApiHandler):
    """Update application settings and provider API keys at runtime.

    API keys arrive either as ``api_key_<provider>`` or as ``<NAME>_API_KEY``
    / ``<NAME>_API_TOKEN`` entries; everything else is applied as a regular
    settings delta.
    """

    @classmethod
    def requires_auth(cls) -> bool:
        return True

    @classmethod
    def requires_csrf(cls) -> bool:
        return False

    @classmethod
    def requires_api_key(cls) -> bool:
        return True

    async def process(self, input: dict, request: Request) -> dict:
        # Separate API keys from other settings
        api_keys = {}
        other_settings = {}

        for k, v in input.items():
            if k.startswith("api_key_"):
                # removeprefix (not replace) so only the leading marker is stripped
                provider = k.removeprefix("api_key_").upper()
                api_keys[provider] = v
            elif k.endswith("_API_KEY") or k.endswith("_API_TOKEN"):
                api_keys[k.upper()] = v
            else:
                other_settings[k] = v

        if other_settings:
            settings.set_settings_delta(other_settings)

        if api_keys:
            current_settings = settings.get_settings()
            # Fix: guard against settings that do not yet contain "api_keys"
            # (the previous direct index raised KeyError on fresh settings).
            stored_keys = current_settings.setdefault("api_keys", {})
            for key, val in api_keys.items():
                # Save to settings
                stored_keys[key.lower()] = val

                # Save to dotenv / os.environ under both naming conventions
                dotenv.save_dotenv_value(f"API_KEY_{key.upper()}", val)
                dotenv.save_dotenv_value(f"{key.upper()}_API_KEY", val)

                # Special case for 'other' provider remapping to 'openai'
                if key.upper() == "OTHER":
                    dotenv.save_dotenv_value("OPENAI_API_KEY", val)
                    dotenv.save_dotenv_value("API_KEY_OPENAI", val)

            settings.set_settings(current_settings)

        return {"status": "success", "message": "Settings updated"}
|
python/api/stream.py
ADDED
|
@@ -0,0 +1,111 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from agent import AgentContext, UserMessage
|
| 2 |
+
from python.helpers.api import ApiHandler, Request, Response
|
| 3 |
+
from python.helpers import files, dotenv
|
| 4 |
+
from initialize import initialize_agent
|
| 5 |
+
import os
|
| 6 |
+
import json
|
| 7 |
+
import base64
|
| 8 |
+
import queue
|
| 9 |
+
import traceback
|
| 10 |
+
|
| 11 |
+
class Stream(ApiHandler):
    """SSE chat endpoint: streams agent output chunks as Server-Sent Events.

    A thread-safe queue is attached to the chat context; the streaming
    extensions (reasoning_stream / response_stream) push chunks into it while
    the agent runs, and the generator below drains it into the HTTP response.
    """

    @classmethod
    def requires_auth(cls) -> bool:
        # Protected by the API key check below instead of session auth.
        return False

    @classmethod
    def requires_csrf(cls) -> bool:
        # Pure machine-to-machine endpoint, not called from the browser UI.
        return False

    @classmethod
    def requires_api_key(cls) -> bool:
        return True

    async def process(self, input: dict, request: Request) -> Response:
        try:
            text = input.get("message") or input.get("text") or ""
            ctxid = input.get("context")
            subagent = input.get("subagent") or input.get("profile")
            file_data = input.get("file")
            file_name = input.get("file_name", "uploaded_file")

            # Ensure the latest environment variables are loaded.
            dotenv.load_dotenv()

            # Automatically use BLABLADOR_API_KEY for 'other' provider if available
            blablador_key = os.getenv("BLABLADOR_API_KEY")
            if blablador_key:
                os.environ.setdefault("OTHER_API_KEY", blablador_key)
                os.environ.setdefault("API_KEY_OTHER", blablador_key)

            context = self.get_context(ctxid)
            # Rebuild config per request so settings changes apply immediately.
            config = initialize_agent()

            # Normalize the UI label for the 'other' provider to its ID.
            if config.chat_model.provider == "Other OpenAI compatible":
                config.chat_model.provider = "other"
            if config.utility_model.provider == "Other OpenAI compatible":
                config.utility_model.provider = "other"

            # Optional profile override via the subagent parameter.
            if subagent:
                config.profile = subagent
                if subagent not in config.knowledge_subdirs:
                    config.knowledge_subdirs.append(subagent)

            context.config = config
            # Apply config to every agent in the subordinate chain.
            curr_agent = context.agent0
            while curr_agent:
                curr_agent.config = config
                curr_agent = curr_agent.data.get(curr_agent.DATA_NAME_SUBORDINATE)

            attachment_paths = []
            if file_data:
                # Sanitize file name to prevent path traversal
                file_name = os.path.basename(file_name)
                knowledge_dir = files.get_abs_path("knowledge/custom")
                os.makedirs(knowledge_dir, exist_ok=True)
                save_path = os.path.join(knowledge_dir, file_name)
                try:
                    if isinstance(file_data, str) and "," in file_data:
                        # Strip a data-URL header if present; keep the payload.
                        header, encoded = file_data.split(",", 1)
                        file_data = encoded
                    decoded_data = base64.b64decode(file_data)
                    with open(save_path, "wb") as f:
                        f.write(decoded_data)
                except Exception:
                    # Best-effort fallback: store the raw value as text.
                    with open(save_path, "w") as f:
                        f.write(str(file_data))
                attachment_paths.append(save_path)

            # Thread-safe queue bridging the agent thread and this generator.
            sync_queue = queue.Queue()
            context.stream_queue = sync_queue

            msg = UserMessage(text, attachment_paths)
            task = context.communicate(msg)

            def generate():
                """Drain stream chunks, then emit the final result or an error."""
                try:
                    # Keep draining while the agent runs or chunks remain queued.
                    while task.is_alive() or not sync_queue.empty():
                        try:
                            # Short timeout so task completion is re-checked often.
                            chunk = sync_queue.get(timeout=0.1)
                            yield f"data: {json.dumps(chunk)}\n\n"
                        except queue.Empty:
                            if not task.is_alive():
                                break
                            continue
                        except Exception as e:
                            yield f"data: {json.dumps({'type': 'error', 'text': str(e)})}\n\n"
                            break

                    try:
                        # Block (bounded) for the agent's final answer.
                        result = task.result_sync(timeout=300)
                        yield f"data: {json.dumps({'type': 'final', 'text': result})}\n\n"
                    except Exception as e:
                        yield f"data: {json.dumps({'type': 'error', 'text': f'Result error: {str(e)}'})}\n\n"
                except Exception as e:
                    yield f"data: {json.dumps({'type': 'error', 'text': f'Generator error: {str(e)}'})}\n\n"
                finally:
                    # Detach the queue so later requests don't stream into it.
                    if hasattr(context, 'stream_queue'):
                        delattr(context, 'stream_queue')

            return Response(generate(), mimetype='text/event-stream')
        except Exception as e:
            return Response(f"Error: {str(e)}\n{traceback.format_exc()}", status=500)
|
python/extensions/reasoning_stream/_30_api_stream.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
async def reasoning_stream(agent, text, **kwargs):
    """Forward a reasoning chunk to the SSE stream queue, if one is attached.

    The /stream endpoint attaches a `stream_queue` to the agent context; when
    no queue is present (normal non-streaming requests) this is a no-op.
    """
    context = agent.context
    if hasattr(context, 'stream_queue'):
        context.stream_queue.put({'type': 'reasoning', 'text': text})
|
python/extensions/response_stream/_30_api_stream.py
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
async def response_stream(agent, text, parsed, **kwargs):
    """Forward a response chunk (raw text plus parsed form) to the SSE queue.

    No-op when the context has no `stream_queue` (non-streaming requests).
    """
    context = agent.context
    if hasattr(context, 'stream_queue'):
        context.stream_queue.put({'type': 'response', 'text': text, 'parsed': parsed})
|
python/extensions/system_prompt/_10_system_prompt.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from typing import Any
|
| 2 |
+
from python.helpers.extension import Extension
|
| 3 |
+
from python.helpers.mcp_handler import MCPConfig
|
| 4 |
+
from agent import Agent, LoopData
|
| 5 |
+
|
| 6 |
+
|
| 7 |
+
class SystemPrompt(Extension):
    """Extension that assembles the agent's system prompt sections."""

    async def execute(self, system_prompt: list[str] | None = None, loop_data: LoopData | None = None, **kwargs: Any):
        """Append main prompt, tools, optional MCP tools and the final-response
        instruction to `system_prompt` (mutated in place by the caller's list).

        Fix: the previous signature used mutable defaults (`[]` and
        `LoopData()`), which Python shares across calls; defaults are now
        created per call.
        """
        if system_prompt is None:
            system_prompt = []
        if loop_data is None:
            loop_data = LoopData()

        # append main system prompt and tools
        main = get_main_prompt(self.agent)
        tools = get_tools_prompt(self.agent)
        mcp_tools = get_mcp_tools_prompt(self.agent)
        final_instruction = "When you are done with the task, use the 'response' tool to return the final answer."

        system_prompt.append(main)
        system_prompt.append(tools)
        if mcp_tools:
            system_prompt.append(mcp_tools)
        system_prompt.append(final_instruction)
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def get_main_prompt(agent: Agent):
    """Read the main system prompt template for the given agent."""
    return agent.read_prompt("agent.system.main.md")
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_tools_prompt(agent: Agent):
    """Return the tools prompt, extended with the vision tools section when
    the configured chat model supports vision."""
    sections = [agent.read_prompt("agent.system.tools.md")]
    if agent.config.chat_model.vision:
        sections.append(agent.read_prompt("agent.system.tools_vision.md"))
    return '\n\n'.join(sections)
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def get_mcp_tools_prompt(agent: Agent):
    """Return the prompt section describing configured MCP tools, or "" when
    no MCP servers are configured."""
    mcp_config = MCPConfig.get_instance()
    if not mcp_config.servers:
        return ""
    # MCP might still be initializing, so inform the user via the progress bar.
    previous_progress = agent.context.log.progress
    agent.context.log.set_progress("Collecting MCP tools")
    tools_prompt = mcp_config.get_tools_prompt()
    agent.context.log.set_progress(previous_progress)  # restore original progress
    return tools_prompt
|
| 43 |
+
|
python/helpers/csrf.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import secrets
import hmac
import hashlib
import time

# Per-process secret: tokens are invalidated whenever the server restarts.
CSRF_SECRET = secrets.token_bytes(32)
TOKEN_TTL = 3600  # 1 hour validity


def generate_csrf_token():
    """Create a signed CSRF token of the form "<nonce>:<timestamp>.<hmac>"."""
    nonce = secrets.token_hex(16)  # 128-bit random
    issued_at = str(int(time.time()))
    payload = f"{nonce}:{issued_at}"
    signature = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
    return f"{payload}.{signature}"


def verify_csrf_token(token):
    """Return True when the token's HMAC is valid and it has not expired."""
    try:
        payload, signature = token.rsplit(".", 1)
        expected = hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()
        # Constant-time comparison guards against timing attacks.
        if not hmac.compare_digest(signature, expected):
            return False
        _nonce, issued_at = payload.split(":")
        # Reject tokens older than the configured TTL.
        return time.time() - int(issued_at) <= TOKEN_TTL
    except Exception:
        # Any malformed token (bad split, non-integer timestamp) is invalid.
        return False
|
python/helpers/mcp_server.py
ADDED
|
@@ -0,0 +1,433 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
from typing import Annotated, Literal, Union
|
| 3 |
+
from urllib.parse import urlparse
|
| 4 |
+
from openai import BaseModel
|
| 5 |
+
from pydantic import Field
|
| 6 |
+
from fastmcp import FastMCP
|
| 7 |
+
|
| 8 |
+
from agent import AgentContext, AgentContextType, UserMessage
|
| 9 |
+
from python.helpers.persist_chat import remove_chat
|
| 10 |
+
from initialize import initialize_agent
|
| 11 |
+
from python.helpers.print_style import PrintStyle
|
| 12 |
+
from python.helpers import settings
|
| 13 |
+
from starlette.middleware import Middleware
|
| 14 |
+
from starlette.middleware.base import BaseHTTPMiddleware
|
| 15 |
+
from starlette.exceptions import HTTPException as StarletteHTTPException
|
| 16 |
+
from starlette.types import ASGIApp, Receive, Scope, Send
|
| 17 |
+
from fastmcp.server.http import create_sse_app
|
| 18 |
+
from starlette.requests import Request
|
| 19 |
+
import threading
|
| 20 |
+
|
| 21 |
+
# Shared printer used for MCP server log output.
_PRINTER = PrintStyle(italic=True, font_color="green", padding=False)


# FastMCP server instance exposing this Agent Zero installation to remote MCP clients.
mcp_server: FastMCP = FastMCP(
    name="Agent Zero integrated MCP Server",
    instructions="""
Connect to remote Agent Zero instance.
Agent Zero is a general AI assistant controlling it's linux environment.
Agent Zero can install software, manage files, execute commands, code, use internet, etc.
Agent Zero's environment is isolated unless configured otherwise.
""",
)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class ToolResponse(BaseModel):
    # Success envelope returned by the MCP tools below.
    # status is the discriminator; chat_id is "" for non-persistent chats.
    status: Literal["success"] = Field(
        description="The status of the response", default="success"
    )
    response: str = Field(
        description="The response from the remote Agent Zero Instance"
    )
    chat_id: str = Field(description="The id of the chat this message belongs to.")
|
| 43 |
+
|
| 44 |
+
|
| 45 |
+
class ToolError(BaseModel):
    # Error envelope returned by the MCP tools below.
    # status is the discriminator; chat_id is "" when no chat is associated.
    status: Literal["error"] = Field(
        description="The status of the response", default="error"
    )
    error: str = Field(
        description="The error message from the remote Agent Zero Instance"
    )
    chat_id: str = Field(description="The id of the chat this message belongs to.")
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
SEND_MESSAGE_DESCRIPTION = """
|
| 56 |
+
Send a message to the remote Agent Zero Instance.
|
| 57 |
+
This tool is used to send a message to the remote Agent Zero Instance connected remotely via MCP.
|
| 58 |
+
"""
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
@mcp_server.tool(
    name="send_message",
    description=SEND_MESSAGE_DESCRIPTION,
    tags={
        "agent_zero",
        "chat",
        "remote",
        "communication",
        "dialogue",
        "sse",
        "send",
        "message",
        "start",
        "new",
        "continue",
    },
    annotations={
        "remote": True,
        "readOnlyHint": False,
        "destructiveHint": False,
        "idempotentHint": False,
        "openWorldHint": False,
        "title": SEND_MESSAGE_DESCRIPTION,
    },
)
async def send_message(
    message: Annotated[
        str,
        Field(
            description="The message to send to the remote Agent Zero Instance",
            title="message",
        ),
    ],
    attachments: (
        Annotated[
            list[str],
            Field(
                description="Optional: A list of attachments (file paths or web urls) to send to the remote Agent Zero Instance with the message. Default: Empty list",
                title="attachments",
            ),
        ]
        | None
    ) = None,
    chat_id: (
        Annotated[
            str,
            Field(
                description="Optional: ID of the chat. Used to continue a chat. This value is returned in response to sending previous message. Default: Empty string",
                title="chat_id",
            ),
        ]
        | None
    ) = None,
    persistent_chat: (
        Annotated[
            bool,
            Field(
                description="Optional: Whether to use a persistent chat. If true, the chat will be saved and can be continued later. Default: False.",
                title="persistent_chat",
            ),
        ]
        | None
    ) = None,
) -> Annotated[
    Union[ToolResponse, ToolError],
    Field(
        description="The response from the remote Agent Zero Instance", title="response"
    ),
]:
    # MCP tool: run one user message through an Agent Zero chat and return
    # either a ToolResponse (success) or a ToolError. A chat_id continues an
    # existing chat; otherwise a fresh background context is created.
    context: AgentContext | None = None
    if chat_id:
        context = AgentContext.get(chat_id)
        if not context:
            return ToolError(error="Chat not found", chat_id=chat_id)
        else:
            # If the chat is found, we use the persistent chat flag to determine
            # whether we should save the chat or delete it afterwards.
            # If we continue a conversation, it must be persistent.
            persistent_chat = True
    else:
        # New conversation: build a fresh config and a background context.
        config = initialize_agent()
        context = AgentContext(config=config, type=AgentContextType.BACKGROUND)

    if not message:
        return ToolError(
            error="Message is required", chat_id=context.id if persistent_chat else ""
        )

    try:
        response = await _run_chat(context, message, attachments)
        if not persistent_chat:
            # Ephemeral chat: tear down and forget the context after the run.
            context.reset()
            AgentContext.remove(context.id)
            remove_chat(context.id)
        return ToolResponse(
            response=response, chat_id=context.id if persistent_chat else ""
        )
    except Exception as e:
        return ToolError(error=str(e), chat_id=context.id if persistent_chat else "")
|
| 160 |
+
|
| 161 |
+
|
| 162 |
+
FINISH_CHAT_DESCRIPTION = """
|
| 163 |
+
Finish a chat with the remote Agent Zero Instance.
|
| 164 |
+
This tool is used to finish a persistent chat (send_message with persistent_chat=True) with the remote Agent Zero Instance connected remotely via MCP.
|
| 165 |
+
If you want to continue the chat, use the send_message tool instead.
|
| 166 |
+
Always use this tool to finish persistent chat conversations with remote Agent Zero.
|
| 167 |
+
"""
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
@mcp_server.tool(
    name="finish_chat",
    description=FINISH_CHAT_DESCRIPTION,
    tags={
        "agent_zero",
        "chat",
        "remote",
        "communication",
        "dialogue",
        "sse",
        "finish",
        "close",
        "end",
        "stop",
    },
    annotations={
        "remote": True,
        "readOnlyHint": False,
        "destructiveHint": True,
        "idempotentHint": False,
        "openWorldHint": False,
        "title": FINISH_CHAT_DESCRIPTION,
    },
)
async def finish_chat(
    chat_id: Annotated[
        str,
        Field(
            description="ID of the chat to be finished. This value is returned in response to sending previous message.",
            title="chat_id",
        ),
    ]
) -> Annotated[
    Union[ToolResponse, ToolError],
    Field(
        description="The response from the remote Agent Zero Instance", title="response"
    ),
]:
    # MCP tool: close a persistent chat, removing its context and saved state.
    if not chat_id:
        return ToolError(error="Chat ID is required", chat_id="")

    context = AgentContext.get(chat_id)
    if not context:
        return ToolError(error="Chat not found", chat_id=chat_id)
    else:
        # Reset the context, drop it from the registry and delete persistence.
        context.reset()
        AgentContext.remove(context.id)
        remove_chat(context.id)
        return ToolResponse(response="Chat finished", chat_id=chat_id)
|
| 219 |
+
|
| 220 |
+
|
| 221 |
+
async def _run_chat(
    context: AgentContext, message: str, attachments: list[str] | None = None
):
    """Send a user message (plus optional attachments) to an agent context and await the reply.

    Args:
        context: Agent context that receives the message.
        message: User message text.
        attachments: Optional list of local file paths or URLs; entries that are
            neither existing files nor http/https/ftp/ftps/sftp URLs are skipped.

    Returns:
        The result produced by the context's communication task.

    Raises:
        RuntimeError: If the communication fails; the original error is chained.
    """
    try:
        _PRINTER.print("MCP Chat message received")

        # Keep only attachments that are existing local files or well-formed URLs.
        attachment_filenames = []
        if attachments:
            for attachment in attachments:
                if os.path.exists(attachment):
                    attachment_filenames.append(attachment)
                else:
                    try:
                        url = urlparse(attachment)
                        if url.scheme in ["http", "https", "ftp", "ftps", "sftp"]:
                            attachment_filenames.append(attachment)
                        else:
                            _PRINTER.print(f"Skipping attachment: [{attachment}]")
                    except Exception:
                        _PRINTER.print(f"Skipping attachment: [{attachment}]")

        _PRINTER.print("User message:")
        _PRINTER.print(f"> {message}")
        if attachment_filenames:
            _PRINTER.print("Attachments:")
            for filename in attachment_filenames:
                # Fix: previously printed a literal placeholder instead of the
                # accepted attachment name, leaving the loop variable unused.
                _PRINTER.print(f"- {filename}")

        task = context.communicate(
            UserMessage(
                message=message, system_message=[], attachments=attachment_filenames
            )
        )
        result = await task.result()

        # Success
        _PRINTER.print(f"MCP Chat message completed: {result}")

        return result

    except Exception as e:
        # Error
        _PRINTER.print(f"MCP Chat message failed: {e}")

        raise RuntimeError(f"MCP Chat message failed: {e}") from e
|
| 267 |
+
|
| 268 |
+
|
| 269 |
+
class DynamicMcpProxy:
    # Singleton instance; created lazily by get_instance().
    _instance: "DynamicMcpProxy | None" = None

    # NOTE(review): this string sits after the class attribute, so Python does
    # NOT treat it as the class docstring — confirm intent before moving it.
    """A dynamic proxy that allows swapping the underlying MCP applications on the fly."""

    def __init__(self):
        # Build the initial SSE/HTTP apps from the currently configured token.
        cfg = settings.get_settings()
        self.token = ""
        self.sse_app: ASGIApp | None = None
        self.http_app: ASGIApp | None = None
        self.http_session_manager = None
        self.http_session_task_group = None
        self._lock = threading.RLock()  # Use RLock to avoid deadlocks
        self.reconfigure(cfg["mcp_server_token"])

    @staticmethod
    def get_instance():
        # Lazy singleton accessor; not guarded by a lock — assumes first call
        # happens on a single thread (TODO confirm).
        if DynamicMcpProxy._instance is None:
            DynamicMcpProxy._instance = DynamicMcpProxy()
        return DynamicMcpProxy._instance

    def reconfigure(self, token: str):
        # Rebuild both ASGI apps when the MCP access token changes.
        # No-op if the token is unchanged.
        if self.token == token:
            return

        self.token = token
        # Token-scoped URL paths act as the authentication mechanism.
        sse_path = f"/t-{self.token}/sse"
        http_path = f"/t-{self.token}/http"
        message_path = f"/t-{self.token}/messages/"

        # Update settings in the MCP server instance if provided
        mcp_server.settings.message_path = message_path
        mcp_server.settings.sse_path = sse_path

        # Create new MCP apps with updated settings
        with self._lock:
            self.sse_app = create_sse_app(
                server=mcp_server,
                message_path=mcp_server.settings.message_path,
                sse_path=mcp_server.settings.sse_path,
                auth_server_provider=mcp_server._auth_server_provider,
                auth_settings=mcp_server.settings.auth,
                debug=mcp_server.settings.debug,
                routes=mcp_server._additional_http_routes,
                middleware=[Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware)],
            )

            # For HTTP, we need to create a custom app since the lifespan manager
            # doesn't work properly in our Flask/Werkzeug environment
            self.http_app = self._create_custom_http_app(
                http_path,
                mcp_server._auth_server_provider,
                mcp_server.settings.auth,
                mcp_server.settings.debug,
                mcp_server._additional_http_routes,
            )

    def _create_custom_http_app(self, streamable_http_path, auth_server_provider, auth_settings, debug, routes):
        """Create a custom HTTP app that manages the session manager manually."""
        # Imports are local to keep fastmcp/mcp internals out of module import time.
        from fastmcp.server.http import setup_auth_middleware_and_routes, create_base_app
        from mcp.server.streamable_http_manager import StreamableHTTPSessionManager
        from starlette.routing import Mount
        from mcp.server.auth.middleware.bearer_auth import RequireAuthMiddleware
        import anyio

        server_routes = []
        server_middleware = []

        # Task group is created lazily on first request (see handler below).
        self.http_session_task_group = None

        # Create session manager
        self.http_session_manager = StreamableHTTPSessionManager(
            app=mcp_server._mcp_server,
            event_store=None,
            json_response=True,
            stateless=False,
        )

        # Custom ASGI handler that ensures task group is initialized
        async def handle_streamable_http(scope, receive, send):
            # Lazy initialization of task group
            # NOTE(review): entering a task group via __aenter__ without a
            # matching __aexit__ relies on process lifetime cleanup — and this
            # check-then-set is not guarded against concurrent first requests.
            if self.http_session_task_group is None:
                self.http_session_task_group = anyio.create_task_group()
                await self.http_session_task_group.__aenter__()
                if self.http_session_manager:
                    self.http_session_manager._task_group = self.http_session_task_group

            if self.http_session_manager:
                await self.http_session_manager.handle_request(scope, receive, send)

        # Get auth middleware and routes
        auth_middleware, auth_routes, required_scopes = setup_auth_middleware_and_routes(
            auth_server_provider, auth_settings
        )

        server_routes.extend(auth_routes)
        server_middleware.extend(auth_middleware)

        # Add StreamableHTTP routes with or without auth
        if auth_server_provider:
            server_routes.append(
                Mount(
                    streamable_http_path,
                    app=RequireAuthMiddleware(handle_streamable_http, required_scopes),
                )
            )
        else:
            server_routes.append(
                Mount(
                    streamable_http_path,
                    app=handle_streamable_http,
                )
            )

        # Add custom routes with lowest precedence
        if routes:
            server_routes.extend(routes)

        # Add middleware
        server_middleware.append(Middleware(BaseHTTPMiddleware, dispatch=mcp_middleware))

        # Create and return the app
        return create_base_app(
            routes=server_routes,
            middleware=server_middleware,
            debug=debug,
        )

    async def __call__(self, scope: Scope, receive: Receive, send: Send) -> None:
        """Forward the ASGI calls to the appropriate app based on the URL path"""
        # Snapshot the app references under the lock, then dispatch without
        # holding it (await inside a threading lock would block other threads).
        with self._lock:
            sse_app = self.sse_app
            http_app = self.http_app

        if not sse_app or not http_app:
            raise RuntimeError("MCP apps not initialized")

        # Route based on path
        path = scope.get("path", "")

        if f"/t-{self.token}/sse" in path or f"t-{self.token}/messages" in path:
            # Route to SSE app
            await sse_app(scope, receive, send)
        elif f"/t-{self.token}/http" in path:
            # Route to HTTP app
            await http_app(scope, receive, send)
        else:
            # Unknown or wrongly-tokenized path: refuse.
            raise StarletteHTTPException(
                status_code=403, detail="MCP forbidden"
            )
|
| 421 |
+
|
| 422 |
+
|
| 423 |
+
async def mcp_middleware(request: Request, call_next):
    """Gate every MCP request on the `mcp_server_enabled` setting."""
    cfg = settings.get_settings()
    if cfg["mcp_server_enabled"]:
        # Server is enabled: hand the request down the middleware chain.
        return await call_next(request)

    # Disabled in settings: log and reject with 403.
    PrintStyle.error("[MCP] Access denied: MCP server is disabled in settings.")
    raise StarletteHTTPException(
        status_code=403, detail="MCP server is disabled in settings."
    )
|
python/helpers/searxng.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import aiohttp
|
| 2 |
+
import asyncio
|
| 3 |
+
from python.helpers import runtime
|
| 4 |
+
|
| 5 |
+
# List of public SearxNG instances
# Find more at https://searx.space/
# NOTE(review): public instance availability changes over time — these URLs
# should be re-verified periodically. They are tried in order by _search().
INSTANCES = [
    "https://searx.be/search",
    "https://searx.info/search",
    "https://searx.work/search",
    "https://searx.priv.au/search",
    "https://searx.tiekoetter.com/search",
    "https://searx.baczek.me/search",
    "https://searx.rodeo/search",
]
|
| 16 |
+
|
| 17 |
+
async def search(query:str):
    """Public entry point: run `query` against the first responsive SearxNG instance."""
    return await _search(query=query)
|
| 19 |
+
|
| 20 |
+
async def _search(query: str):
    """Try each configured SearxNG endpoint in order; return the first JSON result.

    Raises:
        Exception: When every instance is unreachable, times out, or returns
            a non-JSON / non-200 response.
    """
    timeout = aiohttp.ClientTimeout(total=30)
    payload = {"q": query, "format": "json"}
    for endpoint in INSTANCES:
        try:
            async with aiohttp.ClientSession(timeout=timeout) as session:
                async with session.post(endpoint, data=payload) as response:
                    if response.status != 200:
                        # Non-200 status: fall through to the next instance.
                        continue
                    try:
                        return await response.json()
                    except aiohttp.client_exceptions.ContentTypeError:
                        # This instance is not returning JSON, so we try the next one
                        continue
        except (aiohttp.ClientConnectorError, asyncio.TimeoutError):
            # This instance is not reachable or timed out, so we try the next one
            continue
    # If all instances fail, we return an error
    raise Exception("All SearxNG instances failed to respond.")
|
python/helpers/settings.py
ADDED
|
@@ -0,0 +1,1623 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import hashlib
|
| 3 |
+
import json
|
| 4 |
+
import os
|
| 5 |
+
import re
|
| 6 |
+
import subprocess
|
| 7 |
+
from typing import Any, Literal, TypedDict, cast
|
| 8 |
+
|
| 9 |
+
import models
|
| 10 |
+
from python.helpers import runtime, whisper, defer, git
|
| 11 |
+
from . import files, dotenv
|
| 12 |
+
from python.helpers.print_style import PrintStyle
|
| 13 |
+
from python.helpers.providers import get_providers
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class Settings(TypedDict):
    """Typed schema of the persisted application settings (see SETTINGS_FILE)."""

    version: str

    # --- Main chat model ---
    chat_model_provider: str
    chat_model_name: str
    chat_model_api_base: str
    chat_model_kwargs: dict[str, str]
    chat_model_ctx_length: int
    chat_model_ctx_history: float  # fraction of context window reserved for history
    chat_model_vision: bool
    chat_model_rl_requests: int  # rate limits: requests / input tokens / output tokens
    chat_model_rl_input: int
    chat_model_rl_output: int

    # --- Utility model ---
    util_model_provider: str
    util_model_name: str
    util_model_api_base: str
    util_model_kwargs: dict[str, str]
    util_model_ctx_length: int
    util_model_ctx_input: float
    util_model_rl_requests: int
    util_model_rl_input: int
    util_model_rl_output: int

    # --- Embedding model ---
    embed_model_provider: str
    embed_model_name: str
    embed_model_api_base: str
    embed_model_kwargs: dict[str, str]
    embed_model_rl_requests: int
    embed_model_rl_input: int

    # --- Browser model ---
    browser_model_provider: str
    browser_model_name: str
    browser_model_api_base: str
    browser_model_vision: bool
    browser_model_rl_requests: int
    browser_model_rl_input: int
    browser_model_rl_output: int
    browser_model_kwargs: dict[str, str]

    # --- Agent identity and storage ---
    agent_profile: str
    agent_memory_subdir: str
    agent_knowledge_subdir: str

    # --- Memory recall / memorize tuning ---
    memory_recall_enabled: bool
    memory_recall_delayed: bool
    memory_recall_interval: int
    memory_recall_history_len: int
    memory_recall_memories_max_search: int
    memory_recall_solutions_max_search: int
    memory_recall_memories_max_result: int
    memory_recall_solutions_max_result: int
    memory_recall_similarity_threshold: float
    memory_recall_query_prep: bool
    memory_recall_post_filter: bool
    memory_memorize_enabled: bool
    memory_memorize_consolidation: bool
    memory_memorize_replace_threshold: float

    # Provider API keys keyed by provider id.
    api_keys: dict[str, str]

    # --- Authentication ---
    auth_login: str
    auth_password: str
    root_password: str

    # --- RFC (remote function call) connection ---
    rfc_auto_docker: bool
    rfc_url: str
    rfc_password: str
    rfc_port_http: int
    rfc_port_ssh: int

    shell_interface: Literal['local','ssh']

    # --- Speech-to-text (whisper) ---
    stt_model_size: str
    stt_language: str
    stt_silence_threshold: float
    stt_silence_duration: int
    stt_waiting_timeout: int

    # Text-to-speech via Kokoro on/off.
    tts_kokoro: bool

    # --- MCP client/server ---
    mcp_servers: str
    mcp_client_init_timeout: int
    mcp_client_tool_timeout: int
    mcp_server_enabled: bool
    mcp_server_token: str

    # A2A server on/off.
    a2a_server_enabled: bool
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class PartialSettings(Settings, total=False):
    """Same keys as Settings, but every key optional (total=False)."""

    pass
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
class FieldOption(TypedDict):
    """One selectable choice of a "select" settings field."""

    value: str  # value stored in settings when chosen
    label: str  # human-readable label shown in the UI
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
class SettingsField(TypedDict, total=False):
    """Description of a single input rendered on the settings page."""

    id: str  # settings key this field maps to
    title: str
    description: str
    # Widget type used to render the field.
    type: Literal[
        "text",
        "number",
        "select",
        "range",
        "textarea",
        "password",
        "switch",
        "button",
        "html",
    ]
    value: Any  # current value; concrete type depends on `type`
    min: float  # bounds/step — relevant for "number"/"range" fields
    max: float
    step: float
    hidden: bool
    options: list[FieldOption]  # choices for "select" fields
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
class SettingsSection(TypedDict, total=False):
    """A titled group of related fields shown together in the settings UI."""

    id: str
    title: str
    description: str
    fields: list[SettingsField]
    tab: str  # Indicates which tab this section belongs to
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
class SettingsOutput(TypedDict):
    """Top-level payload handed to the settings UI: all sections in order."""

    sections: list[SettingsSection]
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
# Placeholders sent to the UI instead of real secret values.
PASSWORD_PLACEHOLDER = "****PSWD****"
API_KEY_PLACEHOLDER = "************"

# Where settings are persisted on disk.
SETTINGS_FILE = files.get_abs_path("tmp/settings.json")
# In-process cache of the loaded settings; None until first load.
_settings: Settings | None = None
|
| 156 |
+
|
| 157 |
+
|
| 158 |
+
def convert_out(settings: Settings) -> SettingsOutput:
|
| 159 |
+
default_settings = get_default_settings()
|
| 160 |
+
|
| 161 |
+
# main model section
|
| 162 |
+
chat_model_fields: list[SettingsField] = []
|
| 163 |
+
chat_model_fields.append(
|
| 164 |
+
{
|
| 165 |
+
"id": "chat_model_provider",
|
| 166 |
+
"title": "Chat model provider",
|
| 167 |
+
"description": "Select provider for main chat model used by Agent Zero",
|
| 168 |
+
"type": "select",
|
| 169 |
+
"value": settings["chat_model_provider"],
|
| 170 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 171 |
+
}
|
| 172 |
+
)
|
| 173 |
+
chat_model_fields.append(
|
| 174 |
+
{
|
| 175 |
+
"id": "chat_model_name",
|
| 176 |
+
"title": "Chat model name",
|
| 177 |
+
"description": "Exact name of model from selected provider",
|
| 178 |
+
"type": "text",
|
| 179 |
+
"value": settings["chat_model_name"],
|
| 180 |
+
}
|
| 181 |
+
)
|
| 182 |
+
|
| 183 |
+
chat_model_fields.append(
|
| 184 |
+
{
|
| 185 |
+
"id": "chat_model_api_base",
|
| 186 |
+
"title": "Chat model API base URL",
|
| 187 |
+
"description": "API base URL for main chat model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 188 |
+
"type": "text",
|
| 189 |
+
"value": settings["chat_model_api_base"],
|
| 190 |
+
}
|
| 191 |
+
)
|
| 192 |
+
|
| 193 |
+
chat_model_fields.append(
|
| 194 |
+
{
|
| 195 |
+
"id": "chat_model_ctx_length",
|
| 196 |
+
"title": "Chat model context length",
|
| 197 |
+
"description": "Maximum number of tokens in the context window for LLM. System prompt, chat history, RAG and response all count towards this limit.",
|
| 198 |
+
"type": "number",
|
| 199 |
+
"value": settings["chat_model_ctx_length"],
|
| 200 |
+
}
|
| 201 |
+
)
|
| 202 |
+
|
| 203 |
+
chat_model_fields.append(
|
| 204 |
+
{
|
| 205 |
+
"id": "chat_model_ctx_history",
|
| 206 |
+
"title": "Context window space for chat history",
|
| 207 |
+
"description": "Portion of context window dedicated to chat history visible to the agent. Chat history will automatically be optimized to fit. Smaller size will result in shorter and more summarized history. The remaining space will be used for system prompt, RAG and response.",
|
| 208 |
+
"type": "range",
|
| 209 |
+
"min": 0.01,
|
| 210 |
+
"max": 1,
|
| 211 |
+
"step": 0.01,
|
| 212 |
+
"value": settings["chat_model_ctx_history"],
|
| 213 |
+
}
|
| 214 |
+
)
|
| 215 |
+
|
| 216 |
+
chat_model_fields.append(
|
| 217 |
+
{
|
| 218 |
+
"id": "chat_model_vision",
|
| 219 |
+
"title": "Supports Vision",
|
| 220 |
+
"description": "Models capable of Vision can for example natively see the content of image attachments.",
|
| 221 |
+
"type": "switch",
|
| 222 |
+
"value": settings["chat_model_vision"],
|
| 223 |
+
}
|
| 224 |
+
)
|
| 225 |
+
|
| 226 |
+
chat_model_fields.append(
|
| 227 |
+
{
|
| 228 |
+
"id": "chat_model_rl_requests",
|
| 229 |
+
"title": "Requests per minute limit",
|
| 230 |
+
"description": "Limits the number of requests per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 231 |
+
"type": "number",
|
| 232 |
+
"value": settings["chat_model_rl_requests"],
|
| 233 |
+
}
|
| 234 |
+
)
|
| 235 |
+
|
| 236 |
+
chat_model_fields.append(
|
| 237 |
+
{
|
| 238 |
+
"id": "chat_model_rl_input",
|
| 239 |
+
"title": "Input tokens per minute limit",
|
| 240 |
+
"description": "Limits the number of input tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 241 |
+
"type": "number",
|
| 242 |
+
"value": settings["chat_model_rl_input"],
|
| 243 |
+
}
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
chat_model_fields.append(
|
| 247 |
+
{
|
| 248 |
+
"id": "chat_model_rl_output",
|
| 249 |
+
"title": "Output tokens per minute limit",
|
| 250 |
+
"description": "Limits the number of output tokens per minute to the chat model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 251 |
+
"type": "number",
|
| 252 |
+
"value": settings["chat_model_rl_output"],
|
| 253 |
+
}
|
| 254 |
+
)
|
| 255 |
+
|
| 256 |
+
chat_model_fields.append(
|
| 257 |
+
{
|
| 258 |
+
"id": "chat_model_kwargs",
|
| 259 |
+
"title": "Chat model additional parameters",
|
| 260 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 261 |
+
"type": "textarea",
|
| 262 |
+
"value": _dict_to_env(settings["chat_model_kwargs"]),
|
| 263 |
+
}
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
chat_model_section: SettingsSection = {
|
| 267 |
+
"id": "chat_model",
|
| 268 |
+
"title": "Chat Model",
|
| 269 |
+
"description": "Selection and settings for main chat model used by Agent Zero",
|
| 270 |
+
"fields": chat_model_fields,
|
| 271 |
+
"tab": "agent",
|
| 272 |
+
}
|
| 273 |
+
|
| 274 |
+
# main model section
|
| 275 |
+
util_model_fields: list[SettingsField] = []
|
| 276 |
+
util_model_fields.append(
|
| 277 |
+
{
|
| 278 |
+
"id": "util_model_provider",
|
| 279 |
+
"title": "Utility model provider",
|
| 280 |
+
"description": "Select provider for utility model used by the framework",
|
| 281 |
+
"type": "select",
|
| 282 |
+
"value": settings["util_model_provider"],
|
| 283 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 284 |
+
}
|
| 285 |
+
)
|
| 286 |
+
util_model_fields.append(
|
| 287 |
+
{
|
| 288 |
+
"id": "util_model_name",
|
| 289 |
+
"title": "Utility model name",
|
| 290 |
+
"description": "Exact name of model from selected provider",
|
| 291 |
+
"type": "text",
|
| 292 |
+
"value": settings["util_model_name"],
|
| 293 |
+
}
|
| 294 |
+
)
|
| 295 |
+
|
| 296 |
+
util_model_fields.append(
|
| 297 |
+
{
|
| 298 |
+
"id": "util_model_api_base",
|
| 299 |
+
"title": "Utility model API base URL",
|
| 300 |
+
"description": "API base URL for utility model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 301 |
+
"type": "text",
|
| 302 |
+
"value": settings["util_model_api_base"],
|
| 303 |
+
}
|
| 304 |
+
)
|
| 305 |
+
|
| 306 |
+
util_model_fields.append(
|
| 307 |
+
{
|
| 308 |
+
"id": "util_model_rl_requests",
|
| 309 |
+
"title": "Requests per minute limit",
|
| 310 |
+
"description": "Limits the number of requests per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 311 |
+
"type": "number",
|
| 312 |
+
"value": settings["util_model_rl_requests"],
|
| 313 |
+
}
|
| 314 |
+
)
|
| 315 |
+
|
| 316 |
+
util_model_fields.append(
|
| 317 |
+
{
|
| 318 |
+
"id": "util_model_rl_input",
|
| 319 |
+
"title": "Input tokens per minute limit",
|
| 320 |
+
"description": "Limits the number of input tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 321 |
+
"type": "number",
|
| 322 |
+
"value": settings["util_model_rl_input"],
|
| 323 |
+
}
|
| 324 |
+
)
|
| 325 |
+
|
| 326 |
+
util_model_fields.append(
|
| 327 |
+
{
|
| 328 |
+
"id": "util_model_rl_output",
|
| 329 |
+
"title": "Output tokens per minute limit",
|
| 330 |
+
"description": "Limits the number of output tokens per minute to the utility model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 331 |
+
"type": "number",
|
| 332 |
+
"value": settings["util_model_rl_output"],
|
| 333 |
+
}
|
| 334 |
+
)
|
| 335 |
+
|
| 336 |
+
util_model_fields.append(
|
| 337 |
+
{
|
| 338 |
+
"id": "util_model_kwargs",
|
| 339 |
+
"title": "Utility model additional parameters",
|
| 340 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 341 |
+
"type": "textarea",
|
| 342 |
+
"value": _dict_to_env(settings["util_model_kwargs"]),
|
| 343 |
+
}
|
| 344 |
+
)
|
| 345 |
+
|
| 346 |
+
util_model_section: SettingsSection = {
|
| 347 |
+
"id": "util_model",
|
| 348 |
+
"title": "Utility model",
|
| 349 |
+
"description": "Smaller, cheaper, faster model for handling utility tasks like organizing memory, preparing prompts, summarizing.",
|
| 350 |
+
"fields": util_model_fields,
|
| 351 |
+
"tab": "agent",
|
| 352 |
+
}
|
| 353 |
+
|
| 354 |
+
# embedding model section
|
| 355 |
+
embed_model_fields: list[SettingsField] = []
|
| 356 |
+
embed_model_fields.append(
|
| 357 |
+
{
|
| 358 |
+
"id": "embed_model_provider",
|
| 359 |
+
"title": "Embedding model provider",
|
| 360 |
+
"description": "Select provider for embedding model used by the framework",
|
| 361 |
+
"type": "select",
|
| 362 |
+
"value": settings["embed_model_provider"],
|
| 363 |
+
"options": cast(list[FieldOption], get_providers("embedding")),
|
| 364 |
+
}
|
| 365 |
+
)
|
| 366 |
+
embed_model_fields.append(
|
| 367 |
+
{
|
| 368 |
+
"id": "embed_model_name",
|
| 369 |
+
"title": "Embedding model name",
|
| 370 |
+
"description": "Exact name of model from selected provider",
|
| 371 |
+
"type": "text",
|
| 372 |
+
"value": settings["embed_model_name"],
|
| 373 |
+
}
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
embed_model_fields.append(
|
| 377 |
+
{
|
| 378 |
+
"id": "embed_model_api_base",
|
| 379 |
+
"title": "Embedding model API base URL",
|
| 380 |
+
"description": "API base URL for embedding model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 381 |
+
"type": "text",
|
| 382 |
+
"value": settings["embed_model_api_base"],
|
| 383 |
+
}
|
| 384 |
+
)
|
| 385 |
+
|
| 386 |
+
embed_model_fields.append(
|
| 387 |
+
{
|
| 388 |
+
"id": "embed_model_rl_requests",
|
| 389 |
+
"title": "Requests per minute limit",
|
| 390 |
+
"description": "Limits the number of requests per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 391 |
+
"type": "number",
|
| 392 |
+
"value": settings["embed_model_rl_requests"],
|
| 393 |
+
}
|
| 394 |
+
)
|
| 395 |
+
|
| 396 |
+
embed_model_fields.append(
|
| 397 |
+
{
|
| 398 |
+
"id": "embed_model_rl_input",
|
| 399 |
+
"title": "Input tokens per minute limit",
|
| 400 |
+
"description": "Limits the number of input tokens per minute to the embedding model. Waits if the limit is exceeded. Set to 0 to disable rate limiting.",
|
| 401 |
+
"type": "number",
|
| 402 |
+
"value": settings["embed_model_rl_input"],
|
| 403 |
+
}
|
| 404 |
+
)
|
| 405 |
+
|
| 406 |
+
embed_model_fields.append(
|
| 407 |
+
{
|
| 408 |
+
"id": "embed_model_kwargs",
|
| 409 |
+
"title": "Embedding model additional parameters",
|
| 410 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 411 |
+
"type": "textarea",
|
| 412 |
+
"value": _dict_to_env(settings["embed_model_kwargs"]),
|
| 413 |
+
}
|
| 414 |
+
)
|
| 415 |
+
|
| 416 |
+
embed_model_section: SettingsSection = {
|
| 417 |
+
"id": "embed_model",
|
| 418 |
+
"title": "Embedding Model",
|
| 419 |
+
"description": f"Settings for the embedding model used by Agent Zero.<br><h4>⚠️ No need to change</h4>The default HuggingFace model {default_settings['embed_model_name']} is preloaded and runs locally within the docker container and there's no need to change it unless you have a specific requirements for embedding.",
|
| 420 |
+
"fields": embed_model_fields,
|
| 421 |
+
"tab": "agent",
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
# embedding model section
|
| 425 |
+
browser_model_fields: list[SettingsField] = []
|
| 426 |
+
browser_model_fields.append(
|
| 427 |
+
{
|
| 428 |
+
"id": "browser_model_provider",
|
| 429 |
+
"title": "Web Browser model provider",
|
| 430 |
+
"description": "Select provider for web browser model used by <a href='https://github.com/browser-use/browser-use' target='_blank'>browser-use</a> framework",
|
| 431 |
+
"type": "select",
|
| 432 |
+
"value": settings["browser_model_provider"],
|
| 433 |
+
"options": cast(list[FieldOption], get_providers("chat")),
|
| 434 |
+
}
|
| 435 |
+
)
|
| 436 |
+
browser_model_fields.append(
|
| 437 |
+
{
|
| 438 |
+
"id": "browser_model_name",
|
| 439 |
+
"title": "Web Browser model name",
|
| 440 |
+
"description": "Exact name of model from selected provider",
|
| 441 |
+
"type": "text",
|
| 442 |
+
"value": settings["browser_model_name"],
|
| 443 |
+
}
|
| 444 |
+
)
|
| 445 |
+
|
| 446 |
+
browser_model_fields.append(
|
| 447 |
+
{
|
| 448 |
+
"id": "browser_model_api_base",
|
| 449 |
+
"title": "Web Browser model API base URL",
|
| 450 |
+
"description": "API base URL for web browser model. Leave empty for default. Only relevant for Azure, local and custom (other) providers.",
|
| 451 |
+
"type": "text",
|
| 452 |
+
"value": settings["browser_model_api_base"],
|
| 453 |
+
}
|
| 454 |
+
)
|
| 455 |
+
|
| 456 |
+
browser_model_fields.append(
|
| 457 |
+
{
|
| 458 |
+
"id": "browser_model_vision",
|
| 459 |
+
"title": "Use Vision",
|
| 460 |
+
"description": "Models capable of Vision can use it to analyze web pages from screenshots. Increases quality but also token usage.",
|
| 461 |
+
"type": "switch",
|
| 462 |
+
"value": settings["browser_model_vision"],
|
| 463 |
+
}
|
| 464 |
+
)
|
| 465 |
+
|
| 466 |
+
browser_model_fields.append(
|
| 467 |
+
{
|
| 468 |
+
"id": "browser_model_rl_requests",
|
| 469 |
+
"title": "Web Browser model rate limit requests",
|
| 470 |
+
"description": "Rate limit requests for web browser model.",
|
| 471 |
+
"type": "number",
|
| 472 |
+
"value": settings["browser_model_rl_requests"],
|
| 473 |
+
}
|
| 474 |
+
)
|
| 475 |
+
|
| 476 |
+
browser_model_fields.append(
|
| 477 |
+
{
|
| 478 |
+
"id": "browser_model_rl_input",
|
| 479 |
+
"title": "Web Browser model rate limit input",
|
| 480 |
+
"description": "Rate limit input for web browser model.",
|
| 481 |
+
"type": "number",
|
| 482 |
+
"value": settings["browser_model_rl_input"],
|
| 483 |
+
}
|
| 484 |
+
)
|
| 485 |
+
|
| 486 |
+
browser_model_fields.append(
|
| 487 |
+
{
|
| 488 |
+
"id": "browser_model_rl_output",
|
| 489 |
+
"title": "Web Browser model rate limit output",
|
| 490 |
+
"description": "Rate limit output for web browser model.",
|
| 491 |
+
"type": "number",
|
| 492 |
+
"value": settings["browser_model_rl_output"],
|
| 493 |
+
}
|
| 494 |
+
)
|
| 495 |
+
|
| 496 |
+
browser_model_fields.append(
|
| 497 |
+
{
|
| 498 |
+
"id": "browser_model_kwargs",
|
| 499 |
+
"title": "Web Browser model additional parameters",
|
| 500 |
+
"description": "Any other parameters supported by <a href='https://docs.litellm.ai/docs/set_keys' target='_blank'>LiteLLM</a>. Format is KEY=VALUE on individual lines, just like .env file.",
|
| 501 |
+
"type": "textarea",
|
| 502 |
+
"value": _dict_to_env(settings["browser_model_kwargs"]),
|
| 503 |
+
}
|
| 504 |
+
)
|
| 505 |
+
|
| 506 |
+
browser_model_section: SettingsSection = {
|
| 507 |
+
"id": "browser_model",
|
| 508 |
+
"title": "Web Browser Model",
|
| 509 |
+
"description": "Settings for the web browser model. Agent Zero uses <a href='https://github.com/browser-use/browser-use' target='_blank'>browser-use</a> agentic framework to handle web interactions.",
|
| 510 |
+
"fields": browser_model_fields,
|
| 511 |
+
"tab": "agent",
|
| 512 |
+
}
|
| 513 |
+
|
| 514 |
+
# basic auth section
|
| 515 |
+
auth_fields: list[SettingsField] = []
|
| 516 |
+
|
| 517 |
+
auth_fields.append(
|
| 518 |
+
{
|
| 519 |
+
"id": "auth_login",
|
| 520 |
+
"title": "UI Login",
|
| 521 |
+
"description": "Set user name for web UI",
|
| 522 |
+
"type": "text",
|
| 523 |
+
"value": dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or "",
|
| 524 |
+
}
|
| 525 |
+
)
|
| 526 |
+
|
| 527 |
+
auth_fields.append(
|
| 528 |
+
{
|
| 529 |
+
"id": "auth_password",
|
| 530 |
+
"title": "UI Password",
|
| 531 |
+
"description": "Set user password for web UI",
|
| 532 |
+
"type": "password",
|
| 533 |
+
"value": (
|
| 534 |
+
PASSWORD_PLACEHOLDER
|
| 535 |
+
if dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD)
|
| 536 |
+
else ""
|
| 537 |
+
),
|
| 538 |
+
}
|
| 539 |
+
)
|
| 540 |
+
|
| 541 |
+
if runtime.is_dockerized():
|
| 542 |
+
auth_fields.append(
|
| 543 |
+
{
|
| 544 |
+
"id": "root_password",
|
| 545 |
+
"title": "root Password",
|
| 546 |
+
"description": "Change linux root password in docker container. This password can be used for SSH access. Original password was randomly generated during setup.",
|
| 547 |
+
"type": "password",
|
| 548 |
+
"value": "",
|
| 549 |
+
}
|
| 550 |
+
)
|
| 551 |
+
|
| 552 |
+
auth_section: SettingsSection = {
|
| 553 |
+
"id": "auth",
|
| 554 |
+
"title": "Authentication",
|
| 555 |
+
"description": "Settings for authentication to use Agent Zero Web UI.",
|
| 556 |
+
"fields": auth_fields,
|
| 557 |
+
"tab": "external",
|
| 558 |
+
}
|
| 559 |
+
|
| 560 |
+
# api keys model section
|
| 561 |
+
api_keys_fields: list[SettingsField] = []
|
| 562 |
+
|
| 563 |
+
# Collect unique providers from both chat and embedding sections
|
| 564 |
+
providers_seen: set[str] = set()
|
| 565 |
+
for p_type in ("chat", "embedding"):
|
| 566 |
+
for provider in get_providers(p_type):
|
| 567 |
+
pid_lower = provider["value"].lower()
|
| 568 |
+
if pid_lower in providers_seen:
|
| 569 |
+
continue
|
| 570 |
+
providers_seen.add(pid_lower)
|
| 571 |
+
api_keys_fields.append(
|
| 572 |
+
_get_api_key_field(settings, pid_lower, provider["label"])
|
| 573 |
+
)
|
| 574 |
+
|
| 575 |
+
api_keys_section: SettingsSection = {
|
| 576 |
+
"id": "api_keys",
|
| 577 |
+
"title": "API Keys",
|
| 578 |
+
"description": "API keys for model providers and services used by Agent Zero. You can set multiple API keys separated by a comma (,). They will be used in round-robin fashion.",
|
| 579 |
+
"fields": api_keys_fields,
|
| 580 |
+
"tab": "external",
|
| 581 |
+
}
|
| 582 |
+
|
| 583 |
+
# Agent config section
|
| 584 |
+
agent_fields: list[SettingsField] = []
|
| 585 |
+
|
| 586 |
+
agent_fields.append(
|
| 587 |
+
{
|
| 588 |
+
"id": "agent_profile",
|
| 589 |
+
"title": "Default agent profile",
|
| 590 |
+
"description": "Subdirectory of /agents folder to be used by default agent no. 0. Subordinate agents can be spawned with other profiles, that is on their superior agent to decide. This setting affects the behaviour of the top level agent you communicate with.",
|
| 591 |
+
"type": "select",
|
| 592 |
+
"value": settings["agent_profile"],
|
| 593 |
+
"options": [
|
| 594 |
+
{"value": subdir, "label": subdir}
|
| 595 |
+
for subdir in files.get_subdirectories("agents")
|
| 596 |
+
if subdir != "_example"
|
| 597 |
+
],
|
| 598 |
+
}
|
| 599 |
+
)
|
| 600 |
+
|
| 601 |
+
agent_fields.append(
|
| 602 |
+
{
|
| 603 |
+
"id": "agent_knowledge_subdir",
|
| 604 |
+
"title": "Knowledge subdirectory",
|
| 605 |
+
"description": "Subdirectory of /knowledge folder to use for agent knowledge import. 'default' subfolder is always imported and contains framework knowledge.",
|
| 606 |
+
"type": "select",
|
| 607 |
+
"value": settings["agent_knowledge_subdir"],
|
| 608 |
+
"options": [
|
| 609 |
+
{"value": subdir, "label": subdir}
|
| 610 |
+
for subdir in files.get_subdirectories("knowledge", exclude="default")
|
| 611 |
+
],
|
| 612 |
+
}
|
| 613 |
+
)
|
| 614 |
+
|
| 615 |
+
agent_section: SettingsSection = {
|
| 616 |
+
"id": "agent",
|
| 617 |
+
"title": "Agent Config",
|
| 618 |
+
"description": "Agent parameters.",
|
| 619 |
+
"fields": agent_fields,
|
| 620 |
+
"tab": "agent",
|
| 621 |
+
}
|
| 622 |
+
|
| 623 |
+
memory_fields: list[SettingsField] = []
|
| 624 |
+
|
| 625 |
+
memory_fields.append(
|
| 626 |
+
{
|
| 627 |
+
"id": "agent_memory_subdir",
|
| 628 |
+
"title": "Memory Subdirectory",
|
| 629 |
+
"description": "Subdirectory of /memory folder to use for agent memory storage. Used to separate memory storage between different instances.",
|
| 630 |
+
"type": "text",
|
| 631 |
+
"value": settings["agent_memory_subdir"],
|
| 632 |
+
# "options": [
|
| 633 |
+
# {"value": subdir, "label": subdir}
|
| 634 |
+
# for subdir in files.get_subdirectories("memory", exclude="embeddings")
|
| 635 |
+
# ],
|
| 636 |
+
}
|
| 637 |
+
)
|
| 638 |
+
|
| 639 |
+
memory_fields.append(
|
| 640 |
+
{
|
| 641 |
+
"id": "memory_recall_enabled",
|
| 642 |
+
"title": "Memory auto-recall enabled",
|
| 643 |
+
"description": "Agent Zero will automatically recall memories based on convesation context.",
|
| 644 |
+
"type": "switch",
|
| 645 |
+
"value": settings["memory_recall_enabled"],
|
| 646 |
+
}
|
| 647 |
+
)
|
| 648 |
+
|
| 649 |
+
memory_fields.append(
|
| 650 |
+
{
|
| 651 |
+
"id": "memory_recall_delayed",
|
| 652 |
+
"title": "Memory auto-recall delayed",
|
| 653 |
+
"description": "The agent will not wait for auto memory recall. Memories will be delivered one message later. This speeds up agent's response time but may result in less relevant first step.",
|
| 654 |
+
"type": "switch",
|
| 655 |
+
"value": settings["memory_recall_delayed"],
|
| 656 |
+
}
|
| 657 |
+
)
|
| 658 |
+
|
| 659 |
+
memory_fields.append(
|
| 660 |
+
{
|
| 661 |
+
"id": "memory_recall_query_prep",
|
| 662 |
+
"title": "Auto-recall AI query preparation",
|
| 663 |
+
"description": "Enables vector DB query preparation from conversation context by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
|
| 664 |
+
"type": "switch",
|
| 665 |
+
"value": settings["memory_recall_query_prep"],
|
| 666 |
+
}
|
| 667 |
+
)
|
| 668 |
+
|
| 669 |
+
memory_fields.append(
|
| 670 |
+
{
|
| 671 |
+
"id": "memory_recall_post_filter",
|
| 672 |
+
"title": "Auto-recall AI post-filtering",
|
| 673 |
+
"description": "Enables memory relevance filtering by utility LLM for auto-recall. Improves search quality, adds 1 utility LLM call per auto-recall.",
|
| 674 |
+
"type": "switch",
|
| 675 |
+
"value": settings["memory_recall_post_filter"],
|
| 676 |
+
}
|
| 677 |
+
)
|
| 678 |
+
|
| 679 |
+
memory_fields.append(
|
| 680 |
+
{
|
| 681 |
+
"id": "memory_recall_interval",
|
| 682 |
+
"title": "Memory auto-recall interval",
|
| 683 |
+
"description": "Memories are recalled after every user or superior agent message. During agent's monologue, memories are recalled every X turns based on this parameter.",
|
| 684 |
+
"type": "range",
|
| 685 |
+
"min": 1,
|
| 686 |
+
"max": 10,
|
| 687 |
+
"step": 1,
|
| 688 |
+
"value": settings["memory_recall_interval"],
|
| 689 |
+
}
|
| 690 |
+
)
|
| 691 |
+
|
| 692 |
+
memory_fields.append(
|
| 693 |
+
{
|
| 694 |
+
"id": "memory_recall_history_len",
|
| 695 |
+
"title": "Memory auto-recall history length",
|
| 696 |
+
"description": "The length of conversation history passed to memory recall LLM for context (in characters).",
|
| 697 |
+
"type": "number",
|
| 698 |
+
"value": settings["memory_recall_history_len"],
|
| 699 |
+
}
|
| 700 |
+
)
|
| 701 |
+
|
| 702 |
+
memory_fields.append(
|
| 703 |
+
{
|
| 704 |
+
"id": "memory_recall_similarity_threshold",
|
| 705 |
+
"title": "Memory auto-recall similarity threshold",
|
| 706 |
+
"description": "The threshold for similarity search in memory recall (0 = no similarity, 1 = exact match).",
|
| 707 |
+
"type": "range",
|
| 708 |
+
"min": 0,
|
| 709 |
+
"max": 1,
|
| 710 |
+
"step": 0.01,
|
| 711 |
+
"value": settings["memory_recall_similarity_threshold"],
|
| 712 |
+
}
|
| 713 |
+
)
|
| 714 |
+
|
| 715 |
+
memory_fields.append(
|
| 716 |
+
{
|
| 717 |
+
"id": "memory_recall_memories_max_search",
|
| 718 |
+
"title": "Memory auto-recall max memories to search",
|
| 719 |
+
"description": "The maximum number of memories returned by vector DB for further processing.",
|
| 720 |
+
"type": "number",
|
| 721 |
+
"value": settings["memory_recall_memories_max_search"],
|
| 722 |
+
}
|
| 723 |
+
)
|
| 724 |
+
|
| 725 |
+
memory_fields.append(
|
| 726 |
+
{
|
| 727 |
+
"id": "memory_recall_memories_max_result",
|
| 728 |
+
"title": "Memory auto-recall max memories to use",
|
| 729 |
+
"description": "The maximum number of memories to inject into A0's context window.",
|
| 730 |
+
"type": "number",
|
| 731 |
+
"value": settings["memory_recall_memories_max_result"],
|
| 732 |
+
}
|
| 733 |
+
)
|
| 734 |
+
|
| 735 |
+
memory_fields.append(
|
| 736 |
+
{
|
| 737 |
+
"id": "memory_recall_solutions_max_search",
|
| 738 |
+
"title": "Memory auto-recall max solutions to search",
|
| 739 |
+
"description": "The maximum number of solutions returned by vector DB for further processing.",
|
| 740 |
+
"type": "number",
|
| 741 |
+
"value": settings["memory_recall_solutions_max_search"],
|
| 742 |
+
}
|
| 743 |
+
)
|
| 744 |
+
|
| 745 |
+
memory_fields.append(
|
| 746 |
+
{
|
| 747 |
+
"id": "memory_recall_solutions_max_result",
|
| 748 |
+
"title": "Memory auto-recall max solutions to use",
|
| 749 |
+
"description": "The maximum number of solutions to inject into A0's context window.",
|
| 750 |
+
"type": "number",
|
| 751 |
+
"value": settings["memory_recall_solutions_max_result"],
|
| 752 |
+
}
|
| 753 |
+
)
|
| 754 |
+
|
| 755 |
+
memory_fields.append(
|
| 756 |
+
{
|
| 757 |
+
"id": "memory_memorize_enabled",
|
| 758 |
+
"title": "Auto-memorize enabled",
|
| 759 |
+
"description": "A0 will automatically memorize facts and solutions from conversation history.",
|
| 760 |
+
"type": "switch",
|
| 761 |
+
"value": settings["memory_memorize_enabled"],
|
| 762 |
+
}
|
| 763 |
+
)
|
| 764 |
+
|
| 765 |
+
memory_fields.append(
|
| 766 |
+
{
|
| 767 |
+
"id": "memory_memorize_consolidation",
|
| 768 |
+
"title": "Auto-memorize AI consolidation",
|
| 769 |
+
"description": "A0 will automatically consolidate similar memories using utility LLM. Improves memory quality over time, adds 2 utility LLM calls per memory.",
|
| 770 |
+
"type": "switch",
|
| 771 |
+
"value": settings["memory_memorize_consolidation"],
|
| 772 |
+
}
|
| 773 |
+
)
|
| 774 |
+
|
| 775 |
+
memory_fields.append(
|
| 776 |
+
{
|
| 777 |
+
"id": "memory_memorize_replace_threshold",
|
| 778 |
+
"title": "Auto-memorize replacement threshold",
|
| 779 |
+
"description": "Only applies when AI consolidation is disabled. Replaces previous similar memories with new ones based on this threshold. 0 = replace even if not similar at all, 1 = replace only if exact match.",
|
| 780 |
+
"type": "range",
|
| 781 |
+
"min": 0,
|
| 782 |
+
"max": 1,
|
| 783 |
+
"step": 0.01,
|
| 784 |
+
"value": settings["memory_memorize_replace_threshold"],
|
| 785 |
+
}
|
| 786 |
+
)
|
| 787 |
+
|
| 788 |
+
memory_section: SettingsSection = {
|
| 789 |
+
"id": "memory",
|
| 790 |
+
"title": "Memory",
|
| 791 |
+
"description": "Configuration of A0's memory system. A0 memorizes and recalls memories automatically to help it's context awareness.",
|
| 792 |
+
"fields": memory_fields,
|
| 793 |
+
"tab": "agent",
|
| 794 |
+
}
|
| 795 |
+
|
| 796 |
+
dev_fields: list[SettingsField] = []
|
| 797 |
+
|
| 798 |
+
dev_fields.append(
|
| 799 |
+
{
|
| 800 |
+
"id": "shell_interface",
|
| 801 |
+
"title": "Shell Interface",
|
| 802 |
+
"description": "Terminal interface used for Code Execution Tool. Local Python TTY works locally in both dockerized and development environments. SSH always connects to dockerized environment (automatically at localhost or RFC host address).",
|
| 803 |
+
"type": "select",
|
| 804 |
+
"value": settings["shell_interface"],
|
| 805 |
+
"options": [{"value": "local", "label": "Local Python TTY"}, {"value": "ssh", "label": "SSH"}],
|
| 806 |
+
}
|
| 807 |
+
)
|
| 808 |
+
|
| 809 |
+
if runtime.is_development():
|
| 810 |
+
# dev_fields.append(
|
| 811 |
+
# {
|
| 812 |
+
# "id": "rfc_auto_docker",
|
| 813 |
+
# "title": "RFC Auto Docker Management",
|
| 814 |
+
# "description": "Automatically create dockerized instance of A0 for RFCs using this instance's code base and, settings and .env.",
|
| 815 |
+
# "type": "text",
|
| 816 |
+
# "value": settings["rfc_auto_docker"],
|
| 817 |
+
# }
|
| 818 |
+
# )
|
| 819 |
+
|
| 820 |
+
dev_fields.append(
|
| 821 |
+
{
|
| 822 |
+
"id": "rfc_url",
|
| 823 |
+
"title": "RFC Destination URL",
|
| 824 |
+
"description": "URL of dockerized A0 instance for remote function calls. Do not specify port here.",
|
| 825 |
+
"type": "text",
|
| 826 |
+
"value": settings["rfc_url"],
|
| 827 |
+
}
|
| 828 |
+
)
|
| 829 |
+
|
| 830 |
+
dev_fields.append(
|
| 831 |
+
{
|
| 832 |
+
"id": "rfc_password",
|
| 833 |
+
"title": "RFC Password",
|
| 834 |
+
"description": "Password for remote function calls. Passwords must match on both Flare instances. RFCs can not be used with empty password.",
|
| 835 |
+
"type": "password",
|
| 836 |
+
"value": (
|
| 837 |
+
PASSWORD_PLACEHOLDER
|
| 838 |
+
if dotenv.get_dotenv_value(dotenv.KEY_RFC_PASSWORD)
|
| 839 |
+
else ""
|
| 840 |
+
),
|
| 841 |
+
}
|
| 842 |
+
)
|
| 843 |
+
|
| 844 |
+
if runtime.is_development():
|
| 845 |
+
dev_fields.append(
|
| 846 |
+
{
|
| 847 |
+
"id": "rfc_port_http",
|
| 848 |
+
"title": "RFC HTTP port",
|
| 849 |
+
"description": "HTTP port for dockerized instance of A0.",
|
| 850 |
+
"type": "text",
|
| 851 |
+
"value": settings["rfc_port_http"],
|
| 852 |
+
}
|
| 853 |
+
)
|
| 854 |
+
|
| 855 |
+
dev_fields.append(
|
| 856 |
+
{
|
| 857 |
+
"id": "rfc_port_ssh",
|
| 858 |
+
"title": "RFC SSH port",
|
| 859 |
+
"description": "SSH port for dockerized instance of A0.",
|
| 860 |
+
"type": "text",
|
| 861 |
+
"value": settings["rfc_port_ssh"],
|
| 862 |
+
}
|
| 863 |
+
)
|
| 864 |
+
|
| 865 |
+
dev_section: SettingsSection = {
|
| 866 |
+
"id": "dev",
|
| 867 |
+
"title": "Development",
|
| 868 |
+
"description": "Parameters for A0 framework development. RFCs (remote function calls) are used to call functions on another A0 instance. You can develop and debug A0 natively on your local system while redirecting some functions to A0 instance in docker. This is crucial for development as A0 needs to run in standardized environment to support all features.",
|
| 869 |
+
"fields": dev_fields,
|
| 870 |
+
"tab": "developer",
|
| 871 |
+
}
|
| 872 |
+
|
| 873 |
+
# code_exec_fields: list[SettingsField] = []
|
| 874 |
+
|
| 875 |
+
# code_exec_fields.append(
|
| 876 |
+
# {
|
| 877 |
+
# "id": "code_exec_ssh_enabled",
|
| 878 |
+
# "title": "Use SSH for code execution",
|
| 879 |
+
# "description": "Code execution will use SSH to connect to the terminal. When disabled, a local python terminal interface is used instead. SSH should only be used in development environment or when encountering issues with the local python terminal interface.",
|
| 880 |
+
# "type": "switch",
|
| 881 |
+
# "value": settings["code_exec_ssh_enabled"],
|
| 882 |
+
# }
|
| 883 |
+
# )
|
| 884 |
+
|
| 885 |
+
# code_exec_fields.append(
|
| 886 |
+
# {
|
| 887 |
+
# "id": "code_exec_ssh_addr",
|
| 888 |
+
# "title": "Code execution SSH address",
|
| 889 |
+
# "description": "Address of the SSH server for code execution. Only applies when SSH is enabled.",
|
| 890 |
+
# "type": "text",
|
| 891 |
+
# "value": settings["code_exec_ssh_addr"],
|
| 892 |
+
# }
|
| 893 |
+
# )
|
| 894 |
+
|
| 895 |
+
# code_exec_fields.append(
|
| 896 |
+
# {
|
| 897 |
+
# "id": "code_exec_ssh_port",
|
| 898 |
+
# "title": "Code execution SSH port",
|
| 899 |
+
# "description": "Port of the SSH server for code execution. Only applies when SSH is enabled.",
|
| 900 |
+
# "type": "text",
|
| 901 |
+
# "value": settings["code_exec_ssh_port"],
|
| 902 |
+
# }
|
| 903 |
+
# )
|
| 904 |
+
|
| 905 |
+
# code_exec_section: SettingsSection = {
|
| 906 |
+
# "id": "code_exec",
|
| 907 |
+
# "title": "Code execution",
|
| 908 |
+
# "description": "Configuration of code execution by the agent.",
|
| 909 |
+
# "fields": code_exec_fields,
|
| 910 |
+
# "tab": "developer",
|
| 911 |
+
# }
|
| 912 |
+
|
| 913 |
+
# Speech to text section
|
| 914 |
+
stt_fields: list[SettingsField] = []
|
| 915 |
+
|
| 916 |
+
stt_fields.append(
|
| 917 |
+
{
|
| 918 |
+
"id": "stt_microphone_section",
|
| 919 |
+
"title": "Microphone device",
|
| 920 |
+
"description": "Select the microphone device to use for speech-to-text.",
|
| 921 |
+
"value": "<x-component path='/settings/speech/microphone.html' />",
|
| 922 |
+
"type": "html",
|
| 923 |
+
}
|
| 924 |
+
)
|
| 925 |
+
|
| 926 |
+
stt_fields.append(
|
| 927 |
+
{
|
| 928 |
+
"id": "stt_model_size",
|
| 929 |
+
"title": "Speech-to-text model size",
|
| 930 |
+
"description": "Select the speech-to-text model size",
|
| 931 |
+
"type": "select",
|
| 932 |
+
"value": settings["stt_model_size"],
|
| 933 |
+
"options": [
|
| 934 |
+
{"value": "tiny", "label": "Tiny (39M, English)"},
|
| 935 |
+
{"value": "base", "label": "Base (74M, English)"},
|
| 936 |
+
{"value": "small", "label": "Small (244M, English)"},
|
| 937 |
+
{"value": "medium", "label": "Medium (769M, English)"},
|
| 938 |
+
{"value": "large", "label": "Large (1.5B, Multilingual)"},
|
| 939 |
+
{"value": "turbo", "label": "Turbo (Multilingual)"},
|
| 940 |
+
],
|
| 941 |
+
}
|
| 942 |
+
)
|
| 943 |
+
|
| 944 |
+
stt_fields.append(
|
| 945 |
+
{
|
| 946 |
+
"id": "stt_language",
|
| 947 |
+
"title": "Speech-to-text language code",
|
| 948 |
+
"description": "Language code (e.g. en, fr, it)",
|
| 949 |
+
"type": "text",
|
| 950 |
+
"value": settings["stt_language"],
|
| 951 |
+
}
|
| 952 |
+
)
|
| 953 |
+
|
| 954 |
+
stt_fields.append(
|
| 955 |
+
{
|
| 956 |
+
"id": "stt_silence_threshold",
|
| 957 |
+
"title": "Microphone silence threshold",
|
| 958 |
+
"description": "Silence detection threshold. Lower values are more sensitive to noise.",
|
| 959 |
+
"type": "range",
|
| 960 |
+
"min": 0,
|
| 961 |
+
"max": 1,
|
| 962 |
+
"step": 0.01,
|
| 963 |
+
"value": settings["stt_silence_threshold"],
|
| 964 |
+
}
|
| 965 |
+
)
|
| 966 |
+
|
| 967 |
+
stt_fields.append(
|
| 968 |
+
{
|
| 969 |
+
"id": "stt_silence_duration",
|
| 970 |
+
"title": "Microphone silence duration (ms)",
|
| 971 |
+
"description": "Duration of silence before the system considers speaking to have ended.",
|
| 972 |
+
"type": "text",
|
| 973 |
+
"value": settings["stt_silence_duration"],
|
| 974 |
+
}
|
| 975 |
+
)
|
| 976 |
+
|
| 977 |
+
stt_fields.append(
|
| 978 |
+
{
|
| 979 |
+
"id": "stt_waiting_timeout",
|
| 980 |
+
"title": "Microphone waiting timeout (ms)",
|
| 981 |
+
"description": "Duration of silence before the system closes the microphone.",
|
| 982 |
+
"type": "text",
|
| 983 |
+
"value": settings["stt_waiting_timeout"],
|
| 984 |
+
}
|
| 985 |
+
)
|
| 986 |
+
|
| 987 |
+
# TTS fields
|
| 988 |
+
tts_fields: list[SettingsField] = []
|
| 989 |
+
|
| 990 |
+
tts_fields.append(
|
| 991 |
+
{
|
| 992 |
+
"id": "tts_kokoro",
|
| 993 |
+
"title": "Enable Kokoro TTS",
|
| 994 |
+
"description": "Enable higher quality server-side AI (Kokoro) instead of browser-based text-to-speech.",
|
| 995 |
+
"type": "switch",
|
| 996 |
+
"value": settings["tts_kokoro"],
|
| 997 |
+
}
|
| 998 |
+
)
|
| 999 |
+
|
| 1000 |
+
speech_section: SettingsSection = {
|
| 1001 |
+
"id": "speech",
|
| 1002 |
+
"title": "Speech",
|
| 1003 |
+
"description": "Voice transcription and speech synthesis settings.",
|
| 1004 |
+
"fields": stt_fields + tts_fields,
|
| 1005 |
+
"tab": "agent",
|
| 1006 |
+
}
|
| 1007 |
+
|
| 1008 |
+
# MCP section
|
| 1009 |
+
mcp_client_fields: list[SettingsField] = []
|
| 1010 |
+
|
| 1011 |
+
mcp_client_fields.append(
|
| 1012 |
+
{
|
| 1013 |
+
"id": "mcp_servers_config",
|
| 1014 |
+
"title": "MCP Servers Configuration",
|
| 1015 |
+
"description": "External MCP servers can be configured here.",
|
| 1016 |
+
"type": "button",
|
| 1017 |
+
"value": "Open",
|
| 1018 |
+
}
|
| 1019 |
+
)
|
| 1020 |
+
|
| 1021 |
+
mcp_client_fields.append(
|
| 1022 |
+
{
|
| 1023 |
+
"id": "mcp_servers",
|
| 1024 |
+
"title": "MCP Servers",
|
| 1025 |
+
"description": "(JSON list of) >> RemoteServer <<: [name, url, headers, timeout (opt), sse_read_timeout (opt), disabled (opt)] / >> Local Server <<: [name, command, args, env, encoding (opt), encoding_error_handler (opt), disabled (opt)]",
|
| 1026 |
+
"type": "textarea",
|
| 1027 |
+
"value": settings["mcp_servers"],
|
| 1028 |
+
"hidden": True,
|
| 1029 |
+
}
|
| 1030 |
+
)
|
| 1031 |
+
|
| 1032 |
+
mcp_client_fields.append(
|
| 1033 |
+
{
|
| 1034 |
+
"id": "mcp_client_init_timeout",
|
| 1035 |
+
"title": "MCP Client Init Timeout",
|
| 1036 |
+
"description": "Timeout for MCP client initialization (in seconds). Higher values might be required for complex MCPs, but might also slowdown system startup.",
|
| 1037 |
+
"type": "number",
|
| 1038 |
+
"value": settings["mcp_client_init_timeout"],
|
| 1039 |
+
}
|
| 1040 |
+
)
|
| 1041 |
+
|
| 1042 |
+
mcp_client_fields.append(
|
| 1043 |
+
{
|
| 1044 |
+
"id": "mcp_client_tool_timeout",
|
| 1045 |
+
"title": "MCP Client Tool Timeout",
|
| 1046 |
+
"description": "Timeout for MCP client tool execution. Higher values might be required for complex tools, but might also result in long responses with failing tools.",
|
| 1047 |
+
"type": "number",
|
| 1048 |
+
"value": settings["mcp_client_tool_timeout"],
|
| 1049 |
+
}
|
| 1050 |
+
)
|
| 1051 |
+
|
| 1052 |
+
mcp_client_section: SettingsSection = {
|
| 1053 |
+
"id": "mcp_client",
|
| 1054 |
+
"title": "External MCP Servers",
|
| 1055 |
+
"description": "Agent Zero can use external MCP servers, local or remote as tools.",
|
| 1056 |
+
"fields": mcp_client_fields,
|
| 1057 |
+
"tab": "mcp",
|
| 1058 |
+
}
|
| 1059 |
+
|
| 1060 |
+
mcp_server_fields: list[SettingsField] = []
|
| 1061 |
+
|
| 1062 |
+
mcp_server_fields.append(
|
| 1063 |
+
{
|
| 1064 |
+
"id": "mcp_server_enabled",
|
| 1065 |
+
"title": "Enable A0 MCP Server",
|
| 1066 |
+
"description": "Expose Agent Zero as an SSE/HTTP MCP server. This will make this A0 instance available to MCP clients.",
|
| 1067 |
+
"type": "switch",
|
| 1068 |
+
"value": settings["mcp_server_enabled"],
|
| 1069 |
+
}
|
| 1070 |
+
)
|
| 1071 |
+
|
| 1072 |
+
mcp_server_fields.append(
|
| 1073 |
+
{
|
| 1074 |
+
"id": "mcp_server_token",
|
| 1075 |
+
"title": "MCP Server Token",
|
| 1076 |
+
"description": "Token for MCP server authentication.",
|
| 1077 |
+
"type": "text",
|
| 1078 |
+
"hidden": True,
|
| 1079 |
+
"value": settings["mcp_server_token"],
|
| 1080 |
+
}
|
| 1081 |
+
)
|
| 1082 |
+
|
| 1083 |
+
mcp_server_section: SettingsSection = {
|
| 1084 |
+
"id": "mcp_server",
|
| 1085 |
+
"title": "A0 MCP Server",
|
| 1086 |
+
"description": "Agent Zero can be exposed as an SSE MCP server. See <a href=\"javascript:openModal('settings/mcp/server/example.html')\">connection example</a>.",
|
| 1087 |
+
"fields": mcp_server_fields,
|
| 1088 |
+
"tab": "mcp",
|
| 1089 |
+
}
|
| 1090 |
+
|
| 1091 |
+
# -------- A2A Section --------
|
| 1092 |
+
a2a_fields: list[SettingsField] = []
|
| 1093 |
+
|
| 1094 |
+
a2a_fields.append(
|
| 1095 |
+
{
|
| 1096 |
+
"id": "a2a_server_enabled",
|
| 1097 |
+
"title": "Enable A2A server",
|
| 1098 |
+
"description": "Expose Agent Zero as A2A server. This allows other agents to connect to A0 via A2A protocol.",
|
| 1099 |
+
"type": "switch",
|
| 1100 |
+
"value": settings["a2a_server_enabled"],
|
| 1101 |
+
}
|
| 1102 |
+
)
|
| 1103 |
+
|
| 1104 |
+
a2a_section: SettingsSection = {
|
| 1105 |
+
"id": "a2a_server",
|
| 1106 |
+
"title": "A0 A2A Server",
|
| 1107 |
+
"description": "Agent Zero can be exposed as an A2A server. See <a href=\"javascript:openModal('settings/a2a/a2a-connection.html')\">connection example</a>.",
|
| 1108 |
+
"fields": a2a_fields,
|
| 1109 |
+
"tab": "mcp",
|
| 1110 |
+
}
|
| 1111 |
+
|
| 1112 |
+
|
| 1113 |
+
# External API section
|
| 1114 |
+
external_api_fields: list[SettingsField] = []
|
| 1115 |
+
|
| 1116 |
+
external_api_fields.append(
|
| 1117 |
+
{
|
| 1118 |
+
"id": "external_api_examples",
|
| 1119 |
+
"title": "API Examples",
|
| 1120 |
+
"description": "View examples for using Agent Zero's external API endpoints with API key authentication.",
|
| 1121 |
+
"type": "button",
|
| 1122 |
+
"value": "Show API Examples",
|
| 1123 |
+
}
|
| 1124 |
+
)
|
| 1125 |
+
|
| 1126 |
+
external_api_section: SettingsSection = {
|
| 1127 |
+
"id": "external_api",
|
| 1128 |
+
"title": "External API",
|
| 1129 |
+
"description": "Agent Zero provides external API endpoints for integration with other applications. "
|
| 1130 |
+
"These endpoints use API key authentication and support text messages and file attachments.",
|
| 1131 |
+
"fields": external_api_fields,
|
| 1132 |
+
"tab": "external",
|
| 1133 |
+
}
|
| 1134 |
+
|
| 1135 |
+
# Backup & Restore section
|
| 1136 |
+
backup_fields: list[SettingsField] = []
|
| 1137 |
+
|
| 1138 |
+
backup_fields.append(
|
| 1139 |
+
{
|
| 1140 |
+
"id": "backup_create",
|
| 1141 |
+
"title": "Create Backup",
|
| 1142 |
+
"description": "Create a backup archive of selected files and configurations "
|
| 1143 |
+
"using customizable patterns.",
|
| 1144 |
+
"type": "button",
|
| 1145 |
+
"value": "Create Backup",
|
| 1146 |
+
}
|
| 1147 |
+
)
|
| 1148 |
+
|
| 1149 |
+
backup_fields.append(
|
| 1150 |
+
{
|
| 1151 |
+
"id": "backup_restore",
|
| 1152 |
+
"title": "Restore from Backup",
|
| 1153 |
+
"description": "Restore files and configurations from a backup archive "
|
| 1154 |
+
"with pattern-based selection.",
|
| 1155 |
+
"type": "button",
|
| 1156 |
+
"value": "Restore Backup",
|
| 1157 |
+
}
|
| 1158 |
+
)
|
| 1159 |
+
|
| 1160 |
+
backup_section: SettingsSection = {
|
| 1161 |
+
"id": "backup_restore",
|
| 1162 |
+
"title": "Backup & Restore",
|
| 1163 |
+
"description": "Backup and restore Agent Zero data and configurations "
|
| 1164 |
+
"using glob pattern-based file selection.",
|
| 1165 |
+
"fields": backup_fields,
|
| 1166 |
+
"tab": "backup",
|
| 1167 |
+
}
|
| 1168 |
+
|
| 1169 |
+
# Add the section to the result
|
| 1170 |
+
result: SettingsOutput = {
|
| 1171 |
+
"sections": [
|
| 1172 |
+
agent_section,
|
| 1173 |
+
chat_model_section,
|
| 1174 |
+
util_model_section,
|
| 1175 |
+
browser_model_section,
|
| 1176 |
+
embed_model_section,
|
| 1177 |
+
memory_section,
|
| 1178 |
+
speech_section,
|
| 1179 |
+
api_keys_section,
|
| 1180 |
+
auth_section,
|
| 1181 |
+
mcp_client_section,
|
| 1182 |
+
mcp_server_section,
|
| 1183 |
+
a2a_section,
|
| 1184 |
+
external_api_section,
|
| 1185 |
+
backup_section,
|
| 1186 |
+
dev_section,
|
| 1187 |
+
# code_exec_section,
|
| 1188 |
+
]
|
| 1189 |
+
}
|
| 1190 |
+
return result
|
| 1191 |
+
|
| 1192 |
+
|
| 1193 |
+
def _get_api_key_field(settings: Settings, provider: str, title: str) -> SettingsField:
    """Build the settings-UI field descriptor for one provider's API key.

    The real key is never sent to the UI: if a key is already stored, the
    field value is the placeholder constant so the client only sees that a
    key exists, not its content.
    """
    stored_key = settings["api_keys"].get(provider, models.get_api_key(provider))
    has_key = bool(stored_key) and stored_key != "None"
    return {
        "id": f"api_key_{provider}",
        "title": title,
        "type": "text",
        "value": (API_KEY_PLACEHOLDER if has_key else ""),
    }
|
| 1202 |
+
|
| 1203 |
+
|
| 1204 |
+
def convert_in(settings: dict) -> Settings:
    """Merge settings submitted from the UI form back into the stored Settings.

    Placeholder values (masked passwords / API keys) mean "unchanged" and are
    skipped. ``*_kwargs`` fields arrive as dotenv-style text and are parsed
    into dicts; ``api_key_*`` fields are collected under ``api_keys``.
    """
    merged = get_settings()
    for section in settings["sections"]:
        for field in section.get("fields", []):
            value = field["value"]
            # Placeholder means the user left the secret untouched — keep stored value.
            if value == PASSWORD_PLACEHOLDER or value == API_KEY_PLACEHOLDER:
                continue
            field_id = field["id"]
            if field_id.endswith("_kwargs"):
                merged[field_id] = _env_to_dict(value)
            elif field_id.startswith("api_key_"):
                merged["api_keys"][field_id] = value
            else:
                merged[field_id] = value
    return merged
|
| 1223 |
+
|
| 1224 |
+
|
| 1225 |
+
def get_settings() -> Settings:
    """Return the active settings, lazily loading them on first access.

    Loads from the settings file if present, otherwise falls back to
    defaults; the result is always normalized before being returned.
    """
    global _settings
    if not _settings:
        _settings = _read_settings_file() or get_default_settings()
    return normalize_settings(_settings)
|
| 1233 |
+
|
| 1234 |
+
|
| 1235 |
+
def set_settings(settings: Settings, apply: bool = True):
    """Replace the stored settings and optionally apply them to running agents.

    The new settings are normalized and persisted to disk; when ``apply`` is
    True, running contexts are reconfigured against the previous snapshot.
    """
    global _settings
    old_settings = _settings
    _settings = normalize_settings(settings)
    _write_settings_file(_settings)
    if apply:
        _apply_settings(old_settings)
|
| 1242 |
+
|
| 1243 |
+
|
| 1244 |
+
def set_settings_delta(delta: dict, apply: bool = True):
    """Apply a partial update on top of the current settings and persist it."""
    updated = {**get_settings(), **delta}
    set_settings(updated, apply)  # type: ignore
|
| 1248 |
+
|
| 1249 |
+
|
| 1250 |
+
def normalize_settings(settings: Settings) -> Settings:
    # Return a normalized copy of `settings`; the input dict is not mutated.
    # Normalization: provider labels mapped to ids, version migrations applied,
    # unknown keys dropped, missing keys filled from defaults, value types
    # coerced to the default's type, and the MCP server token regenerated.
    copy = settings.copy()
    default = get_default_settings()

    # Automatically use BLABLADOR_API_KEY for 'other' provider if available
    # NOTE(review): side effect — mutates process env vars; setdefault only
    # fills them when not already set by the user.
    blablador_key = os.getenv("BLABLADOR_API_KEY")
    if blablador_key:
        os.environ.setdefault("OTHER_API_KEY", blablador_key)
        os.environ.setdefault("API_KEY_OTHER", blablador_key)
        os.environ.setdefault("OPENAI_API_KEY", blablador_key)
        os.environ.setdefault("API_KEY_OPENAI", blablador_key)

    # Robustly handle provider name if it's the label instead of ID
    # (older saved settings / UI submissions may carry display labels).
    label_to_id = {
        "Other OpenAI compatible": "other",
        "OpenAI": "openai",
        "Anthropic": "anthropic",
        "Google": "google",
        "DeepSeek": "deepseek",
        "Groq": "groq",
        "HuggingFace": "huggingface",
        "LM Studio": "lm_studio",
        "Mistral AI": "mistral",
        "Ollama": "ollama",
        "OpenRouter": "openrouter",
        "Sambanova": "sambanova",
        "Venice": "venice"
    }

    for key in ["chat_model_provider", "util_model_provider", "embed_model_provider", "browser_model_provider"]:
        if key in copy and copy[key] in label_to_id:
            print(f"DEBUG: Normalizing {key} from '{copy[key]}' to '{label_to_id[copy[key]]}'")
            copy[key] = label_to_id[copy[key]]

    # adjust settings values to match current version if needed
    if "version" not in copy or copy["version"] != default["version"]:
        _adjust_to_version(copy, default)
        copy["version"] = default["version"]  # sync version

    # remove keys that are not in default
    keys_to_remove = [key for key in copy if key not in default]
    for key in keys_to_remove:
        del copy[key]

    # add missing keys and normalize types
    for key, value in default.items():
        if key not in copy:
            copy[key] = value
        else:
            try:
                # coerce stored value to the default's type (e.g. "10" -> 10)
                copy[key] = type(value)(copy[key])  # type: ignore
                if isinstance(copy[key], str):
                    copy[key] = copy[key].strip()  # strip strings
            except (ValueError, TypeError):
                copy[key] = value  # make default instead

    # mcp server token is set automatically (derived from auth credentials)
    copy["mcp_server_token"] = create_auth_token()

    return copy
|
| 1310 |
+
|
| 1311 |
+
|
| 1312 |
+
def _adjust_to_version(settings: Settings, default: Settings):
    """Migrate stored settings across version changes.

    Since v0.9 the default prompt profile for agent no. 0 is "agent0";
    settings saved by v0.8 (or with no version at all) that still use the
    old "default" profile are switched over.
    """
    from_pre_09 = "version" not in settings or settings["version"].startswith("v0.8")
    if not from_pre_09:
        return
    if "agent_profile" not in settings or settings["agent_profile"] == "default":
        settings["agent_profile"] = "agent0"
|
| 1318 |
+
|
| 1319 |
+
|
| 1320 |
+
def _read_settings_file() -> Settings | None:
    """Load settings from SETTINGS_FILE.

    Returns:
        The normalized settings dict, or None when the file does not exist
        or contains invalid JSON (so the caller can fall back to defaults
        instead of the whole app crashing on a corrupt file).
    """
    if not os.path.exists(SETTINGS_FILE):
        return None
    content = files.read_file(SETTINGS_FILE)
    try:
        parsed = json.loads(content)
    except json.JSONDecodeError as e:
        # A corrupt settings file previously crashed startup; report and
        # let get_settings() fall back to defaults instead.
        PrintStyle(font_color="red").print(f"Settings file is corrupted, using defaults: {e}")
        return None
    return normalize_settings(parsed)
|
| 1325 |
+
|
| 1326 |
+
|
| 1327 |
+
def _write_settings_file(settings: Settings):
    """Persist settings to disk.

    Secrets are first flushed to the .env store and then blanked out of the
    working copy, so the JSON file on disk never contains sensitive values.
    """
    snapshot = settings.copy()
    _write_sensitive_settings(snapshot)
    _remove_sensitive_settings(snapshot)

    # write settings (pretty-printed for hand-editing)
    files.write_file(SETTINGS_FILE, json.dumps(snapshot, indent=4))
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
def _remove_sensitive_settings(settings: Settings):
    """Blank out every secret so it never lands in the JSON settings file."""
    settings["api_keys"] = {}
    for secret in ("auth_login", "auth_password", "rfc_password", "root_password", "mcp_server_token"):
        settings[secret] = ""
|
| 1344 |
+
|
| 1345 |
+
|
| 1346 |
+
def _write_sensitive_settings(settings: Settings):
    """Persist secret values into the .env store (and the OS root password).

    Empty passwords are not written so that an unchanged (blank) form field
    does not wipe an existing credential; the login name is always written.
    """
    for key, val in settings["api_keys"].items():
        dotenv.save_dotenv_value(key.upper(), val)

    dotenv.save_dotenv_value(dotenv.KEY_AUTH_LOGIN, settings["auth_login"])
    if settings["auth_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_AUTH_PASSWORD, settings["auth_password"])
    if settings["rfc_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_RFC_PASSWORD, settings["rfc_password"])

    # Fix: the original checked settings["root_password"] twice in a row;
    # merged into a single guard covering both the .env write and chpasswd.
    if settings["root_password"]:
        dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, settings["root_password"])
        set_root_password(settings["root_password"])
|
| 1360 |
+
|
| 1361 |
+
|
| 1362 |
+
def get_default_settings() -> Settings:
    # Factory for a fresh Settings object with the project's defaults.
    # Grouped by concern; values here are overridden by anything stored in
    # the settings file (see get_settings / normalize_settings).
    return Settings(
        version=_get_version(),
        # -- chat model (main conversational LLM) --
        chat_model_provider="openrouter",
        chat_model_name="openai/gpt-4.1",
        chat_model_api_base="",
        chat_model_kwargs={"temperature": "0"},
        chat_model_ctx_length=100000,
        chat_model_ctx_history=0.7,  # fraction of context reserved for history
        chat_model_vision=True,
        chat_model_rl_requests=0,  # rate limits; 0 = unlimited
        chat_model_rl_input=0,
        chat_model_rl_output=0,
        # -- utility model (cheaper model for internal tasks) --
        util_model_provider="openrouter",
        util_model_name="openai/gpt-4.1-mini",
        util_model_api_base="",
        util_model_ctx_length=100000,
        util_model_ctx_input=0.7,
        util_model_kwargs={"temperature": "0"},
        util_model_rl_requests=0,
        util_model_rl_input=0,
        util_model_rl_output=0,
        # -- embedding model --
        embed_model_provider="huggingface",
        embed_model_name="sentence-transformers/all-MiniLM-L6-v2",
        embed_model_api_base="",
        embed_model_kwargs={},
        embed_model_rl_requests=0,
        embed_model_rl_input=0,
        # -- browser-use model --
        browser_model_provider="openrouter",
        browser_model_name="openai/gpt-4.1",
        browser_model_api_base="",
        browser_model_vision=True,
        browser_model_rl_requests=0,
        browser_model_rl_input=0,
        browser_model_rl_output=0,
        browser_model_kwargs={"temperature": "0"},
        # -- memory recall / memorization --
        memory_recall_enabled=True,
        memory_recall_delayed=False,
        memory_recall_interval=3,
        memory_recall_history_len=10000,
        memory_recall_memories_max_search=12,
        memory_recall_solutions_max_search=8,
        memory_recall_memories_max_result=5,
        memory_recall_solutions_max_result=3,
        memory_recall_similarity_threshold=0.7,
        memory_recall_query_prep=True,
        memory_recall_post_filter=True,
        memory_memorize_enabled=True,
        memory_memorize_consolidation=True,
        memory_memorize_replace_threshold=0.9,
        # -- auth / secrets (populated from .env at runtime) --
        api_keys={},
        auth_login="",
        auth_password="",
        root_password="",
        # -- agent profile & storage subdirs --
        agent_profile="agent0",
        agent_memory_subdir="default",
        agent_knowledge_subdir="custom",
        # -- remote function call (RFC) endpoints --
        rfc_auto_docker=True,
        rfc_url="localhost",
        rfc_password="",
        rfc_port_http=55080,
        rfc_port_ssh=55022,
        # inside docker a local shell is enough; otherwise reach it over SSH
        shell_interface="local" if runtime.is_dockerized() else "ssh",
        # -- speech-to-text / text-to-speech --
        stt_model_size="base",
        stt_language="en",
        stt_silence_threshold=0.3,
        stt_silence_duration=1000,
        stt_waiting_timeout=2000,
        tts_kokoro=True,
        # -- MCP / A2A integration --
        mcp_servers='{\n "mcpServers": {}\n}',
        mcp_client_init_timeout=10,
        mcp_client_tool_timeout=120,
        mcp_server_enabled=False,
        mcp_server_token=create_auth_token(),
        a2a_server_enabled=False,
    )
|
| 1438 |
+
|
| 1439 |
+
|
| 1440 |
+
def _apply_settings(previous: Settings | None):
    # Push the freshly-stored settings into the running system.
    # `previous` is the prior settings snapshot (or None on first apply);
    # it is used to detect which subsystems actually need reloading.
    # Heavy reloads are dispatched as deferred tasks so the caller returns fast.
    global _settings
    if _settings:
        # imports are deferred to avoid circular imports at module load time
        from agent import AgentContext
        from initialize import initialize_agent

        config = initialize_agent()
        for ctx in AgentContext._contexts.values():
            ctx.config = config  # reinitialize context config with new settings
            # apply config to agents (walk the agent0 -> subordinate chain)
            agent = ctx.agent0
            while agent:
                agent.config = ctx.config
                agent = agent.get_data(agent.DATA_NAME_SUBORDINATE)

        # reload whisper model if necessary
        if not previous or _settings["stt_model_size"] != previous["stt_model_size"]:
            task = defer.DeferredTask().start_task(
                whisper.preload, _settings["stt_model_size"]
            )  # TODO overkill, replace with background task

        # force memory reload on embedding model change
        if not previous or (
            _settings["embed_model_name"] != previous["embed_model_name"]
            or _settings["embed_model_provider"] != previous["embed_model_provider"]
            or _settings["embed_model_kwargs"] != previous["embed_model_kwargs"]
        ):
            from python.helpers.memory import reload as memory_reload

            memory_reload()

        # update mcp settings if necessary
        if not previous or _settings["mcp_servers"] != previous["mcp_servers"]:
            from python.helpers.mcp_handler import MCPConfig

            async def update_mcp_settings(mcp_servers: str):
                # Re-parse the MCP servers JSON and reconnect clients;
                # failures are logged to all contexts, not raised.
                PrintStyle(
                    background_color="black", font_color="white", padding=True
                ).print("Updating MCP config...")
                AgentContext.log_to_all(
                    type="info", content="Updating MCP settings...", temp=True
                )

                mcp_config = MCPConfig.get_instance()
                try:
                    MCPConfig.update(mcp_servers)
                except Exception as e:
                    AgentContext.log_to_all(
                        type="error",
                        content=f"Failed to update MCP settings: {e}",
                        temp=False,
                    )
                    (
                        PrintStyle(
                            background_color="red", font_color="black", padding=True
                        ).print("Failed to update MCP settings")
                    )
                    (
                        PrintStyle(
                            background_color="black", font_color="red", padding=True
                        ).print(f"{e}")
                    )

                PrintStyle(
                    background_color="#6734C3", font_color="white", padding=True
                ).print("Parsed MCP config:")
                (
                    PrintStyle(
                        background_color="#334455", font_color="white", padding=False
                    ).print(mcp_config.model_dump_json())
                )
                AgentContext.log_to_all(
                    type="info", content="Finished updating MCP settings.", temp=True
                )

            task2 = defer.DeferredTask().start_task(
                update_mcp_settings, config.mcp_servers
            )  # TODO overkill, replace with background task

        # update token in mcp server
        current_token = (
            create_auth_token()
        )  # TODO - ugly, token in settings is generated from dotenv and does not always correspond
        if not previous or current_token != previous["mcp_server_token"]:

            async def update_mcp_token(token: str):
                from python.helpers.mcp_server import DynamicMcpProxy

                DynamicMcpProxy.get_instance().reconfigure(token=token)

            task3 = defer.DeferredTask().start_task(
                update_mcp_token, current_token
            )  # TODO overkill, replace with background task

        # update token in a2a server (same token/condition as the MCP server)
        if not previous or current_token != previous["mcp_server_token"]:

            async def update_a2a_token(token: str):
                from python.helpers.fasta2a_server import DynamicA2AProxy

                DynamicA2AProxy.get_instance().reconfigure(token=token)

            task4 = defer.DeferredTask().start_task(
                update_a2a_token, current_token
            )  # TODO overkill, replace with background task
|
| 1545 |
+
|
| 1546 |
+
|
| 1547 |
+
def _env_to_dict(data: str):
|
| 1548 |
+
env_dict = {}
|
| 1549 |
+
line_pattern = re.compile(r"\s*([^#][^=]*)\s*=\s*(.*)")
|
| 1550 |
+
for line in data.splitlines():
|
| 1551 |
+
match = line_pattern.match(line)
|
| 1552 |
+
if match:
|
| 1553 |
+
key, value = match.groups()
|
| 1554 |
+
# Remove optional surrounding quotes (single or double)
|
| 1555 |
+
value = value.strip().strip('"').strip("'")
|
| 1556 |
+
env_dict[key.strip()] = value
|
| 1557 |
+
return env_dict
|
| 1558 |
+
|
| 1559 |
+
|
| 1560 |
+
def _dict_to_env(data_dict):
|
| 1561 |
+
lines = []
|
| 1562 |
+
for key, value in data_dict.items():
|
| 1563 |
+
if "\n" in value:
|
| 1564 |
+
value = f"'{value}'"
|
| 1565 |
+
elif " " in value or value == "" or any(c in value for c in "\"'"):
|
| 1566 |
+
value = f'"{value}"'
|
| 1567 |
+
lines.append(f"{key}={value}")
|
| 1568 |
+
return "\n".join(lines)
|
| 1569 |
+
|
| 1570 |
+
|
| 1571 |
+
def set_root_password(password: str):
    """Change the container's root password via ``chpasswd`` and persist it.

    Raises:
        Exception: when not running inside a dockerized environment.
        subprocess.CalledProcessError: when ``chpasswd`` fails (check=True).
    """
    if not runtime.is_dockerized():
        raise Exception("root password can only be set in dockerized environments")
    subprocess.run(
        ["chpasswd"],
        input=f"root:{password}".encode(),
        capture_output=True,
        check=True,
    )
    dotenv.save_dotenv_value(dotenv.KEY_ROOT_PASSWORD, password)
|
| 1581 |
+
|
| 1582 |
+
|
| 1583 |
+
def get_runtime_config(set: Settings):
    """Derive code-execution SSH parameters from the runtime environment.

    Inside docker the terminal is reached locally (SSH on localhost:22);
    outside docker the SSH host is extracted from the RFC URL and the
    configured RFC SSH port is used.
    """
    ssh_enabled = set["shell_interface"] == "ssh"
    if runtime.is_dockerized():
        return {
            "code_exec_ssh_enabled": ssh_enabled,
            "code_exec_ssh_addr": "localhost",
            "code_exec_ssh_port": 22,
            "code_exec_ssh_user": "root",
        }

    # strip scheme, port suffix and trailing slash from the RFC URL
    host = set["rfc_url"]
    if "//" in host:
        host = host.split("//")[1]
    if ":" in host:
        host, _port = host.split(":")  # keep host part; port comes from settings
    if host.endswith("/"):
        host = host[:-1]
    return {
        "code_exec_ssh_enabled": ssh_enabled,
        "code_exec_ssh_addr": host,
        "code_exec_ssh_port": set["rfc_port_ssh"],
        "code_exec_ssh_user": "root",
    }
|
| 1605 |
+
|
| 1606 |
+
|
| 1607 |
+
def create_auth_token() -> str:
    """Derive a deterministic 16-character token from runtime id + credentials.

    SHA-256 of ``runtime_id:login:password`` is base64-url encoded (padding
    stripped) and truncated, giving a compact token that changes whenever
    the auth credentials change.
    """
    runtime_id = runtime.get_persistent_id()
    login = dotenv.get_dotenv_value(dotenv.KEY_AUTH_LOGIN) or ""
    password = dotenv.get_dotenv_value(dotenv.KEY_AUTH_PASSWORD) or ""
    # use base64 encoding for a more compact token with alphanumeric chars
    digest = hashlib.sha256(f"{runtime_id}:{login}:{password}".encode()).digest()
    # encode as base64 and remove any non-alphanumeric chars (like +, /, =)
    token = base64.urlsafe_b64encode(digest).decode().replace("=", "")
    return token[:16]
|
| 1616 |
+
|
| 1617 |
+
|
| 1618 |
+
def _get_version():
    """Best-effort version string from git metadata; 'unknown' on any failure."""
    try:
        info = git.get_git_info()
        tag = str(info.get("short_tag", "")).strip()
        return tag or "unknown"
    except Exception:
        return "unknown"
|
python/tools/search_engine.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
from typing import List, Dict, Any
|
| 3 |
+
from python.helpers.tool import Tool, Response
|
| 4 |
+
|
| 5 |
+
# The base URL of the public SearXNG instance.
|
| 6 |
+
# WARNING: Using a public, third-party instance is not recommended for production
|
| 7 |
+
# due to significant reliability, security, and privacy risks.
|
| 8 |
+
SEARXNG_BASE_URL = "https://CJJ-on-HF-SearXNG.hf.space"
|
| 9 |
+
|
| 10 |
+
class SearchEngine(Tool):
    """
    A tool to perform web searches using a public SearXNG instance.
    """

    async def execute(self, query: str, category: str = "general", num_results: int = 5) -> Response:
        """
        Performs a web search using a public SearXNG instance and returns formatted results.
        This tool allows for targeted searches using categories defined in the SearXNG instance.

        Args:
            query (str): The search query string.
            category (str): The SearXNG category to search in (e.g., 'science', 'it', 'news').
                            Defaults to 'general' for a broad search.
            num_results (int): The maximum number of search results to return. Defaults to 5.
                               Non-integer values are coerced; invalid values fall back to 5.

        Returns:
            Response: A Response object containing the formatted search results or an error message.
        """
        if not query:
            return Response(message="Error: The search query cannot be empty.", break_loop=False)

        # Fix: tool arguments often arrive as strings (LLM-produced JSON);
        # coerce num_results so the slice below cannot raise TypeError.
        try:
            num_results = max(1, int(num_results))
        except (TypeError, ValueError):
            num_results = 5

        # Construct the query with a category prefix if specified.
        # This leverages the power of SearXNG's engine configuration.
        search_query = f"!{category} {query}" if category and category != "general" else query

        params = {
            "q": search_query,
            "format": "json",  # Essential for machine-readable output
            "pageno": 1,
        }

        try:
            response = requests.get(
                f"{SEARXNG_BASE_URL}/search",
                params=params,
                timeout=15  # A generous but necessary timeout for a public service
            )
            # Raise an HTTPError for bad responses (4xx or 5xx)
            response.raise_for_status()

            data = response.json()
            results: List[Dict[str, Any]] = data.get("results", [])

            if not results:
                return Response(message=f"No search results found for the query: '{query}'", break_loop=False)

            # Format the results into a clean, readable string for the agent
            formatted_output = []
            for i, res in enumerate(results[:num_results]):
                title = res.get("title", "No Title Provided")
                url = res.get("url", "No URL Provided")
                snippet = res.get("content") or res.get("description", "No Snippet Provided")

                # Sanitize snippet to remove excessive newlines for cleaner LLM input
                clean_snippet = ' '.join(snippet.split()) if snippet else "No Snippet Provided"

                formatted_output.append(
                    f"Result {i+1}:\n"
                    f" Title: {title}\n"
                    f" URL: {url}\n"
                    f" Snippet: {clean_snippet}"
                )

            return Response(message="\n---\n".join(formatted_output), break_loop=False)

        except requests.exceptions.Timeout:
            return Response(message="Error: The search request timed out. The SearXNG instance may be offline or overloaded.", break_loop=False)
        except requests.exceptions.RequestException as e:
            return Response(message=f"Error: A network error occurred while contacting the search service: {e}", break_loop=False)
        except ValueError:  # Catches JSON decoding errors
            return Response(message="Error: Failed to parse a valid JSON response from the search service. The service might be down or returning malformed data.", break_loop=False)
|
requirements.txt
ADDED
|
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
setuptools
|
| 2 |
+
a2wsgi==1.10.8
|
| 3 |
+
ansio==0.0.2
|
| 4 |
+
browser-use==0.2.5
|
| 5 |
+
docker==7.1.0
|
| 6 |
+
duckduckgo-search==6.1.12
|
| 7 |
+
faiss-cpu==1.11.0
|
| 8 |
+
fastmcp==2.3.4
|
| 9 |
+
fasta2a==0.5.0
|
| 10 |
+
flask[async]==3.0.3
|
| 11 |
+
flask-basicauth==0.2.0
|
| 12 |
+
flaredantic==0.1.4
|
| 13 |
+
GitPython==3.1.43
|
| 14 |
+
inputimeout==1.0.4
|
| 15 |
+
kokoro>=0.9.2
|
| 16 |
+
simpleeval==1.0.3
|
| 17 |
+
langchain-core==0.3.49
|
| 18 |
+
langchain-community==0.3.19
|
| 19 |
+
langchain-unstructured[all-docs]==0.1.6
|
| 20 |
+
lxml_html_clean==0.3.1
|
| 21 |
+
markdown==3.7
|
| 22 |
+
mcp==1.9.0
|
| 23 |
+
newspaper3k==0.2.8
|
| 24 |
+
paramiko==3.5.0
|
| 25 |
+
playwright==1.52.0
|
| 26 |
+
pypdf==4.3.1
|
| 27 |
+
python-dotenv==1.1.0
|
| 28 |
+
pytz==2024.2
|
| 29 |
+
sentence-transformers==3.0.1
|
| 30 |
+
tiktoken==0.8.0
|
| 31 |
+
unstructured[all-docs]==0.16.23
|
| 32 |
+
unstructured-client==0.31.0
|
| 33 |
+
webcolors==24.6.0
|
| 34 |
+
nest-asyncio==1.6.0
|
| 35 |
+
crontab==1.0.1
|
| 36 |
+
litellm==1.74
|
| 37 |
+
markdownify==1.1.0
|
| 38 |
+
pymupdf==1.25.3
|
| 39 |
+
pytesseract==0.3.13
|
| 40 |
+
pdf2image==1.17.0
|
| 41 |
+
# crontab==1.0.1  (duplicate — already pinned above; commented out to avoid a redundant requirement)
|
| 42 |
+
pathspec>=0.12.1
|
| 43 |
+
psutil>=7.0.0
|
| 44 |
+
soundfile==0.13.1
|
run_ui.py
ADDED
|
@@ -0,0 +1,322 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import secrets, hmac, hashlib, time, base64
|
| 2 |
+
from datetime import timedelta
|
| 3 |
+
import os
|
| 4 |
+
import socket
|
| 5 |
+
import struct
|
| 6 |
+
from functools import wraps
|
| 7 |
+
import threading
|
| 8 |
+
from flask import Flask, request, Response, session
|
| 9 |
+
from flask_basicauth import BasicAuth
|
| 10 |
+
import initialize
|
| 11 |
+
from python.helpers import files, git, mcp_server, fasta2a_server
|
| 12 |
+
from python.helpers.files import get_abs_path
|
| 13 |
+
from python.helpers import runtime, dotenv, process
|
| 14 |
+
from python.helpers.extract_tools import load_classes_from_folder
|
| 15 |
+
from python.helpers.api import ApiHandler
|
| 16 |
+
from python.helpers.print_style import PrintStyle
|
| 17 |
+
import atexit
|
| 18 |
+
import asyncio
|
| 19 |
+
|
| 20 |
+
# Per-process secret used to sign stateless CSRF tokens.
CSRF_SECRET = secrets.token_bytes(32)  # or os.environ["CSRF_SECRET"].encode()
TOKEN_TTL = 3600  # token lifetime in seconds (1 hour)


def _sign(payload: str) -> str:
    """Return the hex HMAC-SHA256 signature of *payload* under CSRF_SECRET."""
    return hmac.new(CSRF_SECRET, payload.encode(), hashlib.sha256).hexdigest()


def generate_csrf_token():
    """Create a stateless CSRF token of the form '<nonce>:<timestamp>.<signature>'."""
    payload = f"{secrets.token_hex(16)}:{int(time.time())}"
    return f"{payload}.{_sign(payload)}"


def verify_csrf_token(token):
    """Validate a token's HMAC signature and age.

    Returns True only when the signature matches (constant-time compare)
    and the embedded timestamp is within TOKEN_TTL seconds of now.
    Any malformed input yields False.
    """
    try:
        payload, signature = token.rsplit(".", 1)
        if not hmac.compare_digest(signature, _sign(payload)):
            return False
        _nonce, issued_at = payload.split(":")
        return (time.time() - int(issued_at)) <= TOKEN_TTL
    except Exception:
        return False
| 43 |
+
|
| 44 |
+
|
| 45 |
+
# Set the new timezone to 'UTC'
|
| 46 |
+
os.environ["TZ"] = "UTC"
|
| 47 |
+
os.environ["TOKENIZERS_PARALLELISM"] = "false"
|
| 48 |
+
# Apply the timezone change
|
| 49 |
+
if hasattr(time, 'tzset'):
|
| 50 |
+
time.tzset()
|
| 51 |
+
|
| 52 |
+
# initialize the internal Flask server
|
| 53 |
+
webapp = Flask("app", static_folder=get_abs_path("./webui"), static_url_path="/")
|
| 54 |
+
webapp.secret_key = os.getenv("FLASK_SECRET_KEY") or secrets.token_hex(32)
|
| 55 |
+
webapp.config.update(
|
| 56 |
+
JSON_SORT_KEYS=False,
|
| 57 |
+
SESSION_COOKIE_NAME="session_" + runtime.get_runtime_id(), # bind the session cookie name to runtime id to prevent session collision on same host
|
| 58 |
+
SESSION_COOKIE_SAMESITE="Strict",
|
| 59 |
+
SESSION_PERMANENT=True,
|
| 60 |
+
PERMANENT_SESSION_LIFETIME=timedelta(days=1)
|
| 61 |
+
)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
lock = threading.Lock()
|
| 65 |
+
|
| 66 |
+
# Set up basic authentication for UI and API but not MCP
|
| 67 |
+
basic_auth = BasicAuth(webapp)
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
def is_loopback_address(address):
    """Return True when *address* refers exclusively to the loopback interface.

    IPv4 literals are loopback when the first octet is 127; IPv6 literals
    only when exactly "::1".  Anything else is treated as a hostname and
    resolved for both families: every resolved address must be loopback,
    and a resolution failure for either family yields False.
    """
    checkers = {
        socket.AF_INET: lambda ip: struct.unpack("!I", socket.inet_aton(ip))[0] >> 24 == 127,
        socket.AF_INET6: lambda ip: ip == "::1",
    }

    def _literal_family(addr):
        # Classify the string as an IPv6 literal, IPv4 literal, or neither.
        for fam in (socket.AF_INET6, socket.AF_INET):
            try:
                socket.inet_pton(fam, addr)
                return fam
            except socket.error:
                continue
        return None

    family = _literal_family(address)
    if family is not None:
        return checkers[family](address)

    # Hostname path: all resolved addresses in both families must be loopback.
    for fam in (socket.AF_INET, socket.AF_INET6):
        try:
            infos = socket.getaddrinfo(address, None, fam, socket.SOCK_STREAM)
        except socket.gaierror:
            return False
        for res_family, _, _, _, sockaddr in infos:
            if not checkers[res_family](sockaddr[0]):
                return False
    return True
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
def requires_api_key(f):
    """Decorator: authenticate an API endpoint.

    Accepted credentials, in order of precedence:
      1. ``Authorization: Bearer <AUTHENTICATION_TOKEN>`` header (checked only
         when the AUTHENTICATION_TOKEN env var is set).
      2. ``X-API-KEY`` header matching the MCP server token from settings.
      3. ``api_key`` field in the JSON request body, same token.
    Any mismatch or missing credential yields a 401 Response.
    """
    @wraps(f)
    async def decorated(*args, **kwargs):
        # Check for Bearer token first (Hugging Face requirement)
        auth_token = os.getenv("AUTHENTICATION_TOKEN")
        if auth_token:
            # NOTE(review): printing even the token *length* on every request
            # is debug noise and a minor information leak — consider removing.
            print(f"DEBUG: AUTHENTICATION_TOKEN is set. Length: {len(auth_token)}")
            auth_header = request.headers.get("Authorization")
            if auth_header and auth_header.startswith("Bearer "):
                bearer_token = auth_header.split(" ", 1)[1]
                # NOTE(review): '==' is not constant-time; hmac.compare_digest
                # would be safer for secret comparison — confirm threat model.
                if bearer_token == auth_token:
                    return await f(*args, **kwargs)
            # If Bearer token is provided but incorrect, or not provided when AUTHENTICATION_TOKEN is set
            if auth_header:
                return Response("Invalid Bearer token", 401)

        # Fallback to the default auth token from settings (same as MCP server)
        from python.helpers.settings import get_settings
        valid_api_key = get_settings()["mcp_server_token"]

        if api_key := request.headers.get("X-API-KEY"):
            if api_key != valid_api_key:
                return Response("Invalid API key", 401)
        elif request.json and request.json.get("api_key"):
            api_key = request.json.get("api_key")
            if api_key != valid_api_key:
                return Response("Invalid API key", 401)
        else:
            # If AUTHENTICATION_TOKEN was set but we reached here, it means Bearer was missing/wrong
            if auth_token:
                return Response("Bearer token required", 401)
            return Response("API key required", 401)
        return await f(*args, **kwargs)

    return decorated
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
# allow only loopback addresses
|
| 142 |
+
def requires_loopback(f):
    """Decorator: reject any request that does not originate from loopback."""

    @wraps(f)
    async def decorated(*args, **kwargs):
        if is_loopback_address(request.remote_addr):
            return await f(*args, **kwargs)
        return Response("Access denied.", 403, {})

    return decorated
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
# require authentication for handlers
|
| 157 |
+
def requires_auth(f):
    """Decorator: enforce HTTP Basic auth when AUTH_LOGIN/AUTH_PASSWORD are set.

    When no credentials are configured in the dotenv file, every request is
    allowed through unchanged.
    """

    @wraps(f)
    async def decorated(*args, **kwargs):
        expected_user = dotenv.get_dotenv_value("AUTH_LOGIN")
        expected_pass = dotenv.get_dotenv_value("AUTH_PASSWORD")
        if expected_user and expected_pass:
            supplied = request.authorization
            # NOTE(review): '==' comparison of credentials is not constant-time.
            credentials_ok = bool(
                supplied
                and supplied.username == expected_user
                and supplied.password == expected_pass
            )
            if not credentials_ok:
                return Response(
                    "Could not verify your access level for that URL.\n"
                    "You have to login with proper credentials",
                    401,
                    {"WWW-Authenticate": 'Basic realm="Login Required"'},
                )
        return await f(*args, **kwargs)

    return decorated
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
def csrf_protect(f):
    """Decorator: require a valid X-CSRF-Token header on the wrapped endpoint."""

    @wraps(f)
    async def decorated(*args, **kwargs):
        supplied_token = request.headers.get("X-CSRF-Token")
        token_is_valid = bool(supplied_token) and verify_csrf_token(supplied_token)
        if token_is_valid:
            print("CSRF token OK.")
            return await f(*args, **kwargs)
        print("Invalid or missing CSRF token!")
        return Response("CSRF token missing or invalid", 403)

    return decorated
|
| 187 |
+
|
| 188 |
+
|
| 189 |
+
# handle default address, load index
|
| 190 |
+
@webapp.route("/", methods=["GET"])
@requires_auth
async def serve_index():
    """Serve webui/index.html with version placeholders filled in and
    CSRF-token / runtime-id meta tags injected before </head>."""
    PrintStyle().print("Serving index.html")
    gitinfo = None
    try:
        gitinfo = git.get_git_info()
    except Exception as e:
        # Fall back to placeholder version info when git metadata is unavailable.
        PrintStyle().error(f"Error getting git info: {e}")
        gitinfo = {
            "version": "unknown",
            "commit_time": "unknown",
        }
    index_content = files.read_file("webui/index.html")
    # Substitute version placeholders in the HTML template.
    index_content = files.replace_placeholders_text(
        _content=index_content,
        version_no=gitinfo["version"],
        version_time=gitinfo["commit_time"]
    )

    # Generate and inject CSRF token and runtime_id into meta tags
    csrf_token = generate_csrf_token()
    runtime_id = runtime.get_runtime_id()
    meta_tags = f'''<meta name="csrf-token" content="{csrf_token}">
    <meta name="runtime-id" content="{runtime_id}">'''
    index_content = index_content.replace("</head>", f"{meta_tags}</head>")
    PrintStyle().print("Finished serving index.html")
    return index_content
|
| 218 |
+
|
| 219 |
+
def run():
    """Build the WSGI app (Flask UI + API handlers + MCP/A2A middleware) and serve forever."""
    PrintStyle().print("Initializing framework...")

    # Suppress only request logs but keep the startup messages
    from werkzeug.serving import WSGIRequestHandler
    from werkzeug.serving import make_server
    from werkzeug.middleware.dispatcher import DispatcherMiddleware
    from a2wsgi import ASGIMiddleware

    PrintStyle().print("Starting server...")

    class NoRequestLoggingWSGIRequestHandler(WSGIRequestHandler):
        # Silences werkzeug's per-request access log lines.
        def log_request(self, code="-", size="-"):
            pass  # Override to suppress request logging

    # Get configuration from environment
    port = runtime.get_web_ui_port()
    host = (
        runtime.get_arg("host") or dotenv.get_dotenv_value("WEB_UI_HOST") or "localhost"
    )
    server = None

    def register_api_handler(app, handler: type[ApiHandler]):
        # Route name is derived from the handler's module filename (python/api/<name>.py).
        name = handler.__module__.split(".")[-1]
        instance = handler(app, lock)

        async def handler_wrap():
            return await instance.handle_request(request=request)

        # Wrap the endpoint with exactly the protections the handler declares.
        if handler.requires_loopback():
            handler_wrap = requires_loopback(handler_wrap)
        if handler.requires_auth():
            handler_wrap = requires_auth(handler_wrap)
        if handler.requires_api_key():
            handler_wrap = requires_api_key(handler_wrap)
        if handler.requires_csrf():
            handler_wrap = csrf_protect(handler_wrap)

        app.add_url_rule(
            f"/{name}",
            f"/{name}",
            handler_wrap,
            methods=handler.get_methods(),
        )

    # initialize and register API handlers
    handlers = load_classes_from_folder("python/api", "*.py", ApiHandler)
    for handler in handlers:
        register_api_handler(webapp, handler)

    # add the webapp, mcp, and a2a to the app
    middleware_routes = {
        "/mcp": ASGIMiddleware(app=mcp_server.DynamicMcpProxy.get_instance()), # type: ignore
        "/a2a": ASGIMiddleware(app=fasta2a_server.DynamicA2AProxy.get_instance()), # type: ignore
    }

    app = DispatcherMiddleware(webapp, middleware_routes) # type: ignore

    PrintStyle().debug(f"Starting server at http://{host}:{port} ...")

    server = make_server(
        host=host,
        port=port,
        app=app,
        request_handler=NoRequestLoggingWSGIRequestHandler,
        threaded=True,
    )
    process.set_server(server)
    server.log_startup()

    # Start init_a0 in a background thread when server starts
    # threading.Thread(target=init_a0, daemon=True).start()
    init_a0()  # NOTE(review): runs synchronously before serve_forever; a slow init delays startup

    # run the server
    server.serve_forever()
|
| 295 |
+
|
| 296 |
+
|
| 297 |
+
def init_a0():
    """Initialize agent state: chats (blocking), MCP servers, job loop, and preload tasks."""
    # initialize contexts and MCP
    init_chats = initialize.initialize_chats()
    # only wait for init chats, otherwise they would seem to disappear for a while on restart
    init_chats.result_sync()

    initialize.initialize_mcp()
    # start job loop
    initialize.initialize_job_loop()
    # preload
    initialize.initialize_preload()
|
| 308 |
+
|
| 309 |
+
|
| 310 |
+
def shutdown_mcp():
    """atexit hook: close the MCP proxy, if one was created."""
    proxy = mcp_server.DynamicMcpProxy.get_instance()
    if proxy:
        # NOTE(review): asyncio.run raises if an event loop is already running
        # at interpreter exit — confirm no loop survives to this point.
        asyncio.run(proxy.close())

atexit.register(shutdown_mcp)
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
# run the internal server
|
| 319 |
+
if __name__ == "__main__":
|
| 320 |
+
runtime.initialize()
|
| 321 |
+
dotenv.load_dotenv()
|
| 322 |
+
run()
|
searxng/settings.yml
ADDED
|
File without changes
|
start.sh
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Container entrypoint: propagate the Blablador key and launch the web UI.

echo "Starting application..."

# Sync BLABLADOR_API_KEY to OTHER_API_KEY for Agent-Zero fallback
if test -n "${BLABLADOR_API_KEY}"; then
    export OTHER_API_KEY="${BLABLADOR_API_KEY}"
    echo "BLABLADOR_API_KEY synced to OTHER_API_KEY"
fi

python run_ui.py --host 0.0.0.0 --port 7860 --dockerized=true
|
test_api.py
ADDED
|
@@ -0,0 +1,68 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import base64
|
| 3 |
+
import json
|
| 4 |
+
import sys
|
| 5 |
+
|
| 6 |
+
BASE_URL = "https://auxteam-agent-skillset.hf.space"
|
| 7 |
+
# BASE_URL = "http://localhost:7860"
|
| 8 |
+
|
| 9 |
+
def test_health():
    """Hit the /health endpoint and print the HTTP status and body."""
    print("Testing /health...")
    response = requests.get(f"{BASE_URL}/health")
    print(response.status_code, response.text)
|
| 13 |
+
|
| 14 |
+
def test_set_keys():
    """POST dummy model/key settings to /set and print the outcome."""
    print("Testing /set (keys)...")
    # Setting a dummy key for testing, replace with real one if needed
    payload = {
        "api_key_other": "sk-dummy-key-from-api",
        "chat_model_provider": "other",
        "chat_model_api_base": "https://api.helmholtz-blablador.fz-juelich.de/v1",
        "chat_model_name": "alias-large",
    }
    response = requests.post(f"{BASE_URL}/set", json=payload)
    print(response.status_code, response.text)
|
| 25 |
+
|
| 26 |
+
def test_chat():
    """Send a simple chat message to /chat and print the reply."""
    print("Testing /chat...")
    payload = {
        "message": "Hello, who are you? Please reply briefly.",
        "profile": "agent0",
    }
    response = requests.post(f"{BASE_URL}/chat", json=payload)
    print(response.status_code, response.text)
|
| 34 |
+
|
| 35 |
+
def test_chat_with_file():
    """Send a chat message with a base64-encoded text attachment to /chat."""
    print("Testing /chat with file...")
    content = "This is a secret code: 12345. Remember it."
    encoded = base64.b64encode(content.encode()).decode()
    payload = {
        "message": "What is the secret code from the file?",
        "file": encoded,
        "file_name": "secret.txt",
    }
    response = requests.post(f"{BASE_URL}/chat", json=payload)
    print(response.status_code, response.text)
|
| 46 |
+
|
| 47 |
+
def test_stream():
    """POST to /stream and print each non-empty streamed line as it arrives."""
    print("Testing /stream...")
    payload = {
        "message": "Tell me a short joke.",
    }
    response = requests.post(f"{BASE_URL}/stream", json=payload, stream=True)
    for raw_line in response.iter_lines():
        if raw_line:
            print(raw_line.decode())
|
| 56 |
+
|
| 57 |
+
if __name__ == "__main__":
    # Map CLI sub-command -> test routine; with no argument, run the health check.
    commands = {
        "health": test_health,
        "set": test_set_keys,
        "chat": test_chat,
        "file": test_chat_with_file,
        "stream": test_stream,
    }
    if len(sys.argv) > 1:
        runner = commands.get(sys.argv[1])
        if runner:
            runner()
    else:
        test_health()
|
test_deployed_app.py
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import time
|
| 3 |
+
|
| 4 |
+
url = "https://auxteam-agent-skillset.hf.space/health"
|
| 5 |
+
|
| 6 |
+
def test_health():
    """Probe the deployed /health endpoint; return True only on HTTP 200.

    Any exception (network failure, timeout, non-JSON body) is printed
    and converted to a False result.
    """
    print(f"Testing health endpoint: {url}")
    try:
        reply = requests.get(url, timeout=10)
        print(f"Status Code: {reply.status_code}")
        print(f"Response: {reply.json()}")
        return reply.status_code == 200
    except Exception as err:
        print(f"Error: {err}")
        return False

if __name__ == "__main__":
    test_health()
|
webui/index.html
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
webui/js/api.js
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
 * POST JSON to an API endpoint and parse the JSON reply.
 * Data is automatically serialized.
 * @param {string} endpoint - The API endpoint to call
 * @param {any} data - The payload to send
 * @returns {Promise<any>} The parsed JSON response
 * @throws {Error} Carrying the response body text on a non-2xx status
 */
export async function callJsonApi(endpoint, data) {
  const requestInit = {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    credentials: "same-origin",
    body: JSON.stringify(data),
  };
  const response = await fetchApi(endpoint, requestInit);
  if (!response.ok) {
    throw new Error(await response.text());
  }
  return response.json();
}
|
| 25 |
+
|
| 26 |
+
/**
 * Fetch wrapper for A0 APIs that injects the CSRF token header and
 * retries exactly once with a refreshed token after a 403 response.
 * @param {string} url - The URL to fetch
 * @param {Object} [request] - Fetch options; mutated in place to carry the token
 * @returns {Promise<Response>} The fetch response
 */
export async function fetchApi(url, request) {
  const attempt = async (mayRetry) => {
    const token = await getCsrfToken();

    // Reuse the caller's options object (or create one) and attach the token.
    const options = request || {};
    options.headers = options.headers || {};
    options.headers["X-CSRF-Token"] = token;

    const response = await fetch(url, options);

    if (response.status === 403 && mayRetry) {
      // NOTE(review): if the token came from the static meta tag,
      // getCsrfToken may hand back the same value again — confirm the
      // refresh path actually yields a new token.
      csrfToken = null;
      return await attempt(false);
    }
    return response;
  };

  return await attempt(true);
}
|
| 67 |
+
|
| 68 |
+
// csrf token cached at module scope
let csrfToken = null;

/**
 * Resolve the CSRF token: cached value first, then the page's meta tag,
 * finally the /csrf_token endpoint (also persisting it as a cookie).
 * @returns {Promise<string>} The CSRF token
 */
async function getCsrfToken() {
  if (csrfToken) return csrfToken;

  const metaTag = document.querySelector('meta[name="csrf-token"]');
  if (metaTag) {
    csrfToken = metaTag.content;
    return csrfToken;
  }

  // fallback to fetch, but this should not happen
  const reply = await fetch("/csrf_token", { credentials: "same-origin" });
  const payload = await reply.json();
  csrfToken = payload.token;
  document.cookie = `csrf_token_${payload.runtime_id}=${csrfToken}; SameSite=Strict; Path=/`;
  return csrfToken;
}
|
webui/js/index.js
ADDED
|
@@ -0,0 +1,1275 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import * as msgs from "/js/messages.js";
|
| 2 |
+
import * as api from "/js/api.js";
|
| 3 |
+
import * as css from "/js/css.js";
|
| 4 |
+
import { sleep } from "/js/sleep.js";
|
| 5 |
+
import { store as attachmentsStore } from "/components/chat/attachments/attachmentsStore.js";
|
| 6 |
+
import { store as speechStore } from "/components/chat/speech/speech-store.js";
|
| 7 |
+
import { store as notificationStore } from "/components/notifications/notification-store.js";
|
| 8 |
+
|
| 9 |
+
// Expose fetchApi globally so legacy (non-module) scripts can still call it.
globalThis.fetchApi = api.fetchApi; // TODO - backward compatibility for non-modular scripts, remove once refactored to alpine

// Cached references to the main layout elements (resolved once at module load;
// assumes this module is evaluated after the DOM nodes exist).
const leftPanel = document.getElementById("left-panel");
const rightPanel = document.getElementById("right-panel");
const container = document.querySelector(".container");
const chatInput = document.getElementById("chat-input");
const chatHistory = document.getElementById("chat-history");
const sendButton = document.getElementById("send-button");
const inputSection = document.getElementById("input-section");
const statusSection = document.getElementById("status-section");
const chatsSection = document.getElementById("chats-section");
const tasksSection = document.getElementById("tasks-section");
const progressBar = document.getElementById("progress-bar");
const autoScrollSwitch = document.getElementById("auto-scroll-switch");
const timeDate = document.getElementById("time-date-container");

// Mutable UI state shared across this module.
let autoScroll = true; // keep chat history pinned to the newest message
let context = ""; // id of the currently selected chat/task context
let resetCounter = 0; // bumped on chat reset so per-log ids don't collide
let skipOneSpeech = false; // suppress TTS for the first poll after a context switch
let connectionStatus = undefined; // undefined = not checked yet, true = connected, false = disconnected

// Initialize the toggle button
setupSidebarToggle();
// Initialize tabs
setupTabs();
|
| 35 |
+
|
| 36 |
+
/**
 * Whether the chat history should automatically stick to the newest message.
 * @returns {boolean} Current auto-scroll flag.
 */
export function getAutoScroll() {
  return autoScroll;
}
|
| 39 |
+
|
| 40 |
+
/**
 * Heuristic mobile check based on viewport width.
 * @returns {boolean} true when the viewport is 768px wide or narrower.
 */
function isMobile() {
  const MOBILE_BREAKPOINT = 768;
  const width = window.innerWidth;
  return width <= MOBILE_BREAKPOINT;
}
|
| 43 |
+
|
| 44 |
+
/**
 * Show/hide the left sidebar (and the mobile overlay behind it).
 * @param {boolean} [show] - Explicit target state; when omitted (or when this
 *   is used directly as an event handler and receives an Event), the sidebar
 *   visibility is toggled.
 */
function toggleSidebar(show) {
  const overlay = document.getElementById("sidebar-overlay");

  // Non-boolean argument (e.g. a click Event): flip the current state.
  if (typeof show !== "boolean") {
    leftPanel.classList.toggle("hidden");
    rightPanel.classList.toggle("expanded");
    overlay.classList.toggle(
      "visible",
      !leftPanel.classList.contains("hidden")
    );
    return;
  }

  // Explicit state requested.
  leftPanel.classList.toggle("hidden", !show);
  rightPanel.classList.toggle("expanded", !show);
  overlay.classList.toggle("visible", show);
}
|
| 59 |
+
|
| 60 |
+
/**
 * Reset the layout to match the viewport: mobile collapses the sidebar,
 * desktop restores it. The overlay is always hidden after a resize.
 */
function handleResize() {
  const overlay = document.getElementById("sidebar-overlay");
  const mobile = isMobile();

  // classList.toggle(name, force) adds when `force` is true, removes otherwise,
  // which collapses the original if/else into two lines with the same effect.
  leftPanel.classList.toggle("hidden", mobile);
  rightPanel.classList.toggle("expanded", mobile);
  overlay.classList.remove("visible");
}
|
| 72 |
+
|
| 73 |
+
// Keep the layout in sync with the viewport at initial load and on resize.
globalThis.addEventListener("load", handleResize);
globalThis.addEventListener("resize", handleResize);

// On mobile, clicking the dimmed overlay behind the sidebar closes it.
document.addEventListener("DOMContentLoaded", () => {
  const overlay = document.getElementById("sidebar-overlay");
  overlay.addEventListener("click", () => {
    if (isMobile()) {
      toggleSidebar(false);
    }
  });
});
|
| 84 |
+
|
| 85 |
+
/**
 * Wire the sidebar toggle button to toggleSidebar. If the button is not in
 * the DOM yet, retry every 100ms until it appears. Safe to call repeatedly:
 * addEventListener deduplicates identical (target, type, listener) triples,
 * so the module-load call plus the DOMContentLoaded call register only one
 * handler.
 *
 * Fix: removed the unused local `leftPanel`/`rightPanel` consts that
 * shadowed the module-level references without ever being read.
 */
function setupSidebarToggle() {
  const toggleSidebarButton = document.getElementById("toggle-sidebar");
  if (toggleSidebarButton) {
    toggleSidebarButton.addEventListener("click", toggleSidebar);
  } else {
    console.error("Toggle sidebar button not found");
    setTimeout(setupSidebarToggle, 100); // retry until the button is rendered
  }
}
document.addEventListener("DOMContentLoaded", setupSidebarToggle);
|
| 97 |
+
|
| 98 |
+
/**
 * Send the current chat input (plus any pending attachments) to the backend
 * via POST /message_async, then adopt the context id the backend returns.
 * Disables the send button while the request is in flight.
 *
 * Fixes:
 * - `sleep(0)` was a floating promise; it is now awaited so the user message
 *   actually renders for a frame before the upload begins (the stated intent).
 * - The `heading` ternary was dead code: inside the `hasAttachments` branch
 *   `attachmentsWithUrls.length > 0` is always true, so the value is constant.
 */
export async function sendMessage() {
  const sendButton = document.getElementById("send-button");
  try {
    sendButton.classList.add("loading");
    sendButton.disabled = true;

    const message = chatInput.value.trim();
    const attachmentsWithUrls = attachmentsStore.getAttachmentsForSending();
    const hasAttachments = attachmentsWithUrls.length > 0;

    // Nothing to send when both text and attachments are empty.
    if (message || hasAttachments) {
      let response;
      const messageId = generateGUID();

      // Clear input and attachments immediately for snappy UX.
      chatInput.value = "";
      attachmentsStore.clearAttachments();
      adjustTextareaHeight();

      if (hasAttachments) {
        // Always at least one attachment in this branch.
        const heading = "Uploading attachments...";

        // Render the user message locally; the backend logs the attachments.
        setMessage(messageId, "user", heading, message, false, {
          // attachments: attachmentsWithUrls, // skip here, let the backend properly log them
        });

        // sleep one frame to render the message before upload starts - better UX
        await sleep(0);

        // Attachments require multipart form data instead of JSON.
        const formData = new FormData();
        formData.append("text", message);
        formData.append("context", context);
        formData.append("message_id", messageId);
        for (const attachment of attachmentsWithUrls) {
          formData.append("attachments", attachment.file);
        }

        response = await api.fetchApi("/message_async", {
          method: "POST",
          body: formData,
        });
      } else {
        // For text-only messages
        const data = {
          text: message,
          context,
          message_id: messageId,
        };
        response = await api.fetchApi("/message_async", {
          method: "POST",
          headers: {
            "Content-Type": "application/json",
          },
          body: JSON.stringify(data),
        });
      }

      // Handle response: the backend may assign/confirm the context id.
      const jsonResponse = await response.json();
      if (!jsonResponse) {
        toast("No response returned.", "error");
      } else {
        setContext(jsonResponse.context);
      }
    }
  } catch (e) {
    toastFetchError("Error sending message", e); // Will use new notification system
  } finally {
    sendButton.classList.remove("loading");
    sendButton.disabled = false;
  }
}
|
| 176 |
+
|
| 177 |
+
/**
 * Log an API failure and surface it via the frontend notification system,
 * with extra wording when the backend looks disconnected.
 * @param {string} text - Description of the failed action ("Error doing X").
 * @param {Error|any} error - The thrown error value.
 */
function toastFetchError(text, error) {
  console.error(text, error);
  // Derive a printable message; the toast call itself is fire-and-forget.
  const errorMessage = error?.message || error?.toString() || "Unknown error";

  if (getConnectionStatus()) {
    // Backend is reachable: report the error itself.
    toastFrontendError(`${text}: ${errorMessage}`).catch((e) =>
      console.error("Failed to show error toast:", e)
    );
    return;
  }

  // Backend looks down: make the connection problem explicit.
  toastFrontendError(
    `${text} (backend appears to be disconnected): ${errorMessage}`,
    "Connection Error"
  ).catch((e) => console.error("Failed to show connection error toast:", e));
}
globalThis.toastFetchError = toastFetchError;
|
| 196 |
+
|
| 197 |
+
// Enter sends the message; Shift+Enter keeps its default newline behavior.
chatInput.addEventListener("keydown", (e) => {
  if (e.key === "Enter" && !e.shiftKey) {
    e.preventDefault();
    sendMessage();
  }
});

sendButton.addEventListener("click", sendMessage);
|
| 205 |
+
|
| 206 |
+
/**
 * Append text to the chat input (used by external callers such as speech
 * input), keeping a single space between existing content and the new text,
 * then resize the textarea and fire an "input" event for listeners.
 * @param {string} text - Text to append.
 */
export function updateChatInput(text) {
  console.log("updateChatInput called with:", text);

  const currentValue = chatInput.value;
  const separator =
    currentValue.length > 0 && !currentValue.endsWith(" ") ? " " : "";
  chatInput.value = `${currentValue}${separator}${text} `;

  adjustTextareaHeight();
  chatInput.dispatchEvent(new Event("input"));

  console.log("Updated chat input value:", chatInput.value);
}
|
| 220 |
+
|
| 221 |
+
/**
 * Render the current local time (12-hour clock with am/pm) and date into the
 * #time-date element. Re-invoked every second via the interval below.
 */
function updateUserTime() {
  const now = new Date();
  const pad2 = (value) => value.toString().padStart(2, "0");

  const hours = now.getHours();
  const ampm = hours >= 12 ? "pm" : "am";
  const formattedHours = hours % 12 || 12; // midnight/noon render as 12

  const timeString = `${formattedHours}:${pad2(now.getMinutes())}:${pad2(
    now.getSeconds()
  )} ${ampm}`;

  const dateString = now.toLocaleDateString(undefined, {
    year: "numeric",
    month: "short",
    day: "numeric",
  });

  const userTimeElement = document.getElementById("time-date");
  userTimeElement.innerHTML = `${timeString}<br><span id="user-date">${dateString}</span>`;
}

updateUserTime();
setInterval(updateUserTime, 1000);
|
| 245 |
+
|
| 246 |
+
/**
 * Render or update one chat message through the messages module, keeping the
 * history pinned to the bottom while auto-scroll is on.
 * @returns {*} Whatever msgs.setMessage returns for the rendered message.
 */
function setMessage(id, type, heading, content, temp, kvps = null) {
  const result = msgs.setMessage(id, type, heading, content, temp, kvps);
  if (autoScroll) {
    chatHistory.scrollTop = chatHistory.scrollHeight;
  }
  return result;
}
|
| 251 |
+
|
| 252 |
+
/**
 * Open a file picker and upload the chosen documents as knowledge files for
 * the current chat context via POST /import_knowledge.
 */
globalThis.loadKnowledge = async function () {
  const input = document.createElement("input");
  input.type = "file";
  input.accept = ".txt,.pdf,.csv,.html,.json,.md";
  input.multiple = true;

  input.onchange = async () => {
    try {
      const formData = new FormData();
      for (const file of input.files) {
        formData.append("files[]", file);
      }
      formData.append("ctxid", getContext());

      const response = await api.fetchApi("/import_knowledge", {
        method: "POST",
        body: formData,
      });

      if (!response.ok) {
        toast(await response.text(), "error");
        return;
      }

      const data = await response.json();
      toast(
        "Knowledge files imported: " + data.filenames.join(", "),
        "success"
      );
    } catch (e) {
      toastFetchError("Error loading knowledge", e);
    }
  };

  input.click();
};
|
| 288 |
+
|
| 289 |
+
// Grow or shrink the chat textarea to fit its content.
function adjustTextareaHeight() {
  // Reset first so scrollHeight reflects the content, not the previous height.
  chatInput.style.height = "auto";
  chatInput.style.height = `${chatInput.scrollHeight}px`;
}
|
| 293 |
+
|
| 294 |
+
/**
 * POST `data` as JSON to `url` and return the parsed JSON response.
 * Thin wrapper kept for backward compatibility; delegates to api.callJsonApi.
 * (Removed the superseded hand-rolled fetch implementation that was left
 * commented out in the body.)
 * @param {string} url - API endpoint path.
 * @param {object} data - JSON-serializable request payload.
 * @returns {Promise<any>} Parsed JSON response body.
 */
export const sendJsonData = async function (url, data) {
  return await api.callJsonApi(url, data);
};
globalThis.sendJsonData = sendJsonData; // legacy global for non-module scripts
|
| 312 |
+
|
| 313 |
+
function generateGUID() {
|
| 314 |
+
return "xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx".replace(/[xy]/g, function (c) {
|
| 315 |
+
var r = (Math.random() * 16) | 0;
|
| 316 |
+
var v = c === "x" ? r : (r & 0x3) | 0x8;
|
| 317 |
+
return v.toString(16);
|
| 318 |
+
});
|
| 319 |
+
}
|
| 320 |
+
|
| 321 |
+
/**
 * Last known backend connectivity.
 * @returns {boolean|undefined} undefined = never checked, true = reachable,
 *   false = unreachable (see setConnectionStatus / poll).
 */
function getConnectionStatus() {
  return connectionStatus;
}
|
| 324 |
+
|
| 325 |
+
/**
 * Record backend connectivity and mirror it into the Alpine-driven status
 * icon inside the time/date container, when that component is mounted.
 * @param {boolean} connected - Whether the last poll reached the backend.
 */
function setConnectionStatus(connected) {
  connectionStatus = connected;

  // Guard clauses: Alpine may not be loaded yet and the icon may be absent.
  if (!globalThis.Alpine || !timeDate) return;
  const statusIconEl = timeDate.querySelector(".status-icon");
  if (!statusIconEl) return;
  const statusIcon = Alpine.$data(statusIconEl);
  if (statusIcon) {
    statusIcon.connected = connected;
  }
}
|
| 337 |
+
|
| 338 |
+
// Poll bookkeeping: last log version and log guid received from the backend
// (reset when switching contexts), and the number of the last log entry
// spoken aloud (currently unused — the reads/writes in speakMessages are
// commented out).
let lastLogVersion = 0;
let lastLogGuid = "";
let lastSpokenNo = 0;
|
| 341 |
+
|
| 342 |
+
/**
 * One polling cycle against POST /poll: fetch new log entries, notifications,
 * chat/task lists and agent state for the current context, then sync all of
 * it into the UI. Order-sensitive: log tracking variables are only advanced
 * after the logs are rendered.
 * @returns {Promise<boolean|undefined>} true when new log entries were
 *   rendered; undefined when the poll was abandoned (late response for a
 *   previous context, or restarted after a log-guid change).
 */
async function poll() {
  let updated = false;
  try {
    // Get timezone from navigator
    const timezone = Intl.DateTimeFormat().resolvedOptions().timeZone;

    const log_from = lastLogVersion;
    const response = await sendJsonData("/poll", {
      log_from: log_from,
      notifications_from: notificationStore.lastNotificationVersion || 0,
      context: context || null,
      timezone: timezone,
    });

    // Check if the response is valid
    if (!response) {
      console.error("Invalid response from poll endpoint");
      return false;
    }

    // First poll ever: adopt whatever context the backend assigned.
    if (!context) setContext(response.context);
    if (response.context != context) return; //skip late polls after context change

    // if the chat has been reset, restart this poll as it may have been called with incorrect log_from
    if (lastLogGuid != response.log_guid) {
      chatHistory.innerHTML = "";
      lastLogVersion = 0;
      lastLogGuid = response.log_guid;
      await poll();
      return;
    }

    // Render only when the backend reports a newer log version.
    if (lastLogVersion != response.log_version) {
      updated = true;
      for (const log of response.logs) {
        const messageId = log.id || log.no; // Use log.id if available
        setMessage(
          messageId,
          log.type,
          log.heading,
          log.content,
          log.temp,
          log.kvps
        );
      }
      afterMessagesUpdate(response.logs);
    }

    lastLogVersion = response.log_version;
    lastLogGuid = response.log_guid;

    updateProgress(response.log_progress, response.log_progress_active);

    // Update notifications from response
    notificationStore.updateFromPoll(response);

    //set ui model vars from backend
    if (globalThis.Alpine && inputSection) {
      const inputAD = Alpine.$data(inputSection);
      if (inputAD) {
        inputAD.paused = response.paused;
      }
    }

    // Update status icon state
    setConnectionStatus(true);

    // Update chats list and sort by created_at time (newer first)
    let chatsAD = null;
    let contexts = response.contexts || [];
    if (globalThis.Alpine && chatsSection) {
      chatsAD = Alpine.$data(chatsSection);
      if (chatsAD) {
        chatsAD.contexts = contexts.sort(
          (a, b) => (b.created_at || 0) - (a.created_at || 0)
        );
      }
    }

    // Update tasks list and sort by creation time (newer first)
    const tasksSection = document.getElementById("tasks-section");
    if (globalThis.Alpine && tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      if (tasksAD) {
        let tasks = response.tasks || [];

        // Always update tasks to ensure state changes are reflected
        if (tasks.length > 0) {
          // Sort the tasks by creation time
          const sortedTasks = [...tasks].sort(
            (a, b) => (b.created_at || 0) - (a.created_at || 0)
          );

          // Assign the sorted tasks to the Alpine data
          tasksAD.tasks = sortedTasks;
        } else {
          // Make sure to use a new empty array instance
          tasksAD.tasks = [];
        }
      }
    }

    // Make sure the active context is properly selected in both lists
    if (context) {
      // Update selection in the active tab
      const activeTab = localStorage.getItem("activeTab") || "chats";

      if (activeTab === "chats" && chatsAD) {
        chatsAD.selected = context;
        localStorage.setItem("lastSelectedChat", context);

        // Check if this context exists in the chats list
        const contextExists = contexts.some((ctx) => ctx.id === context);

        // If it doesn't exist in the chats list but we're in chats tab, try to select the first chat
        if (!contextExists && contexts.length > 0) {
          // Check if the current context is empty before creating a new one
          // If there's already a current context and we're just updating UI, don't automatically
          // create a new context by calling setContext
          const firstChatId = contexts[0].id;

          // Only create a new context if we're not currently in an existing context
          // This helps prevent duplicate contexts when switching tabs
          setContext(firstChatId);
          chatsAD.selected = firstChatId;
          localStorage.setItem("lastSelectedChat", firstChatId);
        }
      } else if (activeTab === "tasks" && tasksSection) {
        const tasksAD = Alpine.$data(tasksSection);
        tasksAD.selected = context;
        localStorage.setItem("lastSelectedTask", context);

        // Check if this context exists in the tasks list
        const taskExists = response.tasks?.some((task) => task.id === context);

        // If it doesn't exist in the tasks list but we're in tasks tab, try to select the first task
        if (!taskExists && response.tasks?.length > 0) {
          const firstTaskId = response.tasks[0].id;
          setContext(firstTaskId);
          tasksAD.selected = firstTaskId;
          localStorage.setItem("lastSelectedTask", firstTaskId);
        }
      }
    } else if (
      response.tasks &&
      response.tasks.length > 0 &&
      localStorage.getItem("activeTab") === "tasks"
    ) {
      // If we're in tasks tab with no selection but have tasks, select the first one
      const firstTaskId = response.tasks[0].id;
      setContext(firstTaskId);
      if (tasksSection) {
        const tasksAD = Alpine.$data(tasksSection);
        tasksAD.selected = firstTaskId;
        localStorage.setItem("lastSelectedTask", firstTaskId);
      }
    } else if (
      contexts.length > 0 &&
      localStorage.getItem("activeTab") === "chats" &&
      chatsAD
    ) {
      // If we're in chats tab with no selection but have chats, select the first one
      const firstChatId = contexts[0].id;

      // Only set context if we don't already have one to avoid duplicates
      if (!context) {
        setContext(firstChatId);
        chatsAD.selected = firstChatId;
        localStorage.setItem("lastSelectedChat", firstChatId);
      }
    }

    // NOTE(review): these repeat the assignments made right after rendering;
    // harmless, but presumably redundant — confirm before removing.
    lastLogVersion = response.log_version;
    lastLogGuid = response.log_guid;
  } catch (error) {
    console.error("Error:", error);
    setConnectionStatus(false);
  }

  return updated;
}
|
| 523 |
+
|
| 524 |
+
// Hook executed after freshly polled log messages are rendered; currently it
// only feeds text-to-speech when the user has speech enabled.
function afterMessagesUpdate(logs) {
  const speechEnabled = localStorage.getItem("speech") == "true";
  if (speechEnabled) {
    speakMessages(logs);
  }
}
|
| 529 |
+
|
| 530 |
+
/**
 * Pick the most recent speakable log entry and stream it to TTS.
 * Scans the polled logs from newest to oldest and stops at the first match:
 * either a "response" message (streamed as it grows), or an agent headline
 * for a non-response tool call (spoken once, as finished text).
 * Honors the one-shot skipOneSpeech flag set on context switches.
 */
function speakMessages(logs) {
  if (skipOneSpeech) {
    skipOneSpeech = false;
    return;
  }
  // log.no, log.type, log.heading, log.content
  for (let i = logs.length - 1; i >= 0; i--) {
    const log = logs[i];

    // if already spoken, end
    // if(log.no < lastSpokenNo) break;

    // finished response
    if (log.type == "response") {
      // lastSpokenNo = log.no;
      // kvps?.finished tells the speech store whether the stream is complete.
      speechStore.speakStream(
        getChatBasedId(log.no),
        log.content,
        log.kvps?.finished
      );
      return;

      // finished LLM headline, not response
    } else if (
      log.type == "agent" &&
      log.kvps &&
      log.kvps.headline &&
      log.kvps.tool_args &&
      log.kvps.tool_name != "response"
    ) {
      // lastSpokenNo = log.no;
      speechStore.speakStream(getChatBasedId(log.no), log.kvps.headline, true);
      return;
    }
  }
}
|
| 566 |
+
|
| 567 |
+
/**
 * Update the progress bar text and its "shiny" (actively working) animation.
 * @param {string} progress - Progress text/markup; icon tokens are converted.
 * @param {boolean} active - Whether the agent is currently busy.
 */
function updateProgress(progress, active) {
  let text = progress || "";

  if (active) {
    addClassToElement(progressBar, "shiny-text");
  } else {
    removeClassFromElement(progressBar, "shiny-text");
  }

  text = msgs.convertIcons(text);

  // Avoid touching the DOM when nothing changed.
  if (progressBar.innerHTML != text) {
    progressBar.innerHTML = text;
  }
}
|
| 582 |
+
|
| 583 |
+
/**
 * Pause or resume the agent loop for the current context via POST /pause.
 * (Removed the unused `resp` local — the response body is not used.)
 * @param {boolean} paused - true to pause, false to resume.
 */
globalThis.pauseAgent = async function (paused) {
  try {
    await sendJsonData("/pause", { paused, context });
  } catch (e) {
    globalThis.toastFetchError("Error pausing agent", e);
  }
};
|
| 590 |
+
|
| 591 |
+
/**
 * Reset a chat's history on the server via POST /chat_reset.
 * Bumps resetCounter so chat-based ids (speech streams, messages) from the
 * old log are never reused. (Removed the unused `resp` local.)
 * @param {string|null} ctxid - Context id to reset; null resets the current one.
 */
globalThis.resetChat = async function (ctxid = null) {
  try {
    await sendJsonData("/chat_reset", {
      context: ctxid === null ? context : ctxid,
    });
    resetCounter++;
    // Only re-run scroll bookkeeping when the visible chat was reset.
    if (ctxid === null) updateAfterScroll();
  } catch (e) {
    globalThis.toastFetchError("Error resetting chat", e);
  }
};
|
| 602 |
+
|
| 603 |
+
// Start a fresh chat by switching the UI to a brand-new context id; the
// backend creates the context lazily on the first message/poll.
globalThis.newChat = async function () {
  try {
    const freshId = generateGUID();
    setContext(freshId);
    updateAfterScroll();
  } catch (e) {
    globalThis.toastFetchError("Error creating new chat", e);
  }
};
|
| 611 |
+
|
| 612 |
+
/**
 * Delete a chat on the server (POST /chat_remove) and remove it from the
 * sidebar list. Switches the UI away from the chat first when it is the
 * currently selected one.
 * @param {string} id - Context id of the chat to delete.
 */
globalThis.killChat = async function (id) {
  if (!id) {
    console.error("No chat ID provided for deletion");
    return;
  }

  console.log("Deleting chat with ID:", id);

  try {
    const chatsAD = Alpine.$data(chatsSection);
    console.log(
      "Current contexts before deletion:",
      JSON.stringify(chatsAD.contexts.map((c) => ({ id: c.id, name: c.name })))
    );

    // switch to another context if deleting current
    switchFromContext(id);

    // Delete the chat on the server
    await sendJsonData("/chat_remove", { context: id });

    // Update the UI manually to ensure the correct chat is removed
    // Deep clone the contexts array to prevent reference issues
    const updatedContexts = chatsAD.contexts.filter((ctx) => ctx.id !== id);
    console.log(
      "Updated contexts after deletion:",
      JSON.stringify(updatedContexts.map((c) => ({ id: c.id, name: c.name })))
    );

    // Force UI update by creating a new array
    chatsAD.contexts = [...updatedContexts];

    updateAfterScroll();

    justToast("Chat deleted successfully", "success", 1000, "chat-removal");
  } catch (e) {
    console.error("Error deleting chat:", e);
    globalThis.toastFetchError("Error deleting chat", e);
  }
};
|
| 652 |
+
|
| 653 |
+
/**
 * If `id` is the currently active context, move the selection to the first
 * other chat in the sidebar, or to a brand-new context when none remain.
 * No-op when `id` is not the active context.
 * @param {string} id - Context id being abandoned (e.g. about to be deleted).
 */
export function switchFromContext(id) {
  if (context !== id) return;

  const chatsAD = Alpine.$data(chatsSection);

  // First chat in the list that is not the one being abandoned.
  const alternateChat = chatsAD.contexts.find((ctx) => ctx.id !== id);

  if (alternateChat) {
    setContext(alternateChat.id);
  } else {
    // If no other chats, create a new empty context
    setContext(generateGUID());
  }
}
|
| 675 |
+
|
| 676 |
+
/**
 * Ensure the sidebar tab matches the kind of context being selected:
 * selecting a task from the chats tab switches to the tasks tab and vice
 * versa, remembering the selection in localStorage first.
 * @param {string} contextId - Context id about to be selected.
 * @returns {boolean} true when a tab switch was performed.
 */
function ensureProperTabSelection(contextId) {
  const activeTab = localStorage.getItem("activeTab") || "chats";

  // Classify the id: it is a task iff it appears in the Alpine tasks list.
  const tasksSection = document.getElementById("tasks-section");
  let isTask = false;
  if (tasksSection) {
    const tasksAD = Alpine.$data(tasksSection);
    isTask = Boolean(tasksAD?.tasks?.some((task) => task.id === contextId));
  }

  if (isTask && activeTab === "chats") {
    // Remember the task before switching over to the tasks tab.
    localStorage.setItem("lastSelectedTask", contextId);
    activateTab("tasks");
    return true;
  }

  if (!isTask && activeTab === "tasks") {
    // Remember the chat before switching over to the chats tab.
    localStorage.setItem("lastSelectedChat", contextId);
    activateTab("chats");
    return true;
  }

  return false;
}
|
| 710 |
+
|
| 711 |
+
/**
 * Switch the UI to the chat/task with the given context id. If the id belongs
 * to the other sidebar tab, only the tab switch happens here (the tab's own
 * restore logic completes the selection). Exposed globally for templates.
 * @param {string} id - Context id to select.
 */
globalThis.selectChat = async function (id) {
  if (id === context) return; //already selected

  // Check if we need to switch tabs based on the context type
  const tabSwitched = ensureProperTabSelection(id);

  // If we didn't switch tabs, proceed with normal selection
  if (!tabSwitched) {
    // Switch to the new context - this will clear chat history and reset tracking variables
    setContext(id);

    // Update both contexts and tasks lists to reflect the selected item
    const chatsAD = Alpine.$data(chatsSection);
    const tasksSection = document.getElementById("tasks-section");
    if (tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      tasksAD.selected = id;
    }
    chatsAD.selected = id;

    // Store this selection in the appropriate localStorage key
    const activeTab = localStorage.getItem("activeTab") || "chats";
    if (activeTab === "chats") {
      localStorage.setItem("lastSelectedChat", id);
    } else if (activeTab === "tasks") {
      localStorage.setItem("lastSelectedTask", id);
    }

    // Trigger an immediate poll to fetch content
    // (deliberately not awaited — fire-and-forget refresh)
    poll();
  }

  updateAfterScroll();
};
|
| 745 |
+
|
| 746 |
+
/**
 * Make `id` the active context. Resets all poll-tracking state so the next
 * poll re-fetches the full log, stops any playing speech, clears the visible
 * history, and mirrors the selection into the Alpine chat/task lists.
 * No-op when `id` is already the active context.
 * @param {string} id - Context id to activate.
 */
export const setContext = function (id) {
  if (id == context) return;
  context = id;
  // Always reset the log tracking variables when switching contexts
  // This ensures we get fresh data from the backend
  lastLogGuid = "";
  lastLogVersion = 0;
  lastSpokenNo = 0;

  // Stop speech when switching chats
  speechStore.stopAudio();

  // Clear the chat history immediately to avoid showing stale content
  chatHistory.innerHTML = "";

  // Update both selected states
  if (globalThis.Alpine) {
    if (chatsSection) {
      const chatsAD = Alpine.$data(chatsSection);
      if (chatsAD) chatsAD.selected = id;
    }
    if (tasksSection) {
      const tasksAD = Alpine.$data(tasksSection);
      if (tasksAD) tasksAD.selected = id;
    }
  }

  //skip one speech if enabled when switching context
  if (localStorage.getItem("speech") == "true") skipOneSpeech = true;
};
|
| 776 |
+
|
| 777 |
+
/** @returns {string} Id of the currently active chat/task context. */
export const getContext = () => context;
|
| 780 |
+
|
| 781 |
+
/**
 * Build an id that is unique per chat context and per reset cycle, so stale
 * streams from a previous log never collide with new ones.
 * @param {string|number} id - Per-log identifier (e.g. log.no).
 * @returns {string} Combined "<context>-<resetCounter>-<id>" identifier.
 */
export const getChatBasedId = function (id) {
  return `${context}-${resetCounter}-${id}`;
};
|
| 784 |
+
|
| 785 |
+
// UI switch handler: enable/disable pinning the chat history to the bottom.
// Async to match the signature of the other global toggle handlers.
globalThis.toggleAutoScroll = async function (_autoScroll) {
  autoScroll = _autoScroll;
};
|
| 788 |
+
|
| 789 |
+
// UI switch handler: show or hide raw-JSON message blocks via a CSS rule.
globalThis.toggleJson = async function (showJson) {
  const display = showJson ? "block" : "none";
  css.toggleCssProperty(".msg-json", "display", display);
};
|
| 792 |
+
|
| 793 |
+
// UI switch handler: show or hide agent "thoughts" messages via a CSS rule.
// `undefined` clears the override so the stylesheet default applies.
globalThis.toggleThoughts = async function (showThoughts) {
  const display = showThoughts ? undefined : "none";
  css.toggleCssProperty(".msg-thoughts", "display", display);
};
|
| 800 |
+
|
| 801 |
+
// UI switch handler: show or hide utility messages via a CSS rule.
// `undefined` clears the override so the stylesheet default applies.
globalThis.toggleUtils = async function (showUtils) {
  const display = showUtils ? undefined : "none";
  css.toggleCssProperty(".message-util", "display", display);
};
|
| 808 |
+
|
| 809 |
+
/**
 * Apply dark or light theme classes on <body> and persist the preference.
 * @param {boolean} isDark - true for dark mode, false for light mode.
 */
globalThis.toggleDarkMode = function (isDark) {
  // toggle(name, force) replaces the remove/add pairs with the same effect.
  document.body.classList.toggle("dark-mode", isDark);
  document.body.classList.toggle("light-mode", !isDark);
  console.log("Dark mode:", isDark);
  localStorage.setItem("darkMode", isDark);
};
|
| 820 |
+
|
| 821 |
+
/**
 * Persist the text-to-speech preference; turning it off also stops any
 * audio that is currently playing.
 * @param {boolean} isOn - Whether speech output is enabled.
 */
globalThis.toggleSpeech = function (isOn) {
  console.log("Speech:", isOn);
  localStorage.setItem("speech", isOn);
  if (!isOn) {
    speechStore.stopAudio();
  }
};
|
| 826 |
+
|
| 827 |
+
/**
 * Ask the backend to nudge (re-prompt) the agent in the current context via
 * POST /nudge. (Removed the unused `resp` local.)
 */
globalThis.nudge = async function () {
  try {
    await sendJsonData("/nudge", { ctxid: getContext() });
  } catch (e) {
    toastFetchError("Error nudging agent", e);
  }
};
|
| 834 |
+
|
| 835 |
+
// Restart the backend and wait for it to come back up.
// NOTE: the happy path deliberately lives in the catch block — the /restart
// request is expected to FAIL (the connection drops while the server
// reboots), and that failure is what triggers the health-polling loop.
globalThis.restart = async function () {
  try {
    if (!getConnectionStatus()) {
      await toastFrontendError(
        "Backend disconnected, cannot restart.",
        "Restart Error"
      );
      return;
    }
    // First try to initiate restart
    const resp = await sendJsonData("/restart", {});
  } catch (e) {
    // Show restarting message with no timeout and restart group
    await toastFrontendInfo("Restarting...", "System Restart", 9999, "restart");

    let retries = 0;
    const maxRetries = 240; // Maximum number of retries (60 seconds with 250ms interval)

    // Poll /health until the server responds again or we time out.
    while (retries < maxRetries) {
      try {
        const resp = await sendJsonData("/health", {});
        // Server is back up, show success message that replaces the restarting message
        await new Promise((resolve) => setTimeout(resolve, 250));
        await toastFrontendSuccess("Restarted", "System Restart", 5, "restart");
        return;
      } catch (e) {
        // Server still down, keep waiting
        retries++;
        await new Promise((resolve) => setTimeout(resolve, 250));
      }
    }

    // If we get here, restart failed or took too long
    await toastFrontendError(
      "Restart timed out or failed",
      "Restart Error",
      8,
      "restart"
    );
  }
};
|
| 876 |
+
|
| 877 |
+
// Apply the persisted theme on startup (dark unless explicitly disabled).
document.addEventListener("DOMContentLoaded", function () {
  const stored = localStorage.getItem("darkMode");
  toggleDarkMode(stored !== "false");
});
|
| 882 |
+
|
| 883 |
+
// Let the user pick exported chat JSON files and load them into the backend,
// then switch to the first restored context.
globalThis.loadChats = async function () {
  try {
    const fileContents = await readJsonFiles();
    const response = await sendJsonData("/chat_load", { chats: fileContents });
    if (!response) {
      toast("No response returned.", "error");
      return;
    }
    setContext(response.ctxids[0]);
    toast("Chats loaded.", "success");
  } catch (e) {
    toastFetchError("Error loading chats", e);
  }
};
|
| 906 |
+
|
| 907 |
+
// Export the current chat context and download it as a JSON file.
globalThis.saveChat = async function () {
  try {
    const response = await sendJsonData("/chat_export", { ctxid: context });
    if (!response) {
      toast("No response returned.", "error");
      return;
    }
    downloadFile(`${response.ctxid}.json`, response.content);
    toast("Chat file downloaded.", "success");
  } catch (e) {
    toastFetchError("Error saving chat", e);
  }
};
|
| 929 |
+
|
| 930 |
+
/**
 * Trigger a browser download of `content` under `filename`.
 * Creates a temporary object URL for a JSON blob, clicks a detached
 * anchor, and revokes the URL on the next tick.
 */
function downloadFile(filename, content) {
  const blob = new Blob([content], { type: "application/json" });
  const url = URL.createObjectURL(blob);

  const link = document.createElement("a");
  link.href = url;
  link.download = filename;
  link.click();

  // Release the object URL once the click has been processed.
  setTimeout(() => URL.revokeObjectURL(url), 0);
}
|
| 952 |
+
|
| 953 |
+
/**
 * Open a file picker for one or more .json files and resolve with their
 * text contents (empty array when nothing is selected).
 * NOTE(review): the promise stays pending if the user cancels the dialog —
 * the change event never fires; confirm whether callers rely on that.
 */
function readJsonFiles() {
  return new Promise((resolve, reject) => {
    const input = document.createElement("input");
    input.type = "file";
    input.accept = ".json"; // Only accept JSON files
    input.multiple = true; // Allow multiple file selection

    input.onchange = async () => {
      const selected = Array.from(input.files);
      if (selected.length === 0) {
        resolve([]); // Nothing chosen
        return;
      }

      // Read every selected file as text, in parallel.
      const readers = selected.map(
        (file) =>
          new Promise((ok, fail) => {
            const reader = new FileReader();
            reader.onload = () => ok(reader.result);
            reader.onerror = fail;
            reader.readAsText(file);
          })
      );

      try {
        resolve(await Promise.all(readers));
      } catch (error) {
        reject(error); // In case of any file reading error
      }
    };

    // Open the dialog after the handler is wired up.
    input.click();
  });
}
|
| 991 |
+
|
| 992 |
+
/** Add a CSS class to the given element. */
function addClassToElement(element, className) {
  const { classList } = element;
  classList.add(className);
}
|
| 995 |
+
|
| 996 |
+
/** Remove a CSS class from the given element. */
function removeClassFromElement(element, className) {
  const { classList } = element;
  classList.remove(className);
}
|
| 999 |
+
|
| 1000 |
+
// Show a toast-only frontend notification (no notification-center entry).
// `timeout` is milliseconds; the store expects seconds.
function justToast(text, type = "info", timeout = 5000, group = "") {
  const displaySeconds = timeout / 1000;
  notificationStore.addFrontendToastOnly(type, text, "", displaySeconds, group);
}
|
| 1009 |
+
|
| 1010 |
+
|
| 1011 |
+
/**
 * Legacy toast helper mapped onto the notification store.
 * @param {string} text - message body
 * @param {string} type - "info" | "success" | "warning" | "error"
 * @param {number} timeout - display time in milliseconds (minimum 1 second)
 */
function toast(text, type = "info", timeout = 5000) {
  // The notification system takes seconds, with a one-second floor.
  const displayTime = Math.max(timeout / 1000, 1);

  switch (type.toLowerCase()) {
    case "error":
      return notificationStore.frontendError(text, "Error", displayTime);
    case "success":
      // NOTE(review): "success" is routed to frontendInfo (original behavior
      // preserved) — confirm whether a frontendSuccess variant should be used.
      return notificationStore.frontendInfo(text, "Success", displayTime);
    case "warning":
      return notificationStore.frontendWarning(text, "Warning", displayTime);
    default:
      return notificationStore.frontendInfo(text, "Info", displayTime);
  }
}
globalThis.toast = toast;
|
| 1030 |
+
|
| 1031 |
+
// OLD: hideToast function removed - now using new notification system
|
| 1032 |
+
|
| 1033 |
+
// Sync the auto-scroll switch UI with whether the chat is at the bottom.
function scrollChanged(isAtBottom) {
  if (!globalThis.Alpine || !autoScrollSwitch) return;
  const switchData = Alpine.$data(autoScrollSwitch);
  if (switchData) {
    switchData.autoScroll = isAtBottom;
  }
}
|
| 1042 |
+
|
| 1043 |
+
// Recompute whether the chat history is scrolled to the bottom (10px slack)
// and propagate the result to the auto-scroll switch.
function updateAfterScroll() {
  const tolerancePx = 10;
  const history = document.getElementById("chat-history");
  const distanceFromBottom =
    history.scrollHeight - history.scrollTop - history.clientHeight;
  scrollChanged(distanceFromBottom <= tolerancePx);
}
|
| 1054 |
+
|
| 1055 |
+
// Keep the auto-scroll switch in sync while the user scrolls the history.
chatHistory.addEventListener("scroll", updateAfterScroll);

// Auto-grow the input textarea as the user types.
chatInput.addEventListener("input", adjustTextareaHeight);
|
| 1058 |
+
|
| 1059 |
+
// setInterval(poll, 250);
|
| 1060 |
+
|
| 1061 |
+
// Adaptive polling loop: after any poll reports activity, poll quickly
// (25ms) for the next 100 cycles, otherwise fall back to 250ms.
async function startPolling() {
  const FAST_MS = 25;
  const SLOW_MS = 250;
  const FAST_CYCLES = 100;
  let fastCyclesLeft = 0;

  async function _doPoll() {
    let delay = SLOW_MS;
    try {
      const hadActivity = await poll();
      if (hadActivity) fastCyclesLeft = FAST_CYCLES; // Restart the fast window
      if (fastCyclesLeft > 0) fastCyclesLeft--;
      delay = fastCyclesLeft > 0 ? FAST_MS : SLOW_MS;
    } catch (error) {
      console.error("Error:", error);
    }
    // Schedule the next poll after the selected interval.
    setTimeout(_doPoll, delay);
  }

  _doPoll();
}

document.addEventListener("DOMContentLoaded", startPolling);
|
| 1087 |
+
|
| 1088 |
+
// Setup event handlers once the DOM is fully loaded
|
| 1089 |
+
// Wire up the sidebar and tabs, then restore the previously active tab,
// once the DOM is fully loaded.
document.addEventListener("DOMContentLoaded", () => {
  setupSidebarToggle();
  setupTabs();
  initializeActiveTab();
});
|
| 1094 |
+
|
| 1095 |
+
// Setup tabs functionality
|
| 1096 |
+
// Attach click handlers to the Chats/Tasks tab buttons; if the elements are
// not rendered yet, retry shortly.
function setupTabs() {
  const chatsTab = document.getElementById("chats-tab");
  const tasksTab = document.getElementById("tasks-tab");

  if (!chatsTab || !tasksTab) {
    console.error("Tab elements not found");
    setTimeout(setupTabs, 100); // Retry setup
    return;
  }

  chatsTab.addEventListener("click", () => activateTab("chats"));
  tasksTab.addEventListener("click", () => activateTab("tasks"));
}
|
| 1113 |
+
|
| 1114 |
+
// Switch the sidebar between the Chats and Tasks tabs, persisting the
// selection per tab so each tab restores its last active context.
// Statement order is load-bearing: the previous tab's selection must be
// saved (reading the old "activeTab" key) BEFORE "activeTab" is overwritten.
function activateTab(tabName) {
  const chatsTab = document.getElementById("chats-tab");
  const tasksTab = document.getElementById("tasks-tab");
  const chatsSection = document.getElementById("chats-section");
  const tasksSection = document.getElementById("tasks-section");

  // Get current context to preserve before switching
  const currentContext = context;

  // Store the current selection for the active tab before switching
  const previousTab = localStorage.getItem("activeTab");
  if (previousTab === "chats") {
    localStorage.setItem("lastSelectedChat", currentContext);
  } else if (previousTab === "tasks") {
    localStorage.setItem("lastSelectedTask", currentContext);
  }

  // Reset all tabs and sections
  chatsTab.classList.remove("active");
  tasksTab.classList.remove("active");
  chatsSection.style.display = "none";
  tasksSection.style.display = "none";

  // Remember the last active tab in localStorage
  localStorage.setItem("activeTab", tabName);

  // Activate selected tab and section
  if (tabName === "chats") {
    chatsTab.classList.add("active");
    chatsSection.style.display = "";

    // Get the available contexts from Alpine.js data
    const chatsAD = globalThis.Alpine ? Alpine.$data(chatsSection) : null;
    const availableContexts = chatsAD?.contexts || [];

    // Restore previous chat selection
    const lastSelectedChat = localStorage.getItem("lastSelectedChat");

    // Only switch if:
    // 1. lastSelectedChat exists AND
    // 2. It's different from current context AND
    // 3. The context actually exists in our contexts list OR there are no contexts yet
    if (
      lastSelectedChat &&
      lastSelectedChat !== currentContext &&
      (availableContexts.some((ctx) => ctx.id === lastSelectedChat) ||
        availableContexts.length === 0)
    ) {
      setContext(lastSelectedChat);
    }
  } else if (tabName === "tasks") {
    tasksTab.classList.add("active");
    tasksSection.style.display = "flex";
    tasksSection.style.flexDirection = "column";

    // Get the available tasks from Alpine.js data
    const tasksAD = globalThis.Alpine ? Alpine.$data(tasksSection) : null;
    const availableTasks = tasksAD?.tasks || [];

    // Restore previous task selection
    const lastSelectedTask = localStorage.getItem("lastSelectedTask");

    // Only switch if:
    // 1. lastSelectedTask exists AND
    // 2. It's different from current context AND
    // 3. The task actually exists in our tasks list
    // (stricter than the chats branch: no empty-list fallback here)
    if (
      lastSelectedTask &&
      lastSelectedTask !== currentContext &&
      availableTasks.some((task) => task.id === lastSelectedTask)
    ) {
      setContext(lastSelectedTask);
    }
  }

  // Request a poll update
  poll();
}
|
| 1192 |
+
|
| 1193 |
+
// Add function to initialize active tab and selections from localStorage
|
| 1194 |
+
// Seed the per-tab selection storage keys and re-activate the tab that was
// active in the last session (defaulting to "chats").
function initializeActiveTab() {
  for (const key of ["lastSelectedChat", "lastSelectedTask"]) {
    if (!localStorage.getItem(key)) {
      localStorage.setItem(key, "");
    }
  }

  activateTab(localStorage.getItem("activeTab") || "chats");
}
|
| 1206 |
+
|
| 1207 |
+
/*
|
| 1208 |
+
* A0 Chat UI
|
| 1209 |
+
*
|
| 1210 |
+
* Tasks tab functionality:
|
| 1211 |
+
* - Tasks are displayed in the Tasks tab with the same mechanics as chats
|
| 1212 |
+
* - Both lists are sorted by creation time (newest first)
|
| 1213 |
+
* - Selection state is preserved across tab switches
|
| 1214 |
+
* - The active tab is remembered across sessions
|
| 1215 |
+
* - Tasks use the same context system as chats for communication with the backend
|
| 1216 |
+
* - Future support for renaming and deletion will be implemented later
|
| 1217 |
+
*/
|
| 1218 |
+
|
| 1219 |
+
// Open the scheduler detail view for a specific task
|
| 1220 |
+
function openTaskDetail(taskId) {
|
| 1221 |
+
// Wait for Alpine.js to be fully loaded
|
| 1222 |
+
if (globalThis.Alpine) {
|
| 1223 |
+
// Get the settings modal button and click it to ensure all init logic happens
|
| 1224 |
+
const settingsButton = document.getElementById("settings");
|
| 1225 |
+
if (settingsButton) {
|
| 1226 |
+
// Programmatically click the settings button
|
| 1227 |
+
settingsButton.click();
|
| 1228 |
+
|
| 1229 |
+
// Now get a reference to the modal element
|
| 1230 |
+
const modalEl = document.getElementById("settingsModal");
|
| 1231 |
+
if (!modalEl) {
|
| 1232 |
+
console.error("Settings modal element not found after clicking button");
|
| 1233 |
+
return;
|
| 1234 |
+
}
|
| 1235 |
+
|
| 1236 |
+
// Get the Alpine.js data for the modal
|
| 1237 |
+
const modalData = globalThis.Alpine ? Alpine.$data(modalEl) : null;
|
| 1238 |
+
|
| 1239 |
+
// Use a timeout to ensure the modal is fully rendered
|
| 1240 |
+
setTimeout(() => {
|
| 1241 |
+
// Switch to the scheduler tab first
|
| 1242 |
+
modalData.switchTab("scheduler");
|
| 1243 |
+
|
| 1244 |
+
// Use another timeout to ensure the scheduler component is initialized
|
| 1245 |
+
setTimeout(() => {
|
| 1246 |
+
// Get the scheduler component
|
| 1247 |
+
const schedulerComponent = document.querySelector(
|
| 1248 |
+
'[x-data="schedulerSettings"]'
|
| 1249 |
+
);
|
| 1250 |
+
if (!schedulerComponent) {
|
| 1251 |
+
console.error("Scheduler component not found");
|
| 1252 |
+
return;
|
| 1253 |
+
}
|
| 1254 |
+
|
| 1255 |
+
// Get the Alpine.js data for the scheduler component
|
| 1256 |
+
const schedulerData = globalThis.Alpine
|
| 1257 |
+
? Alpine.$data(schedulerComponent)
|
| 1258 |
+
: null;
|
| 1259 |
+
|
| 1260 |
+
// Show the task detail view for the specific task
|
| 1261 |
+
schedulerData.showTaskDetail(taskId);
|
| 1262 |
+
|
| 1263 |
+
console.log("Task detail view opened for task:", taskId);
|
| 1264 |
+
}, 50); // Give time for the scheduler tab to initialize
|
| 1265 |
+
}, 25); // Give time for the modal to render
|
| 1266 |
+
} else {
|
| 1267 |
+
console.error("Settings button not found");
|
| 1268 |
+
}
|
| 1269 |
+
} else {
|
| 1270 |
+
console.error("Alpine.js not loaded");
|
| 1271 |
+
}
|
| 1272 |
+
}
|
| 1273 |
+
|
| 1274 |
+
// Make the function available globally
|
| 1275 |
+
globalThis.openTaskDetail = openTaskDetail;
|
whisper.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
def load_model(*args, **kwargs):
    """Stub replacement for whisper.load_model: speech-to-text is disabled.

    Accepts and ignores any arguments; returns an object whose
    ``transcribe`` method always reports that Whisper is unavailable.
    """

    class MockModel:
        def transcribe(self, *args, **kwargs):
            # Same dict shape as a real whisper result, fixed message.
            return {"text": "Whisper is disabled in this deployment"}

    return MockModel()
|