Spaces:
Running
Running
Jainish1808 committed on
Commit ·
00a2010
1
Parent(s): 8f05f24
Initial import of Claude_Code
Browse files · This view is limited to 50 files because it contains too many changes. See raw diff
- Claude_Code/.dockerignore +8 -0
- Claude_Code/.gitattributes +35 -0
- Claude_Code/.gitignore +12 -0
- Claude_Code/.python-version +1 -0
- Claude_Code/AGENTS.md +52 -0
- Claude_Code/CLAUDE.md +1 -0
- Claude_Code/Dockerfile +18 -0
- Claude_Code/README.md +588 -0
- Claude_Code/api/__init__.py +21 -0
- Claude_Code/api/app.py +273 -0
- Claude_Code/api/command_utils.py +139 -0
- Claude_Code/api/dependencies.py +226 -0
- Claude_Code/api/detection.py +130 -0
- Claude_Code/api/models/__init__.py +35 -0
- Claude_Code/api/models/anthropic.py +134 -0
- Claude_Code/api/models/responses.py +33 -0
- Claude_Code/api/optimization_handlers.py +147 -0
- Claude_Code/api/request_utils.py +101 -0
- Claude_Code/api/routes.py +345 -0
- Claude_Code/claude-pick +183 -0
- Claude_Code/cli/__init__.py +6 -0
- Claude_Code/cli/entrypoints.py +47 -0
- Claude_Code/cli/manager.py +144 -0
- Claude_Code/cli/process_registry.py +74 -0
- Claude_Code/cli/session.py +257 -0
- Claude_Code/config/__init__.py +5 -0
- Claude_Code/config/env.example +71 -0
- Claude_Code/config/logging_config.py +90 -0
- Claude_Code/config/nim.py +51 -0
- Claude_Code/config/settings.py +242 -0
- Claude_Code/messaging/__init__.py +23 -0
- Claude_Code/messaging/commands.py +283 -0
- Claude_Code/messaging/event_parser.py +163 -0
- Claude_Code/messaging/handler.py +770 -0
- Claude_Code/messaging/limiter.py +312 -0
- Claude_Code/messaging/models.py +36 -0
- Claude_Code/messaging/platforms/__init__.py +11 -0
- Claude_Code/messaging/platforms/base.py +218 -0
- Claude_Code/messaging/platforms/discord.py +561 -0
- Claude_Code/messaging/platforms/factory.py +56 -0
- Claude_Code/messaging/platforms/telegram.py +661 -0
- Claude_Code/messaging/rendering/__init__.py +41 -0
- Claude_Code/messaging/rendering/discord_markdown.py +365 -0
- Claude_Code/messaging/rendering/telegram_markdown.py +380 -0
- Claude_Code/messaging/session.py +289 -0
- Claude_Code/messaging/transcript.py +577 -0
- Claude_Code/messaging/transcription.py +228 -0
- Claude_Code/messaging/trees/__init__.py +11 -0
- Claude_Code/messaging/trees/data.py +482 -0
- Claude_Code/messaging/trees/processor.py +165 -0
Claude_Code/.dockerignore
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
.git
|
| 2 |
+
.venv
|
| 3 |
+
__pycache__
|
| 4 |
+
*.pyc
|
| 5 |
+
.env
|
| 6 |
+
uv.lock
|
| 7 |
+
nvidia_nim_models.json
|
| 8 |
+
pic.png
|
Claude_Code/.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tar filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
Claude_Code/.gitignore
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__pycache__
|
| 2 |
+
.claude
|
| 3 |
+
.cursor
|
| 4 |
+
.pytest_cache
|
| 5 |
+
.ruff_cache
|
| 6 |
+
.serena
|
| 7 |
+
.venv
|
| 8 |
+
agent_workspace
|
| 9 |
+
.env
|
| 10 |
+
server.log
|
| 11 |
+
.coverage
|
| 12 |
+
llama_cache
|
Claude_Code/.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.14.0
|
Claude_Code/AGENTS.md
ADDED
|
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# AGENTIC DIRECTIVE
|
| 2 |
+
|
| 3 |
+
> This file is identical to CLAUDE.md. Keep them in sync.
|
| 4 |
+
|
| 5 |
+
## CODING ENVIRONMENT
|
| 6 |
+
|
| 7 |
+
- Install astral uv using "curl -LsSf https://astral.sh/uv/install.sh | sh" if not already installed and if already installed then update it to the latest version
|
| 8 |
+
- Install Python 3.14 using `uv python install 3.14` if not already installed
|
| 9 |
+
- Always use `uv run` to run files instead of the global `python` command.
|
| 10 |
+
- The current ruff formatter target is py314, which supports multiple exception types without parentheses (except TypeError, ValueError:)
|
| 11 |
+
- Read `.env.example` for environment variables.
|
| 12 |
+
- All CI checks must pass; failing checks block merge.
|
| 13 |
+
- Add tests for new changes (including edge cases), then run `uv run pytest`.
|
| 14 |
+
- Run checks in this order: `uv run ruff format`, `uv run ruff check`, `uv run ty check`, `uv run pytest`.
|
| 15 |
+
- Do not add `# type: ignore` or `# ty: ignore`; fix the underlying type issue.
|
| 16 |
+
- All 5 checks are enforced in `tests.yml` on push/merge.
|
| 17 |
+
|
| 18 |
+
## IDENTITY & CONTEXT
|
| 19 |
+
|
| 20 |
+
- You are an expert Software Architect and Systems Engineer.
|
| 21 |
+
- Goal: Zero-defect, root-cause-oriented engineering for bugs; test-driven engineering for new features. Think carefully; no need to rush.
|
| 22 |
+
- Code: Write the simplest code possible. Keep the codebase minimal and modular.
|
| 23 |
+
|
| 24 |
+
## ARCHITECTURE PRINCIPLES (see PLAN.md)
|
| 25 |
+
|
| 26 |
+
- **Shared utilities**: Extract common logic into shared packages (e.g. `providers/common/`). Do not have one provider import from another provider's utils.
|
| 27 |
+
- **DRY**: Extract shared base classes to eliminate duplication. Prefer composition over copy-paste.
|
| 28 |
+
- **Encapsulation**: Use accessor methods for internal state (e.g. `set_current_task()`), not direct `_attribute` assignment from outside.
|
| 29 |
+
- **Provider-specific config**: Keep provider-specific fields (e.g. `nim_settings`) in provider constructors, not in the base `ProviderConfig`.
|
| 30 |
+
- **Dead code**: Remove unused code, legacy systems, and hardcoded values. Use settings/config instead of literals (e.g. `settings.provider_type` not `"nvidia_nim"`).
|
| 31 |
+
- **Performance**: Use list accumulation for strings (not `+=` in loops), cache env vars at init, prefer iterative over recursive when stack depth matters.
|
| 32 |
+
- **Platform-agnostic naming**: Use generic names (e.g. `PLATFORM_EDIT`) not platform-specific ones (e.g. `TELEGRAM_EDIT`) in shared code.
|
| 33 |
+
- **No type ignores**: Do not add `# type: ignore` or `# ty: ignore`. Fix the underlying type issue.
|
| 34 |
+
- **Backward compatibility**: When moving modules, add re-exports from old locations so existing imports keep working.
|
| 35 |
+
|
| 36 |
+
## COGNITIVE WORKFLOW
|
| 37 |
+
|
| 38 |
+
1. **ANALYZE**: Read relevant files. Do not guess.
|
| 39 |
+
2. **PLAN**: Map out the logic. Identify root cause or required changes. Order changes by dependency.
|
| 40 |
+
3. **EXECUTE**: Fix the cause, not the symptom. Execute incrementally with clear commits.
|
| 41 |
+
4. **VERIFY**: Run CI checks. Confirm the fix via logs or output.
|
| 42 |
+
5. **SPECIFICITY**: Do exactly as much as asked; nothing more, nothing less.
|
| 43 |
+
6. **PROPAGATION**: Changes impact multiple files; propagate updates correctly.
|
| 44 |
+
|
| 45 |
+
## SUMMARY STANDARDS
|
| 46 |
+
|
| 47 |
+
- Summaries must be technical and granular.
|
| 48 |
+
- Include: [Files Changed], [Logic Altered], [Verification Method], [Residual Risks] (if no residual risks then say none).
|
| 49 |
+
|
| 50 |
+
## TOOLS
|
| 51 |
+
|
| 52 |
+
- Prefer built-in tools (grep, read_file, etc.) over manual workflows. Check tool availability before use.
|
Claude_Code/CLAUDE.md
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
IMPORTANT: Ensure you’ve thoroughly reviewed the [AGENTS.md](AGENTS.md) file before beginning any work.
|
Claude_Code/Dockerfile
ADDED
|
@@ -0,0 +1,18 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
FROM python:3.12-slim
|
| 2 |
+
|
| 3 |
+
WORKDIR /app
|
| 4 |
+
ENV PYTHONUNBUFFERED=1
|
| 5 |
+
|
| 6 |
+
# Install uv for faster dependency installation
|
| 7 |
+
RUN pip install --no-cache-dir uv
|
| 8 |
+
|
| 9 |
+
# Install dependencies
|
| 10 |
+
COPY requirements.txt .
|
| 11 |
+
RUN uv pip install --system --no-cache-dir -r requirements.txt
|
| 12 |
+
|
| 13 |
+
# Copy application source code
|
| 14 |
+
COPY . .
|
| 15 |
+
|
| 16 |
+
EXPOSE 7860
|
| 17 |
+
|
| 18 |
+
CMD ["uvicorn", "server:app", "--host", "0.0.0.0", "--port", "7860"]
|
Claude_Code/README.md
ADDED
|
@@ -0,0 +1,588 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Claude Code
|
| 3 |
+
emoji: 🤖
|
| 4 |
+
colorFrom: indigo
|
| 5 |
+
colorTo: blue
|
| 6 |
+
sdk: docker
|
| 7 |
+
app_port: 7860
|
| 8 |
+
pinned: false
|
| 9 |
+
---
|
| 10 |
+
|
| 11 |
+
<div align="center">
|
| 12 |
+
|
| 13 |
+
# 🤖 Free Claude Code
|
| 14 |
+
|
| 15 |
+
### Use Claude Code CLI & VSCode for free. No Anthropic API key required.
|
| 16 |
+
|
| 17 |
+
[](https://opensource.org/licenses/MIT)
|
| 18 |
+
[](https://www.python.org/downloads/)
|
| 19 |
+
[](https://github.com/astral-sh/uv)
|
| 20 |
+
[](https://github.com/Alishahryar1/free-claude-code/actions/workflows/tests.yml)
|
| 21 |
+
[](https://pypi.org/project/ty/)
|
| 22 |
+
[](https://github.com/astral-sh/ruff)
|
| 23 |
+
[](https://github.com/Delgan/loguru)
|
| 24 |
+
|
| 25 |
+
A lightweight proxy that routes Claude Code's Anthropic API calls to **NVIDIA NIM** (40 req/min free), **OpenRouter** (hundreds of models), **LM Studio** (fully local), or **llama.cpp** (local with Anthropic endpoints).
|
| 26 |
+
|
| 27 |
+
[Quick Start](#quick-start) · [Providers](#providers) · [Discord Bot](#discord-bot) · [Configuration](#configuration) · [Development](#development) · [Contributing](#contributing)
|
| 28 |
+
|
| 29 |
+
---
|
| 30 |
+
|
| 31 |
+
</div>
|
| 32 |
+
|
| 33 |
+
<div align="center">
|
| 34 |
+
<img src="pic.png" alt="Free Claude Code in action" width="700">
|
| 35 |
+
<p><em>Claude Code running via NVIDIA NIM, completely free</em></p>
|
| 36 |
+
</div>
|
| 37 |
+
|
| 38 |
+
## Features
|
| 39 |
+
|
| 40 |
+
| Feature | Description |
|
| 41 |
+
| -------------------------- | ----------------------------------------------------------------------------------------------- |
|
| 42 |
+
| **Zero Cost** | 40 req/min free on NVIDIA NIM. Free models on OpenRouter. Fully local with LM Studio |
|
| 43 |
+
| **Drop-in Replacement** | Set 2 env vars. No modifications to Claude Code CLI or VSCode extension needed |
|
| 44 |
+
| **4 Providers** | NVIDIA NIM, OpenRouter (hundreds of models), LM Studio (local), llama.cpp (`llama-server`) |
|
| 45 |
+
| **Per-Model Mapping** | Route Opus / Sonnet / Haiku to different models and providers. Mix providers freely |
|
| 46 |
+
| **Thinking Token Support** | Parses `<think>` tags and `reasoning_content` into native Claude thinking blocks |
|
| 47 |
+
| **Heuristic Tool Parser** | Models outputting tool calls as text are auto-parsed into structured tool use |
|
| 48 |
+
| **Request Optimization** | 5 categories of trivial API calls intercepted locally, saving quota and latency |
|
| 49 |
+
| **Smart Rate Limiting** | Proactive rolling-window throttle + reactive 429 exponential backoff + optional concurrency cap |
|
| 50 |
+
| **Discord / Telegram Bot** | Remote autonomous coding with tree-based threading, session persistence, and live progress |
|
| 51 |
+
| **Subagent Control** | Task tool interception forces `run_in_background=False`. No runaway subagents |
|
| 52 |
+
| **Extensible** | Clean `BaseProvider` and `MessagingPlatform` ABCs. Add new providers or platforms easily |
|
| 53 |
+
|
| 54 |
+
## Quick Start
|
| 55 |
+
|
| 56 |
+
### Prerequisites
|
| 57 |
+
|
| 58 |
+
1. Get an API key (or use LM Studio / llama.cpp locally):
|
| 59 |
+
- **NVIDIA NIM**: [build.nvidia.com/settings/api-keys](https://build.nvidia.com/settings/api-keys)
|
| 60 |
+
- **OpenRouter**: [openrouter.ai/keys](https://openrouter.ai/keys)
|
| 61 |
+
- **LM Studio**: No API key needed. Run locally with [LM Studio](https://lmstudio.ai)
|
| 62 |
+
- **llama.cpp**: No API key needed. Run `llama-server` locally.
|
| 63 |
+
2. Install [Claude Code](https://github.com/anthropics/claude-code)
|
| 64 |
+
3. Install [uv](https://github.com/astral-sh/uv) (or `uv self update` if already installed)
|
| 65 |
+
|
| 66 |
+
### Clone & Configure
|
| 67 |
+
|
| 68 |
+
```bash
|
| 69 |
+
git clone https://github.com/Alishahryar1/free-claude-code.git
|
| 70 |
+
cd free-claude-code
|
| 71 |
+
cp .env.example .env
|
| 72 |
+
```
|
| 73 |
+
|
| 74 |
+
Choose your provider and edit `.env`:
|
| 75 |
+
|
| 76 |
+
<details>
|
| 77 |
+
<summary><b>NVIDIA NIM</b> (40 req/min free, recommended)</summary>
|
| 78 |
+
|
| 79 |
+
```dotenv
|
| 80 |
+
NVIDIA_NIM_API_KEY="nvapi-your-key-here"
|
| 81 |
+
|
| 82 |
+
MODEL_OPUS="nvidia_nim/z-ai/glm4.7"
|
| 83 |
+
MODEL_SONNET="nvidia_nim/moonshotai/kimi-k2-thinking"
|
| 84 |
+
MODEL_HAIKU="nvidia_nim/stepfun-ai/step-3.5-flash"
|
| 85 |
+
MODEL="nvidia_nim/z-ai/glm4.7" # fallback
|
| 86 |
+
|
| 87 |
+
# Enable for thinking models (kimi, nemotron). Leave false for others (e.g. Mistral).
|
| 88 |
+
NIM_ENABLE_THINKING=true
|
| 89 |
+
```
|
| 90 |
+
|
| 91 |
+
</details>
|
| 92 |
+
|
| 93 |
+
<details>
|
| 94 |
+
<summary><b>OpenRouter</b> (hundreds of models)</summary>
|
| 95 |
+
|
| 96 |
+
```dotenv
|
| 97 |
+
OPENROUTER_API_KEY="sk-or-your-key-here"
|
| 98 |
+
|
| 99 |
+
MODEL_OPUS="open_router/deepseek/deepseek-r1-0528:free"
|
| 100 |
+
MODEL_SONNET="open_router/openai/gpt-oss-120b:free"
|
| 101 |
+
MODEL_HAIKU="open_router/stepfun/step-3.5-flash:free"
|
| 102 |
+
MODEL="open_router/stepfun/step-3.5-flash:free" # fallback
|
| 103 |
+
```
|
| 104 |
+
|
| 105 |
+
</details>
|
| 106 |
+
|
| 107 |
+
<details>
|
| 108 |
+
<summary><b>LM Studio</b> (fully local, no API key)</summary>
|
| 109 |
+
|
| 110 |
+
```dotenv
|
| 111 |
+
MODEL_OPUS="lmstudio/unsloth/MiniMax-M2.5-GGUF"
|
| 112 |
+
MODEL_SONNET="lmstudio/unsloth/Qwen3.5-35B-A3B-GGUF"
|
| 113 |
+
MODEL_HAIKU="lmstudio/unsloth/GLM-4.7-Flash-GGUF"
|
| 114 |
+
MODEL="lmstudio/unsloth/GLM-4.7-Flash-GGUF" # fallback
|
| 115 |
+
```
|
| 116 |
+
|
| 117 |
+
</details>
|
| 118 |
+
|
| 119 |
+
<details>
|
| 120 |
+
<summary><b>llama.cpp</b> (fully local, no API key)</summary>
|
| 121 |
+
|
| 122 |
+
```dotenv
|
| 123 |
+
LLAMACPP_BASE_URL="http://localhost:8080/v1"
|
| 124 |
+
|
| 125 |
+
MODEL_OPUS="llamacpp/local-model"
|
| 126 |
+
MODEL_SONNET="llamacpp/local-model"
|
| 127 |
+
MODEL_HAIKU="llamacpp/local-model"
|
| 128 |
+
MODEL="llamacpp/local-model"
|
| 129 |
+
```
|
| 130 |
+
|
| 131 |
+
</details>
|
| 132 |
+
|
| 133 |
+
<details>
|
| 134 |
+
<summary><b>Mix providers</b></summary>
|
| 135 |
+
|
| 136 |
+
Each `MODEL_*` variable can use a different provider. `MODEL` is the fallback for unrecognized Claude models.
|
| 137 |
+
|
| 138 |
+
```dotenv
|
| 139 |
+
NVIDIA_NIM_API_KEY="nvapi-your-key-here"
|
| 140 |
+
OPENROUTER_API_KEY="sk-or-your-key-here"
|
| 141 |
+
|
| 142 |
+
MODEL_OPUS="nvidia_nim/moonshotai/kimi-k2.5"
|
| 143 |
+
MODEL_SONNET="open_router/deepseek/deepseek-r1-0528:free"
|
| 144 |
+
MODEL_HAIKU="lmstudio/unsloth/GLM-4.7-Flash-GGUF"
|
| 145 |
+
MODEL="nvidia_nim/z-ai/glm4.7" # fallback
|
| 146 |
+
```
|
| 147 |
+
|
| 148 |
+
</details>
|
| 149 |
+
|
| 150 |
+
<details>
|
| 151 |
+
<summary><b>Optional Authentication</b> (restrict access to your proxy)</summary>
|
| 152 |
+
|
| 153 |
+
Set `ANTHROPIC_AUTH_TOKEN` in `.env` to require clients to authenticate:
|
| 154 |
+
|
| 155 |
+
```dotenv
|
| 156 |
+
ANTHROPIC_AUTH_TOKEN="your-secret-token-here"
|
| 157 |
+
```
|
| 158 |
+
|
| 159 |
+
**How it works:**
|
| 160 |
+
- If `ANTHROPIC_AUTH_TOKEN` is empty (default), no authentication is required (backward compatible)
|
| 161 |
+
- If set, clients must provide the same token via the `ANTHROPIC_AUTH_TOKEN` header
|
| 162 |
+
- For private Hugging Face Spaces, query auth is supported as `?psw=token`, `?psw:token`, or `?psw%3Atoken`
|
| 163 |
+
- The `claude-pick` script automatically reads the token from `.env` if configured
|
| 164 |
+
|
| 165 |
+
**Example usage:**
|
| 166 |
+
```bash
|
| 167 |
+
# With authentication
|
| 168 |
+
ANTHROPIC_AUTH_TOKEN="your-secret-token-here" \
|
| 169 |
+
ANTHROPIC_BASE_URL="http://localhost:8082" claude
|
| 170 |
+
|
| 171 |
+
# Hugging Face private Space (query auth in URL)
|
| 172 |
+
ANTHROPIC_API_KEY="Jack@188" \
|
| 173 |
+
ANTHROPIC_BASE_URL="https://<your-space>.hf.space?psw:Jack%40188" claude
|
| 174 |
+
|
| 175 |
+
# claude-pick automatically uses the configured token
|
| 176 |
+
claude-pick
|
| 177 |
+
```
|
| 178 |
+
|
| 179 |
+
Note: `HEAD /` returning `405 Method Not Allowed` means auth already passed; only `GET /` is implemented.
|
| 180 |
+
|
| 181 |
+
Use this feature if:
|
| 182 |
+
- Running the proxy on a public network
|
| 183 |
+
- Sharing the server with others but restricting access
|
| 184 |
+
- Wanting an additional layer of security
|
| 185 |
+
|
| 186 |
+
</details>
|
| 187 |
+
|
| 188 |
+
### Run It
|
| 189 |
+
|
| 190 |
+
**Terminal 1:** Start the proxy server:
|
| 191 |
+
|
| 192 |
+
```bash
|
| 193 |
+
uv run uvicorn server:app --host 0.0.0.0 --port 8082
|
| 194 |
+
```
|
| 195 |
+
|
| 196 |
+
**Terminal 2:** Run Claude Code:
|
| 197 |
+
|
| 198 |
+
#### Powershell
|
| 199 |
+
```powershell
|
| 200 |
+
$env:ANTHROPIC_BASE_URL="http://localhost:8082?psw:Jack%40188"; $env:ANTHROPIC_API_KEY="Jack@188"; claude
|
| 201 |
+
```
|
| 202 |
+
#### Bash
|
| 203 |
+
```bash
|
| 204 |
+
export ANTHROPIC_BASE_URL="http://localhost:8082?psw:Jack%40188"; export ANTHROPIC_API_KEY="Jack@188"; claude
|
| 205 |
+
```
|
| 206 |
+
|
| 207 |
+
That's it! Claude Code now uses your configured provider for free.
|
| 208 |
+
|
| 209 |
+
### One-Click Factory Reset (Space Admin)
|
| 210 |
+
|
| 211 |
+
Open the admin page:
|
| 212 |
+
|
| 213 |
+
- Local: `http://localhost:8082/admin/factory-reset?psw:Jack%40188`
|
| 214 |
+
- Space: `https://<your-space>.hf.space/admin/factory-reset?psw:Jack%40188`
|
| 215 |
+
|
| 216 |
+
Click **Factory Restart** to clear runtime cache + workspace data and restart the server.
|
| 217 |
+
|
| 218 |
+
<details>
|
| 219 |
+
<summary><b>VSCode Extension Setup</b></summary>
|
| 220 |
+
|
| 221 |
+
1. Start the proxy server (same as above).
|
| 222 |
+
2. Open Settings (`Ctrl + ,`) and search for `claude-code.environmentVariables`.
|
| 223 |
+
3. Click **Edit in settings.json** and add:
|
| 224 |
+
|
| 225 |
+
```json
|
| 226 |
+
"claudeCode.environmentVariables": [
|
| 227 |
+
{ "name": "ANTHROPIC_BASE_URL", "value": "http://localhost:8082" },
|
| 228 |
+
{ "name": "ANTHROPIC_AUTH_TOKEN", "value": "freecc" }
|
| 229 |
+
]
|
| 230 |
+
```
|
| 231 |
+
|
| 232 |
+
4. Reload extensions.
|
| 233 |
+
5. **If you see the login screen**: Click **Anthropic Console**, then authorize. The extension will start working. You may be redirected to buy credits in the browser; ignore it — the extension already works.
|
| 234 |
+
|
| 235 |
+
To switch back to Anthropic models, comment out the added block and reload extensions.
|
| 236 |
+
|
| 237 |
+
</details>
|
| 238 |
+
|
| 239 |
+
<details>
|
| 240 |
+
<summary><b>Multi-Model Support (Model Picker)</b></summary>
|
| 241 |
+
|
| 242 |
+
`claude-pick` is an interactive model selector that lets you choose any model from your active provider each time you launch Claude, without editing `MODEL` in `.env`.
|
| 243 |
+
|
| 244 |
+
https://github.com/user-attachments/assets/9a33c316-90f8-4418-9650-97e7d33ad645
|
| 245 |
+
|
| 246 |
+
**1. Install [fzf](https://github.com/junegunn/fzf)**:
|
| 247 |
+
|
| 248 |
+
```bash
|
| 249 |
+
brew install fzf # macOS/Linux
|
| 250 |
+
```
|
| 251 |
+
|
| 252 |
+
**2. Add the alias to `~/.zshrc` or `~/.bashrc`:**
|
| 253 |
+
|
| 254 |
+
```bash
|
| 255 |
+
alias claude-pick="/absolute/path/to/free-claude-code/claude-pick"
|
| 256 |
+
```
|
| 257 |
+
|
| 258 |
+
Then reload your shell (`source ~/.zshrc` or `source ~/.bashrc`) and run `claude-pick`.
|
| 259 |
+
|
| 260 |
+
**Or use a fixed model alias** (no picker needed):
|
| 261 |
+
|
| 262 |
+
```bash
|
| 263 |
+
alias claude-kimi='ANTHROPIC_BASE_URL="http://localhost:8082" ANTHROPIC_AUTH_TOKEN="freecc:moonshotai/kimi-k2.5" claude'
|
| 264 |
+
```
|
| 265 |
+
|
| 266 |
+
</details>
|
| 267 |
+
|
| 268 |
+
### Install as a Package (no clone needed)
|
| 269 |
+
|
| 270 |
+
```bash
|
| 271 |
+
uv tool install git+https://github.com/Alishahryar1/free-claude-code.git
|
| 272 |
+
fcc-init # creates ~/.config/free-claude-code/.env from the built-in template
|
| 273 |
+
```
|
| 274 |
+
|
| 275 |
+
Edit `~/.config/free-claude-code/.env` with your API keys and model names, then:
|
| 276 |
+
|
| 277 |
+
```bash
|
| 278 |
+
free-claude-code # starts the server
|
| 279 |
+
```
|
| 280 |
+
|
| 281 |
+
> To update: `uv tool upgrade free-claude-code`
|
| 282 |
+
|
| 283 |
+
---
|
| 284 |
+
|
| 285 |
+
## How It Works
|
| 286 |
+
|
| 287 |
+
```
|
| 288 |
+
┌─────────────────┐ ┌──────────────────────┐ ┌──────────────────┐
|
| 289 |
+
│ Claude Code │───────>│ Free Claude Code │───────>│ LLM Provider │
|
| 290 |
+
│ CLI / VSCode │<───────│ Proxy (:8082) │<───────│ NIM / OR / LMS │
|
| 291 |
+
└─────────────────┘ └──────────────────────┘ └──────────────────┘
|
| 292 |
+
Anthropic API OpenAI-compatible
|
| 293 |
+
format (SSE) format (SSE)
|
| 294 |
+
```
|
| 295 |
+
|
| 296 |
+
- **Transparent proxy**: Claude Code sends standard Anthropic API requests; the proxy forwards them to your configured provider
|
| 297 |
+
- **Per-model routing**: Opus / Sonnet / Haiku requests resolve to their model-specific backend, with `MODEL` as fallback
|
| 298 |
+
- **Request optimization**: 5 categories of trivial requests (quota probes, title generation, prefix detection, suggestions, filepath extraction) are intercepted and responded to locally without using API quota
|
| 299 |
+
- **Format translation**: Requests are translated from Anthropic format to the provider's OpenAI-compatible format and streamed back
|
| 300 |
+
- **Thinking tokens**: `<think>` tags and `reasoning_content` fields are converted into native Claude thinking blocks
|
| 301 |
+
|
| 302 |
+
---
|
| 303 |
+
|
| 304 |
+
## Providers
|
| 305 |
+
|
| 306 |
+
| Provider | Cost | Rate Limit | Best For |
|
| 307 |
+
| -------------- | ------------ | ---------- | ------------------------------------ |
|
| 308 |
+
| **NVIDIA NIM** | Free | 40 req/min | Daily driver, generous free tier |
|
| 309 |
+
| **OpenRouter** | Free / Paid | Varies | Model variety, fallback options |
|
| 310 |
+
| **LM Studio** | Free (local) | Unlimited | Privacy, offline use, no rate limits |
|
| 311 |
+
| **llama.cpp** | Free (local) | Unlimited | Lightweight local inference engine |
|
| 312 |
+
|
| 313 |
+
Models use a prefix format: `provider_prefix/model/name`. An invalid prefix causes an error.
|
| 314 |
+
|
| 315 |
+
| Provider | `MODEL` prefix | API Key Variable | Default Base URL |
|
| 316 |
+
| ---------- | ----------------- | -------------------- | ----------------------------- |
|
| 317 |
+
| NVIDIA NIM | `nvidia_nim/...` | `NVIDIA_NIM_API_KEY` | `integrate.api.nvidia.com/v1` |
|
| 318 |
+
| OpenRouter | `open_router/...` | `OPENROUTER_API_KEY` | `openrouter.ai/api/v1` |
|
| 319 |
+
| LM Studio | `lmstudio/...` | (none) | `localhost:1234/v1` |
|
| 320 |
+
| llama.cpp | `llamacpp/...` | (none) | `localhost:8080/v1` |
|
| 321 |
+
|
| 322 |
+
<details>
|
| 323 |
+
<summary><b>NVIDIA NIM models</b></summary>
|
| 324 |
+
|
| 325 |
+
Popular models (full list in [`nvidia_nim_models.json`](nvidia_nim_models.json)):
|
| 326 |
+
|
| 327 |
+
- `nvidia_nim/minimaxai/minimax-m2.5`
|
| 328 |
+
- `nvidia_nim/qwen/qwen3.5-397b-a17b`
|
| 329 |
+
- `nvidia_nim/z-ai/glm5`
|
| 330 |
+
- `nvidia_nim/moonshotai/kimi-k2.5`
|
| 331 |
+
- `nvidia_nim/stepfun-ai/step-3.5-flash`
|
| 332 |
+
|
| 333 |
+
Browse: [build.nvidia.com](https://build.nvidia.com/explore/discover) · Update list: `curl "https://integrate.api.nvidia.com/v1/models" > nvidia_nim_models.json`
|
| 334 |
+
|
| 335 |
+
</details>
|
| 336 |
+
|
| 337 |
+
<details>
|
| 338 |
+
<summary><b>OpenRouter models</b></summary>
|
| 339 |
+
|
| 340 |
+
Popular free models:
|
| 341 |
+
|
| 342 |
+
- `open_router/arcee-ai/trinity-large-preview:free`
|
| 343 |
+
- `open_router/stepfun/step-3.5-flash:free`
|
| 344 |
+
- `open_router/deepseek/deepseek-r1-0528:free`
|
| 345 |
+
- `open_router/openai/gpt-oss-120b:free`
|
| 346 |
+
|
| 347 |
+
Browse: [openrouter.ai/models](https://openrouter.ai/models) · [Free models](https://openrouter.ai/collections/free-models)
|
| 348 |
+
|
| 349 |
+
</details>
|
| 350 |
+
|
| 351 |
+
<details>
|
| 352 |
+
<summary><b>LM Studio models</b></summary>
|
| 353 |
+
|
| 354 |
+
Run models locally with [LM Studio](https://lmstudio.ai). Load a model in the Chat or Developer tab, then set `MODEL` to its identifier.
|
| 355 |
+
|
| 356 |
+
Examples with native tool-use support:
|
| 357 |
+
|
| 358 |
+
- `LiquidAI/LFM2-24B-A2B-GGUF`
|
| 359 |
+
- `unsloth/MiniMax-M2.5-GGUF`
|
| 360 |
+
- `unsloth/GLM-4.7-Flash-GGUF`
|
| 361 |
+
- `unsloth/Qwen3.5-35B-A3B-GGUF`
|
| 362 |
+
|
| 363 |
+
Browse: [model.lmstudio.ai](https://model.lmstudio.ai)
|
| 364 |
+
|
| 365 |
+
</details>
|
| 366 |
+
|
| 367 |
+
<details>
|
| 368 |
+
<summary><b>llama.cpp models</b></summary>
|
| 369 |
+
|
| 370 |
+
Run models locally using `llama-server`. Ensure you have a tool-capable GGUF. Set `MODEL` to whatever arbitrary name you'd like (e.g. `llamacpp/my-model`), as `llama-server` ignores the model name when run via `/v1/messages`.
|
| 371 |
+
|
| 372 |
+
See the Unsloth docs for detailed instructions and capable models:
|
| 373 |
+
[https://unsloth.ai/docs/models/qwen3.5#qwen3.5-small-0.8b-2b-4b-9b](https://unsloth.ai/docs/models/qwen3.5#qwen3.5-small-0.8b-2b-4b-9b)
|
| 374 |
+
|
| 375 |
+
</details>
|
| 376 |
+
|
| 377 |
+
---
|
| 378 |
+
|
| 379 |
+
## Discord Bot
|
| 380 |
+
|
| 381 |
+
Control Claude Code remotely from Discord (or Telegram). Send tasks, watch live progress, and manage multiple concurrent sessions.
|
| 382 |
+
|
| 383 |
+
**Capabilities:**
|
| 384 |
+
|
| 385 |
+
- Tree-based message threading: reply to a message to fork the conversation
|
| 386 |
+
- Session persistence across server restarts
|
| 387 |
+
- Live streaming of thinking tokens, tool calls, and results
|
| 388 |
+
- Unlimited concurrent Claude CLI sessions (concurrency controlled by `PROVIDER_MAX_CONCURRENCY`)
|
| 389 |
+
- Voice notes: send voice messages; they are transcribed and processed as regular prompts
|
| 390 |
+
- Commands: `/stop` (cancel a task; reply to a message to stop only that task), `/clear` (reset all sessions, or reply to clear a branch), `/stats`
|
| 391 |
+
|
| 392 |
+
### Setup
|
| 393 |
+
|
| 394 |
+
1. **Create a Discord Bot**: Go to [Discord Developer Portal](https://discord.com/developers/applications), create an application, add a bot, and copy the token. Enable **Message Content Intent** under Bot settings.
|
| 395 |
+
|
| 396 |
+
2. **Edit `.env`:**
|
| 397 |
+
|
| 398 |
+
```dotenv
|
| 399 |
+
MESSAGING_PLATFORM="discord"
|
| 400 |
+
DISCORD_BOT_TOKEN="your_discord_bot_token"
|
| 401 |
+
ALLOWED_DISCORD_CHANNELS="123456789,987654321"
|
| 402 |
+
```
|
| 403 |
+
|
| 404 |
+
> Enable Developer Mode in Discord (Settings → Advanced), then right-click a channel and "Copy ID". Comma-separate multiple channels. If empty, no channels are allowed.
|
| 405 |
+
|
| 406 |
+
3. **Configure the workspace** (where Claude will operate):
|
| 407 |
+
|
| 408 |
+
```dotenv
|
| 409 |
+
CLAUDE_WORKSPACE="./agent_workspace"
|
| 410 |
+
ALLOWED_DIR="C:/Users/yourname/projects"
|
| 411 |
+
```
|
| 412 |
+
|
| 413 |
+
4. **Start the server:**
|
| 414 |
+
|
| 415 |
+
```bash
|
| 416 |
+
uv run uvicorn server:app --host 0.0.0.0 --port 8082
|
| 417 |
+
```
|
| 418 |
+
|
| 419 |
+
5. **Invite the bot** via OAuth2 URL Generator (scopes: `bot`, permissions: Read Messages, Send Messages, Manage Messages, Read Message History).
|
| 420 |
+
|
| 421 |
+
### Telegram
|
| 422 |
+
|
| 423 |
+
Set `MESSAGING_PLATFORM=telegram` and configure:
|
| 424 |
+
|
| 425 |
+
```dotenv
|
| 426 |
+
TELEGRAM_BOT_TOKEN="123456789:ABCdefGHIjklMNOpqrSTUvwxYZ"
|
| 427 |
+
ALLOWED_TELEGRAM_USER_ID="your_telegram_user_id"
|
| 428 |
+
```
|
| 429 |
+
|
| 430 |
+
Get a token from [@BotFather](https://t.me/BotFather); find your user ID via [@userinfobot](https://t.me/userinfobot).
|
| 431 |
+
|
| 432 |
+
### Voice Notes
|
| 433 |
+
|
| 434 |
+
Send voice messages on Discord or Telegram; they are transcribed and processed as regular prompts.
|
| 435 |
+
|
| 436 |
+
| Backend | Description | API Key |
|
| 437 |
+
| --------------------------- | ------------------------------------------------------------------------------------------------------------- | -------------------- |
|
| 438 |
+
| **Local Whisper** (default) | [Hugging Face Whisper](https://huggingface.co/openai/whisper-large-v3-turbo) — free, offline, CUDA-compatible | not required |
|
| 439 |
+
| **NVIDIA NIM** | Whisper/Parakeet models via gRPC | `NVIDIA_NIM_API_KEY` |
|
| 440 |
+
|
| 441 |
+
**Install the voice extras:**
|
| 442 |
+
|
| 443 |
+
```bash
|
| 444 |
+
# If you cloned the repo:
|
| 445 |
+
uv sync --extra voice_local # Local Whisper
|
| 446 |
+
uv sync --extra voice # NVIDIA NIM
|
| 447 |
+
uv sync --extra voice --extra voice_local # Both
|
| 448 |
+
|
| 449 |
+
# If you installed as a package (no clone):
|
| 450 |
+
uv tool install "free-claude-code[voice_local] @ git+https://github.com/Alishahryar1/free-claude-code.git"
|
| 451 |
+
uv tool install "free-claude-code[voice] @ git+https://github.com/Alishahryar1/free-claude-code.git"
|
| 452 |
+
uv tool install "free-claude-code[voice,voice_local] @ git+https://github.com/Alishahryar1/free-claude-code.git"
|
| 453 |
+
```
|
| 454 |
+
|
| 455 |
+
Configure via `WHISPER_DEVICE` (`cpu` | `cuda` | `nvidia_nim`) and `WHISPER_MODEL`. See the [Configuration](#configuration) table for all voice variables and supported model values.
|
| 456 |
+
|
| 457 |
+
---
|
| 458 |
+
|
| 459 |
+
## Configuration
|
| 460 |
+
|
| 461 |
+
### Core
|
| 462 |
+
|
| 463 |
+
| Variable | Description | Default |
|
| 464 |
+
| -------------------- | --------------------------------------------------------------------- | ------------------------------------------------- |
|
| 465 |
+
| `MODEL` | Fallback model (`provider/model/name` format; invalid prefix → error) | `nvidia_nim/stepfun-ai/step-3.5-flash` |
|
| 466 |
+
| `MODEL_OPUS` | Model for Claude Opus requests (falls back to `MODEL`) | `nvidia_nim/z-ai/glm4.7` |
|
| 467 |
+
| `MODEL_SONNET` | Model for Claude Sonnet requests (falls back to `MODEL`) | `open_router/arcee-ai/trinity-large-preview:free` |
|
| 468 |
+
| `MODEL_HAIKU` | Model for Claude Haiku requests (falls back to `MODEL`) | `open_router/stepfun/step-3.5-flash:free` |
|
| 469 |
+
| `NVIDIA_NIM_API_KEY` | NVIDIA API key | required for NIM |
|
| 470 |
+
| `NIM_ENABLE_THINKING` | Send `chat_template_kwargs` + `reasoning_budget` on NIM requests. Enable for thinking models (kimi, nemotron); leave `false` for others (e.g. Mistral) | `false` |
|
| 471 |
+
| `OPENROUTER_API_KEY` | OpenRouter API key | required for OpenRouter |
|
| 472 |
+
| `LM_STUDIO_BASE_URL` | LM Studio server URL | `http://localhost:1234/v1` |
|
| 473 |
+
| `LLAMACPP_BASE_URL` | llama.cpp server URL | `http://localhost:8080/v1` |
|
| 474 |
+
|
| 475 |
+
### Rate Limiting & Timeouts
|
| 476 |
+
|
| 477 |
+
| Variable | Description | Default |
|
| 478 |
+
| -------------------------- | ----------------------------------------- | ------- |
|
| 479 |
+
| `PROVIDER_RATE_LIMIT` | LLM API requests per window | `40` |
|
| 480 |
+
| `PROVIDER_RATE_WINDOW` | Rate limit window (seconds) | `60` |
|
| 481 |
+
| `PROVIDER_MAX_CONCURRENCY` | Max simultaneous open provider streams | `5` |
|
| 482 |
+
| `HTTP_READ_TIMEOUT` | Read timeout for provider requests (s) | `120` |
|
| 483 |
+
| `HTTP_WRITE_TIMEOUT` | Write timeout for provider requests (s) | `10` |
|
| 484 |
+
| `HTTP_CONNECT_TIMEOUT` | Connect timeout for provider requests (s) | `2` |
|
| 485 |
+
|
| 486 |
+
### Messaging & Voice
|
| 487 |
+
|
| 488 |
+
| Variable | Description | Default |
|
| 489 |
+
| -------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------- |
|
| 490 |
+
| `MESSAGING_PLATFORM` | `discord` or `telegram` | `discord` |
|
| 491 |
+
| `DISCORD_BOT_TOKEN` | Discord bot token | `""` |
|
| 492 |
+
| `ALLOWED_DISCORD_CHANNELS` | Comma-separated channel IDs (empty = none allowed) | `""` |
|
| 493 |
+
| `TELEGRAM_BOT_TOKEN` | Telegram bot token | `""` |
|
| 494 |
+
| `ALLOWED_TELEGRAM_USER_ID` | Allowed Telegram user ID | `""` |
|
| 495 |
+
| `CLAUDE_WORKSPACE` | Directory where the agent operates | `./agent_workspace` |
|
| 496 |
+
| `ALLOWED_DIR` | Allowed directories for the agent | `""` |
|
| 497 |
+
| `MESSAGING_RATE_LIMIT` | Messaging messages per window | `1` |
|
| 498 |
+
| `MESSAGING_RATE_WINDOW` | Messaging window (seconds) | `1` |
|
| 499 |
+
| `VOICE_NOTE_ENABLED` | Enable voice note handling | `true` |
|
| 500 |
+
| `WHISPER_DEVICE` | `cpu` \| `cuda` \| `nvidia_nim` | `cpu` |
|
| 501 |
+
| `WHISPER_MODEL` | Whisper model (local: `tiny`/`base`/`small`/`medium`/`large-v2`/`large-v3`/`large-v3-turbo`; NIM: `openai/whisper-large-v3`, `nvidia/parakeet-ctc-1.1b-asr`, etc.) | `base` |
|
| 502 |
+
| `HF_TOKEN` | Hugging Face token for faster downloads (local Whisper, optional) | — |
|
| 503 |
+
|
| 504 |
+
<details>
|
| 505 |
+
<summary><b>Advanced: Request optimization flags</b></summary>
|
| 506 |
+
|
| 507 |
+
These are enabled by default and intercept trivial Claude Code requests locally to save API quota.
|
| 508 |
+
|
| 509 |
+
| Variable | Description | Default |
|
| 510 |
+
| --------------------------------- | ------------------------------ | ------- |
|
| 511 |
+
| `FAST_PREFIX_DETECTION` | Enable fast prefix detection | `true` |
|
| 512 |
+
| `ENABLE_NETWORK_PROBE_MOCK` | Mock network probe requests | `true` |
|
| 513 |
+
| `ENABLE_TITLE_GENERATION_SKIP` | Skip title generation requests | `true` |
|
| 514 |
+
| `ENABLE_SUGGESTION_MODE_SKIP` | Skip suggestion mode requests | `true` |
|
| 515 |
+
| `ENABLE_FILEPATH_EXTRACTION_MOCK` | Mock filepath extraction | `true` |
|
| 516 |
+
|
| 517 |
+
</details>
|
| 518 |
+
|
| 519 |
+
See [`.env.example`](.env.example) for all supported parameters.
|
| 520 |
+
|
| 521 |
+
---
|
| 522 |
+
|
| 523 |
+
## Development
|
| 524 |
+
|
| 525 |
+
### Project Structure
|
| 526 |
+
|
| 527 |
+
```
|
| 528 |
+
free-claude-code/
|
| 529 |
+
├── server.py # Entry point
|
| 530 |
+
├── api/ # FastAPI routes, request detection, optimization handlers
|
| 531 |
+
├── providers/ # BaseProvider, OpenAICompatibleProvider, NIM, OpenRouter, LM Studio, llamacpp
|
| 532 |
+
│ └── common/ # Shared utils (SSE builder, message converter, parsers, error mapping)
|
| 533 |
+
├── messaging/ # MessagingPlatform ABC + Discord/Telegram bots, session management
|
| 534 |
+
├── config/ # Settings, NIM config, logging
|
| 535 |
+
├── cli/ # CLI session and process management
|
| 536 |
+
└── tests/ # Pytest test suite
|
| 537 |
+
```
|
| 538 |
+
|
| 539 |
+
### Commands
|
| 540 |
+
|
| 541 |
+
```bash
|
| 542 |
+
uv run ruff format # Format code
|
| 543 |
+
uv run ruff check # Lint
|
| 544 |
+
uv run ty check # Type checking
|
| 545 |
+
uv run pytest # Run tests
|
| 546 |
+
```
|
| 547 |
+
|
| 548 |
+
### Extending
|
| 549 |
+
|
| 550 |
+
**Adding an OpenAI-compatible provider** (Groq, Together AI, etc.) — extend `OpenAICompatibleProvider`:
|
| 551 |
+
|
| 552 |
+
```python
|
| 553 |
+
from providers.openai_compat import OpenAICompatibleProvider
|
| 554 |
+
from providers.base import ProviderConfig
|
| 555 |
+
|
| 556 |
+
class MyProvider(OpenAICompatibleProvider):
|
| 557 |
+
def __init__(self, config: ProviderConfig):
|
| 558 |
+
super().__init__(config, provider_name="MYPROVIDER",
|
| 559 |
+
base_url="https://api.example.com/v1", api_key=config.api_key)
|
| 560 |
+
```
|
| 561 |
+
|
| 562 |
+
**Adding a fully custom provider** — extend `BaseProvider` directly and implement `stream_response()`.
|
| 563 |
+
|
| 564 |
+
**Adding a messaging platform** — extend `MessagingPlatform` in `messaging/` and implement `start()`, `stop()`, `send_message()`, `edit_message()`, and `on_message()`.
|
| 565 |
+
|
| 566 |
+
---
|
| 567 |
+
|
| 568 |
+
## Contributing
|
| 569 |
+
|
| 570 |
+
- Report bugs or suggest features via [Issues](https://github.com/Alishahryar1/free-claude-code/issues)
|
| 571 |
+
- Add new LLM providers (Groq, Together AI, etc.)
|
| 572 |
+
- Add new messaging platforms (Slack, etc.)
|
| 573 |
+
- Improve test coverage
|
| 574 |
+
- Not accepting Docker integration PRs for now
|
| 575 |
+
|
| 576 |
+
```bash
|
| 577 |
+
git checkout -b my-feature
|
| 578 |
+
uv run ruff format && uv run ruff check && uv run ty check && uv run pytest
|
| 579 |
+
# Open a pull request
|
| 580 |
+
```
|
| 581 |
+
|
| 582 |
+
---
|
| 583 |
+
|
| 584 |
+
## License
|
| 585 |
+
|
| 586 |
+
MIT License. See [LICENSE](LICENSE) for details.
|
| 587 |
+
|
| 588 |
+
Built with [FastAPI](https://fastapi.tiangolo.com/), [OpenAI Python SDK](https://github.com/openai/openai-python), [discord.py](https://github.com/Rapptz/discord.py), and [python-telegram-bot](https://python-telegram-bot.org/).
|
Claude_Code/api/__init__.py
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""API layer for Claude Code Proxy."""
|
| 2 |
+
|
| 3 |
+
from .app import app, create_app
|
| 4 |
+
from .dependencies import get_provider, get_provider_for_type
|
| 5 |
+
from .models import (
|
| 6 |
+
MessagesRequest,
|
| 7 |
+
MessagesResponse,
|
| 8 |
+
TokenCountRequest,
|
| 9 |
+
TokenCountResponse,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
"MessagesRequest",
|
| 14 |
+
"MessagesResponse",
|
| 15 |
+
"TokenCountRequest",
|
| 16 |
+
"TokenCountResponse",
|
| 17 |
+
"app",
|
| 18 |
+
"create_app",
|
| 19 |
+
"get_provider",
|
| 20 |
+
"get_provider_for_type",
|
| 21 |
+
]
|
Claude_Code/api/app.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI application factory and configuration."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import os
|
| 5 |
+
from contextlib import asynccontextmanager
|
| 6 |
+
|
| 7 |
+
from fastapi import FastAPI, HTTPException, Request
|
| 8 |
+
from fastapi.responses import JSONResponse
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from config.logging_config import configure_logging
|
| 12 |
+
from config.settings import get_settings
|
| 13 |
+
from providers.exceptions import ProviderError
|
| 14 |
+
|
| 15 |
+
from .dependencies import cleanup_provider, validate_request_api_key
|
| 16 |
+
from .routes import router
|
| 17 |
+
|
| 18 |
+
# Opt-in to future behavior for python-telegram-bot
|
| 19 |
+
os.environ["PTB_TIMEDELTA"] = "1"
|
| 20 |
+
|
| 21 |
+
# Configure logging first (before any module logs)
|
| 22 |
+
_settings = get_settings()
|
| 23 |
+
configure_logging(_settings.log_file)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
_SHUTDOWN_TIMEOUT_S = 5.0
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def _normalize_malformed_query_base_url_request(request: Request) -> None:
    """Normalize malformed request targets when base URL contains query auth.

    Some clients concatenate paths onto a base URL containing query params as plain
    strings, producing targets like:
        /?psw:token/v1/messages?beta=true
    This rewrites them to:
        /v1/messages?psw:token&beta=true
    """
    scope = request.scope
    # Only root-path requests can carry a mis-joined target of this shape.
    if scope.get("path") != "/":
        return

    query = scope.get("query_string", b"").decode("utf-8", errors="ignore")
    if not query or "/v1/" not in query:
        return

    # Everything before "/v1/" is the auth fragment; everything after is the
    # intended path (possibly followed by its own query string).
    auth_fragment, _, tail = query.partition("/v1/")
    if not (auth_fragment and tail):
        return

    path_suffix, _, extra_query = tail.partition("?")

    rewritten_path = f"/v1/{path_suffix}"
    rewritten_query = (
        f"{auth_fragment}&{extra_query}" if extra_query else auth_fragment
    )

    # Patch the ASGI scope in place so routing sees the repaired target.
    scope["path"] = rewritten_path
    scope["raw_path"] = rewritten_path.encode("utf-8")
    scope["query_string"] = rewritten_query.encode("utf-8")
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
async def _best_effort(
    name: str, awaitable, timeout_s: float = _SHUTDOWN_TIMEOUT_S
) -> None:
    """Run a shutdown step with timeout; never raise to callers.

    Args:
        name: Human-readable label for this shutdown step, used in log messages.
        awaitable: The coroutine/awaitable performing the step.
        timeout_s: Maximum seconds to wait before abandoning the step.
    """
    try:
        # NOTE(review): relies on asyncio.TimeoutError being an alias of the
        # builtin TimeoutError (true on Python 3.11+) — confirm minimum version.
        await asyncio.wait_for(awaitable, timeout=timeout_s)
    except TimeoutError:
        # Step exceeded its budget; log and let shutdown continue.
        logger.warning(f"Shutdown step timed out: {name} ({timeout_s}s)")
    except Exception as e:
        # Deliberately swallowed: one failing step must not abort shutdown.
        logger.warning(f"Shutdown step failed: {name}: {type(e).__name__}: {e}")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
@asynccontextmanager
async def lifespan(app: FastAPI):
    """Application lifespan manager.

    Startup: optionally creates a messaging platform (Discord/Telegram) via the
    factory, wires it to a CLI session manager and a persistent session store,
    and restores any saved conversation trees. Shutdown: flushes session state
    and tears components down best-effort so one failure cannot hang exit.
    """
    settings = get_settings()
    logger.info("Starting Claude Code Proxy...")

    # Initialize messaging platform if configured
    messaging_platform = None
    message_handler = None
    cli_manager = None

    try:
        # Use the messaging factory to create the right platform
        from messaging.platforms.factory import create_messaging_platform

        messaging_platform = create_messaging_platform(
            platform_type=settings.messaging_platform,
            bot_token=settings.telegram_bot_token,
            allowed_user_id=settings.allowed_telegram_user_id,
            discord_bot_token=settings.discord_bot_token,
            allowed_discord_channels=settings.allowed_discord_channels,
        )

        if messaging_platform:
            from cli.manager import CLISessionManager
            from messaging.handler import ClaudeMessageHandler
            from messaging.session import SessionStore

            # Setup workspace - CLI runs in allowed_dir if set (e.g. project root)
            workspace = (
                os.path.abspath(settings.allowed_dir)
                if settings.allowed_dir
                else os.getcwd()
            )
            os.makedirs(workspace, exist_ok=True)

            # Session data stored in agent_workspace
            data_path = os.path.abspath(settings.claude_workspace)
            os.makedirs(data_path, exist_ok=True)

            # The CLI talks back to this very server's /v1 endpoint.
            api_url = f"http://{settings.host}:{settings.port}/v1"
            allowed_dirs = [workspace] if settings.allowed_dir else []
            plans_dir_abs = os.path.abspath(
                os.path.join(settings.claude_workspace, "plans")
            )
            # Plans dir is passed relative to the workspace the CLI runs in.
            plans_directory = os.path.relpath(plans_dir_abs, workspace)
            cli_manager = CLISessionManager(
                workspace_path=workspace,
                api_url=api_url,
                allowed_dirs=allowed_dirs,
                plans_directory=plans_directory,
            )

            # Initialize session store
            session_store = SessionStore(
                storage_path=os.path.join(data_path, "sessions.json")
            )

            # Create and register message handler
            message_handler = ClaudeMessageHandler(
                platform=messaging_platform,
                cli_manager=cli_manager,
                session_store=session_store,
            )

            # Restore tree state if available
            saved_trees = session_store.get_all_trees()
            if saved_trees:
                logger.info(f"Restoring {len(saved_trees)} conversation trees...")
                from messaging.trees.queue_manager import TreeQueueManager

                message_handler.replace_tree_queue(
                    TreeQueueManager.from_dict(
                        {
                            "trees": saved_trees,
                            "node_to_tree": session_store.get_node_mapping(),
                        },
                        queue_update_callback=message_handler.update_queue_positions,
                        node_started_callback=message_handler.mark_node_processing,
                    )
                )
                # Reconcile restored state - anything PENDING/IN_PROGRESS is lost across restart
                if message_handler.tree_queue.cleanup_stale_nodes() > 0:
                    # Sync back and save
                    tree_data = message_handler.tree_queue.to_dict()
                    session_store.sync_from_tree_data(
                        tree_data["trees"], tree_data["node_to_tree"]
                    )

            # Wire up the handler
            messaging_platform.on_message(message_handler.handle_message)

            # Start the platform
            await messaging_platform.start()
            logger.info(
                f"{messaging_platform.name} platform started with message handler"
            )

    except ImportError as e:
        # Messaging extras may not be installed; proxy still runs without them.
        logger.warning(f"Messaging module import error: {e}")
    except Exception as e:
        logger.error(f"Failed to start messaging platform: {e}")
        import traceback

        logger.error(traceback.format_exc())

    # Store in app state for access in routes
    app.state.messaging_platform = messaging_platform
    app.state.message_handler = message_handler
    app.state.cli_manager = cli_manager

    yield

    # Cleanup
    if message_handler and hasattr(message_handler, "session_store"):
        try:
            message_handler.session_store.flush_pending_save()
        except Exception as e:
            logger.warning(f"Session store flush on shutdown: {e}")
    logger.info("Shutdown requested, cleaning up...")
    # Each step is time-boxed and non-raising (see _best_effort).
    if messaging_platform:
        await _best_effort("messaging_platform.stop", messaging_platform.stop())
    if cli_manager:
        await _best_effort("cli_manager.stop_all", cli_manager.stop_all())
    await _best_effort("cleanup_provider", cleanup_provider())

    # Ensure background limiter worker doesn't keep the loop alive.
    try:
        from messaging.limiter import MessagingRateLimiter

        await _best_effort(
            "MessagingRateLimiter.shutdown_instance",
            MessagingRateLimiter.shutdown_instance(),
            timeout_s=2.0,
        )
    except Exception:
        # Limiter may never have been imported/initialized.
        pass

    logger.info("Server shut down cleanly")
|
| 215 |
+
|
| 216 |
+
|
| 217 |
+
def create_app() -> FastAPI:
    """Create and configure the FastAPI application.

    Builds the app with the lifespan manager attached, installs an API-key
    enforcement middleware, mounts the API router, and registers exception
    handlers that translate errors into Anthropic-style payloads.
    """
    application = FastAPI(
        title="Claude Code Proxy",
        version="2.0.0",
        lifespan=lifespan,
    )

    @application.middleware("http")
    async def enforce_api_key(request: Request, call_next):
        """Enforce API key for every request before routing/method matching."""
        # Repair mis-joined request targets before auth inspects them.
        _normalize_malformed_query_base_url_request(request)
        try:
            validate_request_api_key(request, get_settings())
        except HTTPException as exc:
            # Short-circuit with the auth failure instead of routing further.
            return JSONResponse(
                status_code=exc.status_code,
                content={"detail": exc.detail},
            )
        return await call_next(request)

    # Register routes
    application.include_router(router)

    # Exception handlers
    @application.exception_handler(ProviderError)
    async def on_provider_error(request: Request, exc: ProviderError):
        """Handle provider-specific errors and return Anthropic format."""
        logger.error(f"Provider Error: {exc.error_type} - {exc.message}")
        return JSONResponse(
            status_code=exc.status_code,
            content=exc.to_anthropic_format(),
        )

    @application.exception_handler(Exception)
    async def on_unhandled_error(request: Request, exc: Exception):
        """Handle general errors and return Anthropic format."""
        logger.error(f"General Error: {exc!s}")
        import traceback

        logger.error(traceback.format_exc())
        # Generic message to the client; the full traceback stays in the log.
        return JSONResponse(
            status_code=500,
            content={
                "type": "error",
                "error": {
                    "type": "api_error",
                    "message": "An unexpected error occurred.",
                },
            },
        )

    return application
|
| 270 |
+
|
| 271 |
+
|
| 272 |
+
# Default app instance for uvicorn
|
| 273 |
+
app = create_app()
|
Claude_Code/api/command_utils.py
ADDED
|
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Command parsing utilities for API optimizations."""
|
| 2 |
+
|
| 3 |
+
import shlex
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def extract_command_prefix(command: str) -> str:
    """Extract the command prefix for fast prefix detection.

    Parses a shell command safely, handling environment variables and
    command injection attempts. Returns the command prefix suitable
    for quick identification.

    Returns:
        Command prefix (e.g., "git", "git commit", "npm install")
        or "none" if no valid command found
    """
    # Backtick / $() substitution cannot be classified safely.
    if "`" in command or "$(" in command:
        return "command_injection_detected"

    try:
        tokens = shlex.split(command, posix=False)
    except ValueError:
        # Unbalanced quotes etc. -- fall back to a naive whitespace split.
        fallback = command.split()
        return fallback[0] if fallback else "none"

    if not tokens:
        return "none"

    # Leading NAME=value assignments form the environment prefix.
    assignments: list[str] = []
    idx = 0
    while (
        idx < len(tokens)
        and "=" in tokens[idx]
        and not tokens[idx].startswith("-")
    ):
        assignments.append(tokens[idx])
        idx += 1

    remaining = tokens[idx:]
    if not remaining:
        return "none"

    head = remaining[0]
    # Tools whose first subcommand word is part of the meaningful prefix.
    subcommand_tools = {
        "git",
        "npm",
        "docker",
        "kubectl",
        "cargo",
        "go",
        "pip",
        "yarn",
    }

    if head in subcommand_tools and len(remaining) > 1:
        sub = remaining[1]
        # A flag (e.g. "git -C ...") is not a subcommand.
        return head if sub.startswith("-") else f"{head} {sub}"

    # Env assignments are only kept for non-subcommand tools (matches the
    # original behavior of this function).
    return " ".join([*assignments, head]) if assignments else head
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
def extract_filepaths_from_command(command: str, output: str) -> str:
|
| 65 |
+
"""Extract file paths from a command locally without API call.
|
| 66 |
+
|
| 67 |
+
Determines if the command reads file contents and extracts paths accordingly.
|
| 68 |
+
Commands like ls/dir/find just list files, so return empty.
|
| 69 |
+
Commands like cat/head/tail actually read contents, so extract the file path.
|
| 70 |
+
|
| 71 |
+
Returns:
|
| 72 |
+
Filepath extraction result in <filepaths> format
|
| 73 |
+
"""
|
| 74 |
+
listing_commands = {
|
| 75 |
+
"ls",
|
| 76 |
+
"dir",
|
| 77 |
+
"find",
|
| 78 |
+
"tree",
|
| 79 |
+
"pwd",
|
| 80 |
+
"cd",
|
| 81 |
+
"mkdir",
|
| 82 |
+
"rmdir",
|
| 83 |
+
"rm",
|
| 84 |
+
}
|
| 85 |
+
|
| 86 |
+
reading_commands = {"cat", "head", "tail", "less", "more", "bat", "type"}
|
| 87 |
+
|
| 88 |
+
try:
|
| 89 |
+
parts = shlex.split(command, posix=False)
|
| 90 |
+
if not parts:
|
| 91 |
+
return "<filepaths>\n</filepaths>"
|
| 92 |
+
|
| 93 |
+
base_cmd = parts[0].split("/")[-1].split("\\")[-1].lower()
|
| 94 |
+
|
| 95 |
+
if base_cmd in listing_commands:
|
| 96 |
+
return "<filepaths>\n</filepaths>"
|
| 97 |
+
|
| 98 |
+
if base_cmd in reading_commands:
|
| 99 |
+
filepaths = []
|
| 100 |
+
for part in parts[1:]:
|
| 101 |
+
if part.startswith("-"):
|
| 102 |
+
continue
|
| 103 |
+
filepaths.append(part)
|
| 104 |
+
|
| 105 |
+
if filepaths:
|
| 106 |
+
paths_str = "\n".join(filepaths)
|
| 107 |
+
return f"<filepaths>\n{paths_str}\n</filepaths>"
|
| 108 |
+
return "<filepaths>\n</filepaths>"
|
| 109 |
+
|
| 110 |
+
if base_cmd == "grep":
|
| 111 |
+
flags_with_args = {"-e", "-f", "-m", "-A", "-B", "-C"}
|
| 112 |
+
pattern_provided_via_flag = False
|
| 113 |
+
positional: list[str] = []
|
| 114 |
+
|
| 115 |
+
skip_next = False
|
| 116 |
+
for part in parts[1:]:
|
| 117 |
+
if skip_next:
|
| 118 |
+
skip_next = False
|
| 119 |
+
continue
|
| 120 |
+
|
| 121 |
+
if part.startswith("-"):
|
| 122 |
+
if part in flags_with_args:
|
| 123 |
+
if part in {"-e", "-f"}:
|
| 124 |
+
pattern_provided_via_flag = True
|
| 125 |
+
skip_next = True
|
| 126 |
+
continue
|
| 127 |
+
|
| 128 |
+
positional.append(part)
|
| 129 |
+
|
| 130 |
+
filepaths = positional if pattern_provided_via_flag else positional[1:]
|
| 131 |
+
if filepaths:
|
| 132 |
+
paths_str = "\n".join(filepaths)
|
| 133 |
+
return f"<filepaths>\n{paths_str}\n</filepaths>"
|
| 134 |
+
return "<filepaths>\n</filepaths>"
|
| 135 |
+
|
| 136 |
+
return "<filepaths>\n</filepaths>"
|
| 137 |
+
|
| 138 |
+
except Exception:
|
| 139 |
+
return "<filepaths>\n</filepaths>"
|
Claude_Code/api/dependencies.py
ADDED
|
@@ -0,0 +1,226 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Dependency injection for FastAPI."""
|
| 2 |
+
|
| 3 |
+
from urllib.parse import unquote_plus
|
| 4 |
+
|
| 5 |
+
from fastapi import Depends, HTTPException, Request
|
| 6 |
+
from loguru import logger
|
| 7 |
+
|
| 8 |
+
from config.settings import Settings
|
| 9 |
+
from config.settings import get_settings as _get_settings
|
| 10 |
+
from providers.base import BaseProvider, ProviderConfig
|
| 11 |
+
from providers.common import get_user_facing_error_message
|
| 12 |
+
from providers.exceptions import AuthenticationError
|
| 13 |
+
from providers.llamacpp import LlamaCppProvider
|
| 14 |
+
from providers.lmstudio import LMStudioProvider
|
| 15 |
+
from providers.nvidia_nim import NVIDIA_NIM_BASE_URL, NvidiaNimProvider
|
| 16 |
+
from providers.open_router import OPENROUTER_BASE_URL, OpenRouterProvider
|
| 17 |
+
|
| 18 |
+
# Provider registry: keyed by provider type string, lazily populated
|
| 19 |
+
_providers: dict[str, BaseProvider] = {}
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def get_settings() -> Settings:
    """Return application settings for dependency injection.

    Delegates to ``config.settings.get_settings``.
    """
    return _get_settings()
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def _create_provider_for_type(provider_type: str, settings: Settings) -> BaseProvider:
    """Construct and return a new provider instance for the given provider type.

    Args:
        provider_type: One of 'nvidia_nim', 'open_router', 'lmstudio', 'llamacpp'.
        settings: Application settings supplying keys, URLs, and limits.

    Raises:
        AuthenticationError: If a required API key is missing or blank.
        ValueError: If provider_type is not a supported value.
    """

    def _config(api_key: str, base_url: str) -> ProviderConfig:
        # All providers share the same rate-limit / concurrency / timeout knobs;
        # only the credential and endpoint differ.
        return ProviderConfig(
            api_key=api_key,
            base_url=base_url,
            rate_limit=settings.provider_rate_limit,
            rate_window=settings.provider_rate_window,
            max_concurrency=settings.provider_max_concurrency,
            http_read_timeout=settings.http_read_timeout,
            http_write_timeout=settings.http_write_timeout,
            http_connect_timeout=settings.http_connect_timeout,
        )

    if provider_type == "nvidia_nim":
        if not settings.nvidia_nim_api_key or not settings.nvidia_nim_api_key.strip():
            raise AuthenticationError(
                "NVIDIA_NIM_API_KEY is not set. Add it to your .env file. "
                "Get a key at https://build.nvidia.com/settings/api-keys"
            )
        return NvidiaNimProvider(
            _config(settings.nvidia_nim_api_key, NVIDIA_NIM_BASE_URL),
            nim_settings=settings.nim,
        )
    if provider_type == "open_router":
        if not settings.open_router_api_key or not settings.open_router_api_key.strip():
            raise AuthenticationError(
                "OPENROUTER_API_KEY is not set. Add it to your .env file. "
                "Get a key at https://openrouter.ai/keys"
            )
        return OpenRouterProvider(
            _config(settings.open_router_api_key, OPENROUTER_BASE_URL)
        )
    if provider_type == "lmstudio":
        # LM Studio ignores the key; any non-empty value works.
        return LMStudioProvider(_config("lm-studio", settings.lm_studio_base_url))
    if provider_type == "llamacpp":
        # llama.cpp's OpenAI-compatible server likewise needs only a placeholder key.
        return LlamaCppProvider(_config("llamacpp", settings.llamacpp_base_url))

    logger.error(
        "Unknown provider_type: '{}'. Supported: 'nvidia_nim', 'open_router', 'lmstudio', 'llamacpp'",
        provider_type,
    )
    raise ValueError(
        f"Unknown provider_type: '{provider_type}'. "
        "Supported: 'nvidia_nim', 'open_router', 'lmstudio', 'llamacpp'"
    )
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def get_provider_for_type(provider_type: str) -> BaseProvider:
    """Get or create a provider for the given provider type.

    Providers are cached in the registry and reused across requests.

    Raises:
        HTTPException: 503 when provider construction fails authentication
            (e.g. missing API key).
    """
    if provider_type not in _providers:
        try:
            _providers[provider_type] = _create_provider_for_type(
                provider_type, get_settings()
            )
        except AuthenticationError as e:
            raise HTTPException(
                status_code=503, detail=get_user_facing_error_message(e)
            ) from e
        # Log only when a provider is actually constructed; previously this
        # fired on every cache hit too, flooding the log at INFO level.
        logger.info("Provider initialized: {}", provider_type)
    return _providers[provider_type]
|
| 113 |
+
|
| 114 |
+
|
| 115 |
+
def validate_request_api_key(request: Request, settings: Settings) -> None:
    """Validate a request against the configured server API key.

    Accepts the token via the `x-api-key`, `Authorization: Bearer ...`, or
    `anthropic-auth-token` headers, or via the `psw` query parameter, and
    compares it to `Settings.anthropic_auth_token`. When no server token is
    configured this is a no-op.

    Query-parameter auth supports Hugging Face Spaces private deployments:
    - Append `?psw=your-token` to the base URL
    - Or `?psw:your-token` (URL-encoded colon becomes %3A)

    Raises:
        HTTPException: 401 when the token is absent or does not match.
    """
    expected = settings.anthropic_auth_token
    if not expected:
        # Auth disabled: no server-side key configured.
        return

    # Hugging Face private Spaces sign browser page loads; let those through
    # for UI pages so Space shell probes don't 401. API routes stay protected.
    if _is_hf_signed_page_request(request):
        return

    header_value = (
        request.headers.get("x-api-key")
        or request.headers.get("authorization")
        or request.headers.get("anthropic-auth-token")
    )

    if header_value:
        token = header_value
        # Accept either a raw key or an Authorization Bearer token.
        if token.lower().startswith("bearer "):
            token = token.split(" ", 1)[1]
        # Tokens may carry an appended ":model-name"; keep only the key part.
        if token and ":" in token:
            token = token.split(":", 1)[0]
    else:
        token = _extract_query_token(request)

    if not token:
        raise HTTPException(status_code=401, detail="Missing API key")
    if token != expected:
        raise HTTPException(status_code=401, detail="Invalid API key")
|
| 159 |
+
|
| 160 |
+
|
| 161 |
+
def _extract_query_token(request: Request) -> str | None:
|
| 162 |
+
"""Extract auth token from query string for private proxy deployments."""
|
| 163 |
+
query_params = request.query_params
|
| 164 |
+
if "psw" in query_params:
|
| 165 |
+
token = query_params["psw"]
|
| 166 |
+
if token and ":" in token:
|
| 167 |
+
return token.split(":", 1)[0]
|
| 168 |
+
return token or None
|
| 169 |
+
|
| 170 |
+
raw_query_bytes = request.scope.get("query_string", b"")
|
| 171 |
+
raw_query = raw_query_bytes.decode("utf-8", errors="ignore")
|
| 172 |
+
if not raw_query:
|
| 173 |
+
return None
|
| 174 |
+
|
| 175 |
+
for part in raw_query.split("&"):
|
| 176 |
+
if part.startswith("psw:"):
|
| 177 |
+
token = unquote_plus(part[len("psw:") :])
|
| 178 |
+
if token and ":" in token:
|
| 179 |
+
return token.split(":", 1)[0]
|
| 180 |
+
return token or None
|
| 181 |
+
if part.startswith("psw%3A") or part.startswith("psw%3a"):
|
| 182 |
+
token = unquote_plus(part[len("psw%3A") :])
|
| 183 |
+
if token and ":" in token:
|
| 184 |
+
return token.split(":", 1)[0]
|
| 185 |
+
return token or None
|
| 186 |
+
|
| 187 |
+
return None
|
| 188 |
+
|
| 189 |
+
|
| 190 |
+
def _is_hf_signed_page_request(request: Request) -> bool:
|
| 191 |
+
"""Return True for Hugging Face signed browser requests to non-API pages."""
|
| 192 |
+
if request.method not in {"GET", "HEAD"}:
|
| 193 |
+
return False
|
| 194 |
+
|
| 195 |
+
if request.url.path.startswith("/v1/"):
|
| 196 |
+
return False
|
| 197 |
+
|
| 198 |
+
if "__sign" not in request.query_params:
|
| 199 |
+
return False
|
| 200 |
+
|
| 201 |
+
accept = request.headers.get("accept", "").lower()
|
| 202 |
+
return "text/html" in accept or "*/*" in accept
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def require_api_key(
    request: Request, settings: Settings = Depends(get_settings)
) -> None:
    """FastAPI dependency that rejects requests lacking a valid API key."""
    # Delegates to the plain function so the check is also callable outside DI.
    validate_request_api_key(request, settings)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
def get_provider() -> BaseProvider:
    """Get or create the default provider (based on MODEL env var).

    Backward-compatible convenience for health/root endpoints and tests.
    """
    default_type = get_settings().provider_type
    return get_provider_for_type(default_type)
|
| 218 |
+
|
| 219 |
+
|
| 220 |
+
async def cleanup_provider():
    """Release all cached provider resources and reset the registry."""
    global _providers
    # Close each provider's resources before dropping the registry mapping.
    for cached_provider in _providers.values():
        await cached_provider.cleanup()
    _providers = {}
    logger.debug("Provider cleanup completed")
|
Claude_Code/api/detection.py
ADDED
|
@@ -0,0 +1,130 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Request detection utilities for API optimizations.
|
| 2 |
+
|
| 3 |
+
Detects quota checks, title generation, prefix detection, suggestion mode,
|
| 4 |
+
and filepath extraction requests to enable fast-path responses.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from providers.common.text import extract_text_from_content
|
| 8 |
+
|
| 9 |
+
from .models.anthropic import MessagesRequest
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def is_quota_check_request(request_data: MessagesRequest) -> bool:
    """Return True for quota probe requests.

    A quota probe is a single user message with ``max_tokens == 1`` whose
    text mentions the word "quota".
    """
    messages = request_data.messages
    if request_data.max_tokens != 1 or len(messages) != 1:
        return False
    only_message = messages[0]
    if only_message.role != "user":
        return False
    return "quota" in extract_text_from_content(only_message.content).lower()
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
def is_title_generation_request(request_data: MessagesRequest) -> bool:
    """Return True for conversation title generation requests.

    Detected via a system prompt containing title-extraction instructions
    and the absence of tools.
    """
    if request_data.tools or not request_data.system:
        return False
    lowered_system = extract_text_from_content(request_data.system).lower()
    return all(
        marker in lowered_system for marker in ("new conversation topic", "title")
    )
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
def is_prefix_detection_request(request_data: MessagesRequest) -> tuple[bool, str]:
    """Check if this is a fast prefix detection request.

    Prefix detection requests contain a policy_spec block and
    a Command: section for extracting shell command prefixes.

    Returns:
        Tuple of (is_prefix_request, command_string)
    """
    if len(request_data.messages) != 1 or request_data.messages[0].role != "user":
        return False, ""

    content = extract_text_from_content(request_data.messages[0].content)

    if "<policy_spec>" not in content or "Command:" not in content:
        return False, ""

    # "Command:" is known to be present, so rfind cannot return -1 and the
    # slice cannot raise; the previous try/except here was dead code.
    cmd_start = content.rfind("Command:") + len("Command:")
    return True, content[cmd_start:].strip()
|
| 63 |
+
|
| 64 |
+
|
| 65 |
+
def is_suggestion_mode_request(request_data: MessagesRequest) -> bool:
    """Return True for suggestion mode requests.

    Suggestion mode requests contain "[SUGGESTION MODE:" in a user message,
    used for auto-suggesting what the user might type next.
    """
    return any(
        "[SUGGESTION MODE:" in extract_text_from_content(message.content)
        for message in request_data.messages
        if message.role == "user"
    )
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
def is_filepath_extraction_request(
    request_data: MessagesRequest,
) -> tuple[bool, str, str]:
    """Check if this is a filepath extraction request.

    Filepath extraction requests have a single user message with
    "Command:" and "Output:" sections, asking to extract file paths
    from command output.

    Returns:
        Tuple of (is_filepath_request, command, output)
    """
    # Only single user messages without tools qualify.
    if len(request_data.messages) != 1 or request_data.messages[0].role != "user":
        return False, "", ""
    if request_data.tools:
        return False, "", ""

    content = extract_text_from_content(request_data.messages[0].content)

    if "Command:" not in content or "Output:" not in content:
        return False, "", ""

    # Match if user content OR system block indicates filepath extraction
    user_has_filepaths = (
        "filepaths" in content.lower() or "<filepaths>" in content.lower()
    )
    system_text = (
        extract_text_from_content(request_data.system) if request_data.system else ""
    )
    system_has_extract = (
        "extract any file paths" in system_text.lower()
        or "file paths that this command" in system_text.lower()
    )
    if not user_has_filepaths and not system_has_extract:
        return False, "", ""

    try:
        # Slice the text between "Command:" and the following "Output:".
        cmd_start = content.find("Command:") + len("Command:")
        output_marker = content.find("Output:", cmd_start)
        if output_marker == -1:
            # "Output:" appeared only before "Command:", so no real output section.
            return False, "", ""

        command = content[cmd_start:output_marker].strip()
        output = content[output_marker + len("Output:") :].strip()

        # Trim trailing markup or a blank-line-separated tail from the output.
        for marker in ["<", "\n\n"]:
            if marker in output:
                output = output.split(marker)[0].strip()

        return True, command, output
    except Exception:
        # Best-effort parse: any unexpected failure means "not this request type".
        return False, "", ""
|
Claude_Code/api/models/__init__.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""API models exports."""
|
| 2 |
+
|
| 3 |
+
from .anthropic import (
|
| 4 |
+
ContentBlockImage,
|
| 5 |
+
ContentBlockText,
|
| 6 |
+
ContentBlockThinking,
|
| 7 |
+
ContentBlockToolResult,
|
| 8 |
+
ContentBlockToolUse,
|
| 9 |
+
Message,
|
| 10 |
+
MessagesRequest,
|
| 11 |
+
Role,
|
| 12 |
+
SystemContent,
|
| 13 |
+
ThinkingConfig,
|
| 14 |
+
TokenCountRequest,
|
| 15 |
+
Tool,
|
| 16 |
+
)
|
| 17 |
+
from .responses import MessagesResponse, TokenCountResponse, Usage
|
| 18 |
+
|
| 19 |
+
__all__ = [
|
| 20 |
+
"ContentBlockImage",
|
| 21 |
+
"ContentBlockText",
|
| 22 |
+
"ContentBlockThinking",
|
| 23 |
+
"ContentBlockToolResult",
|
| 24 |
+
"ContentBlockToolUse",
|
| 25 |
+
"Message",
|
| 26 |
+
"MessagesRequest",
|
| 27 |
+
"MessagesResponse",
|
| 28 |
+
"Role",
|
| 29 |
+
"SystemContent",
|
| 30 |
+
"ThinkingConfig",
|
| 31 |
+
"TokenCountRequest",
|
| 32 |
+
"TokenCountResponse",
|
| 33 |
+
"Tool",
|
| 34 |
+
"Usage",
|
| 35 |
+
]
|
Claude_Code/api/models/anthropic.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic models for Anthropic-compatible requests."""
|
| 2 |
+
|
| 3 |
+
from enum import StrEnum
|
| 4 |
+
from typing import Any, Literal
|
| 5 |
+
|
| 6 |
+
from loguru import logger
|
| 7 |
+
from pydantic import BaseModel, field_validator, model_validator
|
| 8 |
+
|
| 9 |
+
from config.settings import Settings, get_settings
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
# =============================================================================
|
| 13 |
+
# Content Block Types
|
| 14 |
+
# =============================================================================
|
| 15 |
+
class Role(StrEnum):
    """Message roles used by the Anthropic-compatible API."""

    user = "user"
    assistant = "assistant"
    system = "system"
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class ContentBlockText(BaseModel):
    """Plain text content block."""

    type: Literal["text"]
    text: str
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class ContentBlockImage(BaseModel):
    """Image content block."""

    type: Literal["image"]
    # Raw image source payload; shape is not validated here (left to the API).
    source: dict[str, Any]
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
class ContentBlockToolUse(BaseModel):
    """Assistant tool invocation block."""

    type: Literal["tool_use"]
    # Unique id used to pair this call with a later tool_result block.
    id: str
    name: str
    input: dict[str, Any]
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class ContentBlockToolResult(BaseModel):
    """Tool execution result block, referencing a prior tool_use id."""

    type: Literal["tool_result"]
    tool_use_id: str
    # Results may be a plain string, a list of blocks, or a structured dict.
    content: str | list[Any] | dict[str, Any]
|
| 42 |
+
|
| 43 |
+
|
| 44 |
+
class ContentBlockThinking(BaseModel):
    """Model thinking (reasoning) content block."""

    type: Literal["thinking"]
    thinking: str
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
class SystemContent(BaseModel):
    """A text block inside a structured system prompt."""

    type: Literal["text"]
    text: str
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# =============================================================================
|
| 55 |
+
# Message Types
|
| 56 |
+
# =============================================================================
|
| 57 |
+
class Message(BaseModel):
    """A single conversation turn in an Anthropic-style request."""

    role: Literal["user", "assistant"]
    # Either a plain string or a list of typed content blocks.
    content: (
        str
        | list[
            ContentBlockText
            | ContentBlockImage
            | ContentBlockToolUse
            | ContentBlockToolResult
            | ContentBlockThinking
        ]
    )
    # NOTE(review): presumably reasoning text attached by reasoning-capable
    # providers; not part of the standard Anthropic message schema -- confirm.
    reasoning_content: str | None = None
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class Tool(BaseModel):
    """Tool definition exposed to the model."""

    name: str
    description: str | None = None
    # JSON Schema describing the tool's input parameters.
    input_schema: dict[str, Any]
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
class ThinkingConfig(BaseModel):
    """Toggle for extended-thinking mode in a request."""

    enabled: bool = True
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
# =============================================================================
|
| 83 |
+
# Request Models
|
| 84 |
+
# =============================================================================
|
| 85 |
+
class MessagesRequest(BaseModel):
    """Anthropic-compatible /v1/messages request body.

    ``original_model`` and ``resolved_provider_model`` are filled in by the
    ``map_model`` validator: ``original_model`` keeps the name the client
    requested and ``model`` is rewritten to the configured provider model.
    """

    model: str
    max_tokens: int | None = None
    messages: list[Message]
    system: str | list[SystemContent] | None = None
    stop_sequences: list[str] | None = None
    stream: bool | None = True
    temperature: float | None = None
    top_p: float | None = None
    top_k: int | None = None
    metadata: dict[str, Any] | None = None
    tools: list[Tool] | None = None
    tool_choice: dict[str, Any] | None = None
    thinking: ThinkingConfig | None = None
    extra_body: dict[str, Any] | None = None
    original_model: str | None = None
    resolved_provider_model: str | None = None

    @model_validator(mode="after")
    def map_model(self) -> "MessagesRequest":
        """Map any Claude model name to the configured model (model-aware)."""
        settings = get_settings()
        # Preserve the model name the client originally asked for.
        if self.original_model is None:
            self.original_model = self.model

        # NOTE(review): resolve_model appears to return a provider-qualified
        # name that parse_model_name reduces to the bare model id -- confirm
        # against config.settings.
        resolved_full = settings.resolve_model(self.original_model)
        self.resolved_provider_model = resolved_full
        self.model = Settings.parse_model_name(resolved_full)

        if self.model != self.original_model:
            logger.debug(f"MODEL MAPPING: '{self.original_model}' -> '{self.model}'")

        return self
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
class TokenCountRequest(BaseModel):
    """Anthropic-compatible /v1/messages/count_tokens request body."""

    model: str
    messages: list[Message]
    system: str | list[SystemContent] | None = None
    tools: list[Tool] | None = None
    thinking: ThinkingConfig | None = None
    tool_choice: dict[str, Any] | None = None

    @field_validator("model")
    @classmethod
    def validate_model_field(cls, v: str, info) -> str:
        """Map any Claude model name to the configured model (model-aware)."""
        settings = get_settings()
        resolved_full = settings.resolve_model(v)
        return Settings.parse_model_name(resolved_full)
|
Claude_Code/api/models/responses.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Pydantic models for API responses."""
|
| 2 |
+
|
| 3 |
+
from typing import Any, Literal
|
| 4 |
+
|
| 5 |
+
from pydantic import BaseModel
|
| 6 |
+
|
| 7 |
+
from .anthropic import ContentBlockText, ContentBlockThinking, ContentBlockToolUse
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
class TokenCountResponse(BaseModel):
    """Response body for the token-counting endpoint."""

    input_tokens: int
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class Usage(BaseModel):
    """Token accounting attached to a message response."""

    input_tokens: int
    output_tokens: int
    # Anthropic-style prompt-cache counters; default to 0 when unreported.
    cache_creation_input_tokens: int = 0
    cache_read_input_tokens: int = 0
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
class MessagesResponse(BaseModel):
    """Anthropic-compatible /v1/messages response body."""

    id: str
    model: str
    role: Literal["assistant"] = "assistant"
    # Typed content blocks; raw dicts are also accepted for flexibility.
    content: list[
        ContentBlockText | ContentBlockToolUse | ContentBlockThinking | dict[str, Any]
    ]
    type: Literal["message"] = "message"
    stop_reason: (
        Literal["end_turn", "max_tokens", "stop_sequence", "tool_use"] | None
    ) = None
    stop_sequence: str | None = None
    usage: Usage
|
Claude_Code/api/optimization_handlers.py
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Optimization handlers for fast-path API responses.
|
| 2 |
+
|
| 3 |
+
Each handler returns a MessagesResponse if the request matches and the
|
| 4 |
+
optimization is enabled, otherwise None.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import uuid
|
| 8 |
+
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from config.settings import Settings
|
| 12 |
+
|
| 13 |
+
from .command_utils import extract_command_prefix, extract_filepaths_from_command
|
| 14 |
+
from .detection import (
|
| 15 |
+
is_filepath_extraction_request,
|
| 16 |
+
is_prefix_detection_request,
|
| 17 |
+
is_quota_check_request,
|
| 18 |
+
is_suggestion_mode_request,
|
| 19 |
+
is_title_generation_request,
|
| 20 |
+
)
|
| 21 |
+
from .models.anthropic import MessagesRequest
|
| 22 |
+
from .models.responses import MessagesResponse, Usage
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def try_prefix_detection(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Fast prefix detection - return command prefix without API call.

    Returns a canned MessagesResponse when enabled and matched, else None.
    """
    if not settings.fast_prefix_detection:
        return None

    is_prefix_req, command = is_prefix_detection_request(request_data)
    if not is_prefix_req:
        return None

    logger.info("Optimization: Fast prefix detection request")
    return MessagesResponse(
        id=f"msg_{uuid.uuid4()}",
        model=request_data.model,
        # Explicit role for consistency with the other handlers (it is also
        # the model default, so behavior is unchanged).
        role="assistant",
        content=[{"type": "text", "text": extract_command_prefix(command)}],
        stop_reason="end_turn",
        usage=Usage(input_tokens=100, output_tokens=5),
    )
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def try_quota_mock(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Short-circuit quota probe requests with a canned success reply."""
    enabled = settings.enable_network_probe_mock
    if not enabled or not is_quota_check_request(request_data):
        return None

    logger.info("Optimization: Intercepted and mocked quota probe")
    return MessagesResponse(
        id=f"msg_{uuid.uuid4()}",
        model=request_data.model,
        role="assistant",
        content=[{"type": "text", "text": "Quota check passed."}],
        stop_reason="end_turn",
        usage=Usage(input_tokens=10, output_tokens=5),
    )
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def try_title_skip(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Answer title-generation requests with a canned title, skipping the LLM."""
    enabled = settings.enable_title_generation_skip
    if not enabled or not is_title_generation_request(request_data):
        return None

    logger.info("Optimization: Skipped title generation request")
    return MessagesResponse(
        id=f"msg_{uuid.uuid4()}",
        model=request_data.model,
        role="assistant",
        content=[{"type": "text", "text": "Conversation"}],
        stop_reason="end_turn",
        usage=Usage(input_tokens=100, output_tokens=5),
    )
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def try_suggestion_skip(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Answer suggestion-mode requests with an empty reply, skipping the LLM."""
    enabled = settings.enable_suggestion_mode_skip
    if not enabled or not is_suggestion_mode_request(request_data):
        return None

    logger.info("Optimization: Skipped suggestion mode request")
    return MessagesResponse(
        id=f"msg_{uuid.uuid4()}",
        model=request_data.model,
        role="assistant",
        content=[{"type": "text", "text": ""}],
        stop_reason="end_turn",
        usage=Usage(input_tokens=100, output_tokens=1),
    )
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
def try_filepath_mock(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Answer filepath-extraction requests locally, skipping the LLM."""
    if not settings.enable_filepath_extraction_mock:
        return None

    matched, cmd, output = is_filepath_extraction_request(request_data)
    if not matched:
        return None

    # Derive the filepath list locally from the command and its output.
    extracted = extract_filepaths_from_command(cmd, output)
    logger.info("Optimization: Mocked filepath extraction")
    return MessagesResponse(
        id=f"msg_{uuid.uuid4()}",
        model=request_data.model,
        role="assistant",
        content=[{"type": "text", "text": extracted}],
        stop_reason="end_turn",
        usage=Usage(input_tokens=100, output_tokens=10),
    )
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
# Cheapest/most common optimizations first for faster short-circuit.
# Each handler returns a MessagesResponse or None; see try_optimizations.
OPTIMIZATION_HANDLERS = [
    try_quota_mock,
    try_prefix_detection,
    try_title_skip,
    try_suggestion_skip,
    try_filepath_mock,
]
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
def try_optimizations(
    request_data: MessagesRequest, settings: Settings
) -> MessagesResponse | None:
    """Run optimization handlers in order; return the first hit, else None."""
    return next(
        (
            response
            for handler in OPTIMIZATION_HANDLERS
            if (response := handler(request_data, settings)) is not None
        ),
        None,
    )
|
Claude_Code/api/request_utils.py
ADDED
|
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Request utility functions for API route handlers.
|
| 2 |
+
|
| 3 |
+
Contains token counting for API requests.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import json
|
| 7 |
+
|
| 8 |
+
import tiktoken
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from providers.common import get_block_attr
|
| 12 |
+
|
| 13 |
+
ENCODER = tiktoken.get_encoding("cl100k_base")
|
| 14 |
+
|
| 15 |
+
__all__ = ["get_token_count"]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _count_block_tokens(block) -> int:
    """Estimate tokens for a single message content block by its ``type``."""
    b_type = get_block_attr(block, "type") or None

    if b_type == "text":
        return len(ENCODER.encode(str(get_block_attr(block, "text", ""))))
    if b_type == "thinking":
        return len(ENCODER.encode(str(get_block_attr(block, "thinking", ""))))
    if b_type == "tool_use":
        name = get_block_attr(block, "name", "")
        inp = get_block_attr(block, "input", {})
        block_id = get_block_attr(block, "id", "")
        return (
            len(ENCODER.encode(str(name)))
            + len(ENCODER.encode(json.dumps(inp)))
            + len(ENCODER.encode(str(block_id)))
            + 15  # tool_use envelope overhead
        )
    if b_type == "image":
        source = get_block_attr(block, "source")
        if isinstance(source, dict):
            data = source.get("data") or source.get("base64") or ""
            if data:
                # Rough heuristic: ~3000 base64 chars per token, min 85.
                return max(85, len(data) // 3000)
        # Flat estimate when no inline data is available.
        return 765
    if b_type == "tool_result":
        content = get_block_attr(block, "content", "")
        tool_use_id = get_block_attr(block, "tool_use_id", "")
        if isinstance(content, str):
            tokens = len(ENCODER.encode(content))
        else:
            tokens = len(ENCODER.encode(json.dumps(content)))
        return tokens + len(ENCODER.encode(str(tool_use_id))) + 8

    # Unknown block type: best-effort serialization fallback.
    # loguru formats with str.format-style braces, not %-style.
    logger.debug(
        "Unexpected block type {!r}, falling back to json/str encoding", b_type
    )
    try:
        return len(ENCODER.encode(json.dumps(block)))
    except (TypeError, ValueError):
        # Fixed: `except TypeError, ValueError:` is Python 2 syntax and a
        # SyntaxError in Python 3; a tuple is required to catch both.
        return len(ENCODER.encode(str(block)))


def get_token_count(
    messages: list,
    system: str | list | None = None,
    tools: list | None = None,
) -> int:
    """Estimate token count for a request.

    Uses tiktoken cl100k_base encoding to estimate token usage.
    Includes system prompt, messages, tools, and per-message overhead.

    Args:
        messages: Anthropic-style messages; each ``content`` is either a
            string or a list of typed content blocks.
        system: Optional system prompt (plain string or list of text blocks).
        tools: Optional tool definitions exposing ``name``, ``description``
            and ``input_schema``.

    Returns:
        Estimated input token count (always >= 1).
    """
    total_tokens = 0

    if system:
        if isinstance(system, str):
            total_tokens += len(ENCODER.encode(system))
        elif isinstance(system, list):
            for block in system:
                text = get_block_attr(block, "text", "")
                if text:
                    total_tokens += len(ENCODER.encode(str(text)))
        total_tokens += 4  # System block formatting overhead

    for msg in messages:
        if isinstance(msg.content, str):
            total_tokens += len(ENCODER.encode(msg.content))
        elif isinstance(msg.content, list):
            for block in msg.content:
                total_tokens += _count_block_tokens(block)

    if tools:
        for tool in tools:
            tool_str = (
                tool.name + (tool.description or "") + json.dumps(tool.input_schema)
            )
            total_tokens += len(ENCODER.encode(tool_str))

    # Per-message / per-tool structural overhead.
    total_tokens += len(messages) * 4
    if tools:
        total_tokens += len(tools) * 5

    return max(1, total_tokens)
|
Claude_Code/api/routes.py
ADDED
|
@@ -0,0 +1,345 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""FastAPI route handlers."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import shutil
|
| 5 |
+
import tempfile
|
| 6 |
+
import time
|
| 7 |
+
import traceback
|
| 8 |
+
import uuid
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
|
| 11 |
+
from fastapi import APIRouter, BackgroundTasks, Depends, HTTPException, Request
|
| 12 |
+
from fastapi.responses import HTMLResponse, StreamingResponse
|
| 13 |
+
from loguru import logger
|
| 14 |
+
|
| 15 |
+
from config.settings import Settings
|
| 16 |
+
from providers.common import get_user_facing_error_message
|
| 17 |
+
from providers.exceptions import InvalidRequestError, ProviderError
|
| 18 |
+
|
| 19 |
+
from .dependencies import get_provider_for_type, get_settings, require_api_key
|
| 20 |
+
from .models.anthropic import MessagesRequest, TokenCountRequest
|
| 21 |
+
from .models.responses import TokenCountResponse
|
| 22 |
+
from .optimization_handlers import try_optimizations
|
| 23 |
+
from .request_utils import get_token_count
|
| 24 |
+
|
| 25 |
+
router = APIRouter()
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _home_page_html(status_payload: dict[str, str]) -> str:
    """Render the home page HTML with the factory reset button.

    Args:
        status_payload: Must contain 'status', 'provider' and 'model' keys
            (interpolated into the meta row below).
    """
    # NOTE: CSS/JS braces are doubled ({{ }}) because this is an f-string;
    # only the three {status_payload[...]} placeholders are interpolated.
    # The reset button POSTs to /admin/factory-reset, forwarding the current
    # query string (carries auth such as __sign through to the reset call).
    return f"""
    <!doctype html>
    <html lang=\"en\">
    <head>
    <meta charset=\"utf-8\" />
    <meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
    <title>Claude Code Proxy</title>
    <style>
    :root {{ color-scheme: dark; }}
    body {{ margin: 0; min-height: 100vh; display: grid; place-items: center; font-family: Inter, Segoe UI, Arial, sans-serif; background: radial-gradient(1200px 600px at 20% 10%, #2e1b4a, #12131f 45%, #0b0c12); color: #e8ecff; }}
    .card {{ width: min(92vw, 560px); background: rgba(255, 255, 255, 0.06); border: 1px solid rgba(255, 255, 255, 0.14); border-radius: 20px; padding: 28px; box-shadow: 0 20px 50px rgba(0, 0, 0, 0.35); }}
    h1 {{ margin: 0 0 10px; font-size: 1.35rem; }}
    p {{ margin: 0 0 8px; color: #cfd7ff; line-height: 1.45; }}
    .meta {{ margin: 14px 0 18px; font-size: 0.95rem; color: #dfe6ff; }}
    .meta span {{ display: inline-block; margin-right: 12px; opacity: 0.95; }}
    button {{ border: none; border-radius: 14px; padding: 12px 18px; font-size: 1rem; font-weight: 700; color: white; cursor: pointer; background: linear-gradient(135deg, #ff507a, #7f5bff); box-shadow: 0 10px 20px rgba(127, 91, 255, 0.35); }}
    button:disabled {{ opacity: 0.65; cursor: wait; }}
    .status {{ margin-top: 14px; min-height: 24px; font-size: 0.95rem; color: #b8ffd8; }}
    </style>
    </head>
    <body>
    <div class=\"card\">
    <h1>Claude Code Proxy</h1>
    <p>Server is running.</p>
    <div class=\"meta\">
    <span><strong>Status:</strong> {status_payload['status']}</span>
    <span><strong>Provider:</strong> {status_payload['provider']}</span>
    <span><strong>Model:</strong> {status_payload['model']}</span>
    </div>
    <button id=\"resetBtn\">Factory Restart</button>
    <div class=\"status\" id=\"status\"></div>
    </div>
    <script>
    const btn = document.getElementById('resetBtn');
    const status = document.getElementById('status');
    btn.addEventListener('click', async () => {{
    btn.disabled = true;
    status.textContent = 'Resetting cache/workspace and restarting...';
    try {{
    const response = await fetch('/admin/factory-reset' + window.location.search, {{ method: 'POST' }});
    const data = await response.json();
    if (!response.ok) {{
    throw new Error(data.detail || 'Request failed');
    }}
    status.textContent = 'Restart initiated. This page will disconnect briefly.';
    }} catch (err) {{
    status.textContent = 'Failed: ' + (err.message || String(err));
    btn.disabled = false;
    }}
    }});
    </script>
    </body>
    </html>
    """
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def _clear_path(path: Path) -> int:
|
| 87 |
+
"""Best-effort removal of a file/directory path. Returns removed item count."""
|
| 88 |
+
if not path.exists():
|
| 89 |
+
return 0
|
| 90 |
+
try:
|
| 91 |
+
if path.is_dir():
|
| 92 |
+
shutil.rmtree(path)
|
| 93 |
+
else:
|
| 94 |
+
path.unlink()
|
| 95 |
+
return 1
|
| 96 |
+
except Exception as e:
|
| 97 |
+
logger.warning("Failed to remove path {}: {}", path, e)
|
| 98 |
+
return 0
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
def _clear_workspace_contents(workspace: Path) -> int:
    """Best-effort clear of workspace contents, keeping the root directory.

    Returns the number of direct children successfully removed.
    """
    if not (workspace.exists() and workspace.is_dir()):
        return 0
    return sum(_clear_path(entry) for entry in workspace.iterdir())
|
| 109 |
+
|
| 110 |
+
|
| 111 |
+
def _clear_runtime_state(settings: Settings) -> dict[str, int]:
    """Clear runtime caches/workspace data for a lightweight factory reset.

    Returns removal counts per category: workspace_items, cache_dirs,
    pycache_dirs.
    """
    workspace = Path(settings.claude_workspace).expanduser().resolve()

    home_cache = Path.home() / ".cache"
    cache_dirs = [
        home_cache / "huggingface",
        home_cache / "uv",
        home_cache / "pip",
        Path(tempfile.gettempdir()) / "huggingface",
    ]

    # Skip the virtualenv; everything else under the project root is fair game.
    pycache_dirs = [
        d for d in Path.cwd().rglob("__pycache__") if ".venv" not in d.parts
    ]

    return {
        "workspace_items": _clear_workspace_contents(workspace),
        "cache_dirs": sum(_clear_path(d) for d in cache_dirs),
        "pycache_dirs": sum(_clear_path(d) for d in pycache_dirs),
    }
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def _restart_process() -> None:
    """Terminate process so container orchestrator restarts the app."""
    logger.warning("Factory reset requested: restarting process")
    # Brief pause lets the HTTP response flush to the client before we die.
    time.sleep(1.0)
    # os._exit skips atexit/finally cleanup on purpose: the orchestrator
    # (e.g. the Space runtime) is responsible for bringing the app back up.
    os._exit(0)
|
| 145 |
+
|
| 146 |
+
|
| 147 |
+
# =============================================================================
|
| 148 |
+
# Routes
|
| 149 |
+
# =============================================================================
|
| 150 |
+
@router.post("/v1/messages")
|
| 151 |
+
async def create_message(
|
| 152 |
+
request_data: MessagesRequest,
|
| 153 |
+
raw_request: Request,
|
| 154 |
+
settings: Settings = Depends(get_settings),
|
| 155 |
+
_auth=Depends(require_api_key),
|
| 156 |
+
):
|
| 157 |
+
"""Create a message (always streaming)."""
|
| 158 |
+
|
| 159 |
+
try:
|
| 160 |
+
if not request_data.messages:
|
| 161 |
+
raise InvalidRequestError("messages cannot be empty")
|
| 162 |
+
|
| 163 |
+
optimized = try_optimizations(request_data, settings)
|
| 164 |
+
if optimized is not None:
|
| 165 |
+
return optimized
|
| 166 |
+
logger.debug("No optimization matched, routing to provider")
|
| 167 |
+
|
| 168 |
+
# Resolve provider from the model-aware mapping
|
| 169 |
+
provider_type = Settings.parse_provider_type(
|
| 170 |
+
request_data.resolved_provider_model or settings.model
|
| 171 |
+
)
|
| 172 |
+
provider = get_provider_for_type(provider_type)
|
| 173 |
+
|
| 174 |
+
request_id = f"req_{uuid.uuid4().hex[:12]}"
|
| 175 |
+
logger.info(
|
| 176 |
+
"API_REQUEST: request_id={} model={} messages={}",
|
| 177 |
+
request_id,
|
| 178 |
+
request_data.model,
|
| 179 |
+
len(request_data.messages),
|
| 180 |
+
)
|
| 181 |
+
logger.debug("FULL_PAYLOAD [{}]: {}", request_id, request_data.model_dump())
|
| 182 |
+
|
| 183 |
+
input_tokens = get_token_count(
|
| 184 |
+
request_data.messages, request_data.system, request_data.tools
|
| 185 |
+
)
|
| 186 |
+
return StreamingResponse(
|
| 187 |
+
provider.stream_response(
|
| 188 |
+
request_data,
|
| 189 |
+
input_tokens=input_tokens,
|
| 190 |
+
request_id=request_id,
|
| 191 |
+
),
|
| 192 |
+
media_type="text/event-stream",
|
| 193 |
+
headers={
|
| 194 |
+
"X-Accel-Buffering": "no",
|
| 195 |
+
"Cache-Control": "no-cache",
|
| 196 |
+
"Connection": "keep-alive",
|
| 197 |
+
},
|
| 198 |
+
)
|
| 199 |
+
|
| 200 |
+
except ProviderError:
|
| 201 |
+
raise
|
| 202 |
+
except Exception as e:
|
| 203 |
+
logger.error(f"Error: {e!s}\n{traceback.format_exc()}")
|
| 204 |
+
raise HTTPException(
|
| 205 |
+
status_code=getattr(e, "status_code", 500),
|
| 206 |
+
detail=get_user_facing_error_message(e),
|
| 207 |
+
) from e
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
@router.post("/v1/messages/count_tokens")
|
| 211 |
+
async def count_tokens(request_data: TokenCountRequest, _auth=Depends(require_api_key)):
|
| 212 |
+
"""Count tokens for a request."""
|
| 213 |
+
request_id = f"req_{uuid.uuid4().hex[:12]}"
|
| 214 |
+
with logger.contextualize(request_id=request_id):
|
| 215 |
+
try:
|
| 216 |
+
tokens = get_token_count(
|
| 217 |
+
request_data.messages, request_data.system, request_data.tools
|
| 218 |
+
)
|
| 219 |
+
logger.info(
|
| 220 |
+
"COUNT_TOKENS: request_id={} model={} messages={} input_tokens={}",
|
| 221 |
+
request_id,
|
| 222 |
+
getattr(request_data, "model", "unknown"),
|
| 223 |
+
len(request_data.messages),
|
| 224 |
+
tokens,
|
| 225 |
+
)
|
| 226 |
+
return TokenCountResponse(input_tokens=tokens)
|
| 227 |
+
except Exception as e:
|
| 228 |
+
logger.error(
|
| 229 |
+
"COUNT_TOKENS_ERROR: request_id={} error={}\n{}",
|
| 230 |
+
request_id,
|
| 231 |
+
get_user_facing_error_message(e),
|
| 232 |
+
traceback.format_exc(),
|
| 233 |
+
)
|
| 234 |
+
raise HTTPException(
|
| 235 |
+
status_code=500, detail=get_user_facing_error_message(e)
|
| 236 |
+
) from e
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@router.get("/")
|
| 240 |
+
async def root(
|
| 241 |
+
request: Request,
|
| 242 |
+
settings: Settings = Depends(get_settings),
|
| 243 |
+
_auth=Depends(require_api_key),
|
| 244 |
+
):
|
| 245 |
+
"""Root endpoint (JSON for API clients, HTML for browsers)."""
|
| 246 |
+
payload = {
|
| 247 |
+
"status": "ok",
|
| 248 |
+
"provider": settings.provider_type,
|
| 249 |
+
"model": settings.model,
|
| 250 |
+
}
|
| 251 |
+
accept = request.headers.get("accept", "")
|
| 252 |
+
if "__sign" in request.query_params or "text/html" in accept.lower():
|
| 253 |
+
return HTMLResponse(content=_home_page_html(payload))
|
| 254 |
+
return payload
|
| 255 |
+
|
| 256 |
+
|
| 257 |
+
@router.get("/health")
|
| 258 |
+
async def health():
|
| 259 |
+
"""Health check endpoint."""
|
| 260 |
+
return {"status": "healthy"}
|
| 261 |
+
|
| 262 |
+
|
| 263 |
+
@router.post("/stop")
|
| 264 |
+
async def stop_cli(request: Request, _auth=Depends(require_api_key)):
|
| 265 |
+
"""Stop all CLI sessions and pending tasks."""
|
| 266 |
+
handler = getattr(request.app.state, "message_handler", None)
|
| 267 |
+
if not handler:
|
| 268 |
+
# Fallback if messaging not initialized
|
| 269 |
+
cli_manager = getattr(request.app.state, "cli_manager", None)
|
| 270 |
+
if cli_manager:
|
| 271 |
+
await cli_manager.stop_all()
|
| 272 |
+
logger.info("STOP_CLI: source=cli_manager cancelled_count=N/A")
|
| 273 |
+
return {"status": "stopped", "source": "cli_manager"}
|
| 274 |
+
raise HTTPException(status_code=503, detail="Messaging system not initialized")
|
| 275 |
+
|
| 276 |
+
count = await handler.stop_all_tasks()
|
| 277 |
+
logger.info("STOP_CLI: source=handler cancelled_count={}", count)
|
| 278 |
+
return {"status": "stopped", "cancelled_count": count}
|
| 279 |
+
|
| 280 |
+
|
| 281 |
+
@router.get("/admin/factory-reset", response_class=HTMLResponse)
|
| 282 |
+
async def factory_reset_page(request: Request, _auth=Depends(require_api_key)):
|
| 283 |
+
"""Simple admin UI for one-click factory reset and restart."""
|
| 284 |
+
return """
|
| 285 |
+
<!doctype html>
|
| 286 |
+
<html lang=\"en\">
|
| 287 |
+
<head>
|
| 288 |
+
<meta charset=\"utf-8\" />
|
| 289 |
+
<meta name=\"viewport\" content=\"width=device-width, initial-scale=1\" />
|
| 290 |
+
<title>Factory Reset</title>
|
| 291 |
+
<style>
|
| 292 |
+
:root { color-scheme: dark; }
|
| 293 |
+
body { margin: 0; min-height: 100vh; display: grid; place-items: center; font-family: Inter, Segoe UI, Arial, sans-serif; background: radial-gradient(1200px 600px at 20% 10%, #2e1b4a, #12131f 45%, #0b0c12); color: #e8ecff; }
|
| 294 |
+
.card { width: min(92vw, 520px); background: rgba(255, 255, 255, 0.06); border: 1px solid rgba(255, 255, 255, 0.14); border-radius: 20px; padding: 28px; box-shadow: 0 20px 50px rgba(0, 0, 0, 0.35); }
|
| 295 |
+
h1 { margin: 0 0 10px; font-size: 1.35rem; }
|
| 296 |
+
p { margin: 0 0 18px; color: #cfd7ff; line-height: 1.45; }
|
| 297 |
+
button { border: none; border-radius: 14px; padding: 12px 18px; font-size: 1rem; font-weight: 700; color: white; cursor: pointer; background: linear-gradient(135deg, #ff507a, #7f5bff); box-shadow: 0 10px 20px rgba(127, 91, 255, 0.35); }
|
| 298 |
+
button:disabled { opacity: 0.65; cursor: wait; }
|
| 299 |
+
.status { margin-top: 14px; min-height: 24px; font-size: 0.95rem; color: #b8ffd8; }
|
| 300 |
+
</style>
|
| 301 |
+
</head>
|
| 302 |
+
<body>
|
| 303 |
+
<div class=\"card\">
|
| 304 |
+
<h1>Factory Reset & Restart</h1>
|
| 305 |
+
<p>Clears runtime cache and workspace data, then restarts this server.</p>
|
| 306 |
+
<button id=\"resetBtn\">Factory Restart</button>
|
| 307 |
+
<div class=\"status\" id=\"status\"></div>
|
| 308 |
+
</div>
|
| 309 |
+
<script>
|
| 310 |
+
const btn = document.getElementById('resetBtn');
|
| 311 |
+
const status = document.getElementById('status');
|
| 312 |
+
btn.addEventListener('click', async () => {
|
| 313 |
+
btn.disabled = true;
|
| 314 |
+
status.textContent = 'Resetting cache/workspace and restarting...';
|
| 315 |
+
try {
|
| 316 |
+
const response = await fetch('/admin/factory-reset' + window.location.search, { method: 'POST' });
|
| 317 |
+
const data = await response.json();
|
| 318 |
+
if (!response.ok) {
|
| 319 |
+
throw new Error(data.detail || 'Request failed');
|
| 320 |
+
}
|
| 321 |
+
status.textContent = 'Restart initiated. This page will disconnect briefly.';
|
| 322 |
+
} catch (err) {
|
| 323 |
+
status.textContent = 'Failed: ' + (err.message || String(err));
|
| 324 |
+
btn.disabled = false;
|
| 325 |
+
}
|
| 326 |
+
});
|
| 327 |
+
</script>
|
| 328 |
+
</body>
|
| 329 |
+
</html>
|
| 330 |
+
"""
|
| 331 |
+
|
| 332 |
+
|
| 333 |
+
@router.post("/admin/factory-reset")
|
| 334 |
+
async def factory_reset(
|
| 335 |
+
background_tasks: BackgroundTasks,
|
| 336 |
+
settings: Settings = Depends(get_settings),
|
| 337 |
+
_auth=Depends(require_api_key),
|
| 338 |
+
):
|
| 339 |
+
"""Clear runtime state and restart process (for Space maintenance)."""
|
| 340 |
+
cleared = _clear_runtime_state(settings)
|
| 341 |
+
background_tasks.add_task(_restart_process)
|
| 342 |
+
return {
|
| 343 |
+
"status": "restarting",
|
| 344 |
+
"cleared": cleared,
|
| 345 |
+
}
|
Claude_Code/claude-pick
ADDED
|
@@ -0,0 +1,183 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env bash
# claude-pick — Interactive model picker for free-claude-code
# Usage: claude-pick [extra claude args...]

set -euo pipefail

# Directory this script lives in (resolved from the invoked path).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
# Cached NVIDIA NIM model catalog; see get_nvidia_models for refresh command.
MODELS_FILE="$SCRIPT_DIR/nvidia_nim_models.json"
# .env consulted for PROVIDER_TYPE / API keys; override via CLAUDE_PICK_ENV_FILE.
ENV_FILE="${CLAUDE_PICK_ENV_FILE:-$SCRIPT_DIR/.env}"
# Local proxy port the launched claude CLI will talk to.
PORT="${CLAUDE_PICK_PORT:-8082}"
BASE_URL="http://localhost:$PORT"
OPENROUTER_MODELS_URL="https://openrouter.ai/api/v1/models"
DEFAULT_LM_STUDIO_BASE_URL="http://localhost:1234/v1"
# NOTE(review): defined but never referenced below (the llamacpp branch uses
# stub model names instead of querying a server) — confirm whether intended.
DEFAULT_LLAMACPP_BASE_URL="http://localhost:8080/v1"

# python3 is required by the embedded JSON-parsing helpers.
if ! command -v python3 >/dev/null 2>&1; then
    echo "Error: python3 is required." >&2
    exit 1
fi
|
| 20 |
+
|
| 21 |
+
# read_env_value KEY — print KEY's value from $ENV_FILE (empty if absent).
# Takes the LAST matching assignment, strips an inline '#' comment, trims
# whitespace, then removes one layer of surrounding double/single quotes.
# NOTE(review): values containing '#' or embedded quotes will be truncated
# or mangled by this trimming chain — acceptable for simple .env files.
read_env_value() {
    local key="$1"
    # Missing .env is not an error: print nothing, return success.
    [[ -f "$ENV_FILE" ]] || return 0

    local raw
    raw="$(grep -E "^[[:space:]]*${key}[[:space:]]*=" "$ENV_FILE" | tail -n 1 || true)"
    raw="${raw#*=}"  # drop the "KEY=" prefix
    raw="${raw%%#*}"  # drop any trailing inline comment
    raw="$(echo "$raw" | xargs || true)"  # trim surrounding whitespace
    raw="${raw%\"}"  # strip closing double quote
    raw="${raw#\"}"  # strip opening double quote
    raw="${raw%\'}"  # strip closing single quote
    raw="${raw#\'}"  # strip opening single quote
    echo "$raw"
}
|
| 36 |
+
|
| 37 |
+
# fzf provides the interactive fuzzy-picker UI; hard requirement.
if ! command -v fzf >/dev/null 2>&1; then
    echo "Error: fzf is required for the model picker." >&2
    echo "Install it from: https://github.com/junegunn/fzf" >&2
    exit 1
fi
|
| 42 |
+
|
| 43 |
+
# parse_models_from_json — read an OpenAI-style {"data": [{"id": ...}]} JSON
# document on stdin and print one model id per line. Prints nothing on
# malformed JSON (callers treat an empty model list as an error).
parse_models_from_json() {
    python3 -c '
import json, sys
try:
    payload = json.load(sys.stdin)
except Exception:
    sys.exit(0)
for item in payload.get("data", []):
    model_id = item.get("id")
    if model_id:
        print(model_id)
'
}
|
| 56 |
+
|
| 57 |
+
# get_nvidia_models — print model ids from the local NVIDIA NIM catalog file.
# The catalog is a cached copy of the NIM /v1/models response (same
# {"data":[{"id":...}]} shape parse_models_from_json handles for stdin).
get_nvidia_models() {
    if [[ ! -f "$MODELS_FILE" ]]; then
        echo "Error: $MODELS_FILE not found." >&2
        echo "Run: curl \"https://integrate.api.nvidia.com/v1/models\" > nvidia_nim_models.json" >&2
        exit 1
    fi

    python3 -c '
import json, sys
with open(sys.argv[1], "r", encoding="utf-8") as f:
    payload = json.load(f)
for item in payload.get("data", []):
    model_id = item.get("id")
    if model_id:
        print(model_id)
' "$MODELS_FILE"
}
|
| 74 |
+
|
| 75 |
+
# get_openrouter_models — fetch the model list from the OpenRouter API.
# Uses OPENROUTER_API_KEY (env or .env) when available; the endpoint also
# works unauthenticated.
get_openrouter_models() {
    if ! command -v curl >/dev/null 2>&1; then
        echo "Error: curl is required for OpenRouter model discovery." >&2
        exit 1
    fi

    local openrouter_key
    openrouter_key="${OPENROUTER_API_KEY:-$(read_env_value OPENROUTER_API_KEY)}"

    # Build the curl invocation once; only the auth header is conditional.
    local -a curl_args=(-fsSL)
    if [[ -n "$openrouter_key" ]]; then
        curl_args+=(-H "Authorization: Bearer $openrouter_key")
    fi

    local response
    if ! response="$(curl "${curl_args[@]}" "$OPENROUTER_MODELS_URL")"; then
        echo "Error: Failed to fetch OpenRouter models." >&2
        exit 1
    fi

    parse_models_from_json <<< "$response"
}
|
| 99 |
+
|
| 100 |
+
# get_lmstudio_models — query a running LM Studio server for loaded models.
get_lmstudio_models() {
    if ! command -v curl >/dev/null 2>&1; then
        echo "Error: curl is required for LM Studio model discovery." >&2
        exit 1
    fi

    # Base URL precedence: env var > .env entry > default localhost:1234.
    local lm_base
    lm_base="${LM_STUDIO_BASE_URL:-$(read_env_value LM_STUDIO_BASE_URL)}"
    lm_base="${lm_base:-$DEFAULT_LM_STUDIO_BASE_URL}"

    # Accept configured base URLs with or without a trailing /v1 segment.
    local models_url
    if [[ "$lm_base" == */v1 ]]; then
        models_url="${lm_base}/models"
    else
        models_url="${lm_base}/v1/models"
    fi

    local response
    if ! response="$(curl -fsSL "$models_url")"; then
        echo "Error: Failed to query LM Studio models at $models_url" >&2
        echo "Start LM Studio server first (Developer tab or: lms server start)." >&2
        exit 1
    fi

    parse_models_from_json <<< "$response"
}
|
| 126 |
+
|
| 127 |
+
provider="${CLAUDE_PICK_PROVIDER:-$(read_env_value PROVIDER_TYPE)}"
|
| 128 |
+
provider="${provider:-nvidia_nim}"
|
| 129 |
+
|
| 130 |
+
prompt="Select a model> "
|
| 131 |
+
case "$provider" in
|
| 132 |
+
nvidia_nim)
|
| 133 |
+
models="$(get_nvidia_models)"
|
| 134 |
+
prompt="Select a NVIDIA NIM model> "
|
| 135 |
+
;;
|
| 136 |
+
open_router|openrouter)
|
| 137 |
+
models="$(get_openrouter_models)"
|
| 138 |
+
prompt="Select an OpenRouter model> "
|
| 139 |
+
;;
|
| 140 |
+
lmstudio|lm_studio|lm-studio)
|
| 141 |
+
models="$(get_lmstudio_models)"
|
| 142 |
+
prompt="Select an LM Studio model> "
|
| 143 |
+
;;
|
| 144 |
+
llamacpp|llama.cpp)
|
| 145 |
+
# llama.cpp doesn't have a standardized /models endpoint that returns all loaded models reliably
|
| 146 |
+
# in the same way, but it does support Anthropic routing. We can use a stub model or query if available.
|
| 147 |
+
# For simple picker, we'll just allow passing a default or typing it in, but to match fzf we offer a stub.
|
| 148 |
+
models="local-model\nllama-server"
|
| 149 |
+
prompt="Select a llama.cpp model> "
|
| 150 |
+
;;
|
| 151 |
+
*)
|
| 152 |
+
echo "Error: Unsupported PROVIDER_TYPE='$provider'." >&2
|
| 153 |
+
echo "Expected one of: nvidia_nim, open_router, lmstudio, llamacpp" >&2
|
| 154 |
+
exit 1
|
| 155 |
+
;;
|
| 156 |
+
esac
|
| 157 |
+
|
| 158 |
+
models="$(printf "%s\n" "$models" | sed '/^[[:space:]]*$/d' | sort -u)"
|
| 159 |
+
if [[ -z "$models" ]]; then
|
| 160 |
+
echo "Error: No models found for provider '$provider'." >&2
|
| 161 |
+
exit 1
|
| 162 |
+
fi
|
| 163 |
+
|
| 164 |
+
model="$(printf "%s\n" "$models" | fzf --prompt="$prompt" --height=40% --reverse)"
|
| 165 |
+
|
| 166 |
+
if [[ -z "${model:-}" ]]; then
|
| 167 |
+
echo "No model selected." >&2
|
| 168 |
+
exit 1
|
| 169 |
+
fi
|
| 170 |
+
|
| 171 |
+
# Read auth token from .env or environment
|
| 172 |
+
auth_token="${ANTHROPIC_AUTH_TOKEN:-$(read_env_value ANTHROPIC_AUTH_TOKEN)}"
|
| 173 |
+
if [[ -z "$auth_token" ]]; then
|
| 174 |
+
auth_token="freecc"
|
| 175 |
+
fi
|
| 176 |
+
|
| 177 |
+
# If auth_token doesn't contain a colon, append ":$model"
|
| 178 |
+
if [[ "$auth_token" != *:* ]]; then
|
| 179 |
+
auth_token="$auth_token:$model"
|
| 180 |
+
fi
|
| 181 |
+
|
| 182 |
+
echo "Launching Claude with provider: $provider, model: $model" >&2
|
| 183 |
+
ANTHROPIC_AUTH_TOKEN="$auth_token" ANTHROPIC_BASE_URL="$BASE_URL" exec claude "$@"
|
Claude_Code/cli/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI integration for Claude Code."""
|
| 2 |
+
|
| 3 |
+
from .manager import CLISessionManager
|
| 4 |
+
from .session import CLISession
|
| 5 |
+
|
| 6 |
+
__all__ = ["CLISession", "CLISessionManager"]
|
Claude_Code/cli/entrypoints.py
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI entry points for the installed package."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
def serve() -> None:
    """Start the FastAPI server (registered as `free-claude-code` script)."""
    # Function-local imports keep entry-point startup light — presumably so
    # sibling commands (e.g. fcc-init) avoid server-side import cost; confirm.
    import uvicorn

    from cli.process_registry import kill_all_best_effort
    from config.settings import get_settings

    settings = get_settings()
    try:
        uvicorn.run(
            "api.app:app",
            host=settings.host,
            port=settings.port,
            log_level="debug",
            timeout_graceful_shutdown=5,
        )
    finally:
        # Always reap tracked CLI subprocesses, even if uvicorn exits abnormally.
        kill_all_best_effort()
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def init() -> None:
    """Scaffold config at ~/.config/free-claude-code/.env (registered as `fcc-init`)."""
    import importlib.resources
    from pathlib import Path

    env_file = Path.home() / ".config" / "free-claude-code" / ".env"

    if env_file.exists():
        print(f"Config already exists at {env_file}")
        print("Delete it first if you want to reset to defaults.")
        return

    env_file.parent.mkdir(parents=True, exist_ok=True)
    # Copy the packaged env.example as the starting configuration.
    template = (
        importlib.resources.files("config").joinpath("env.example").read_text("utf-8")
    )
    env_file.write_text(template, encoding="utf-8")
    print(f"Config created at {env_file}")
    print(
        "Edit it to set your API keys and model preferences, then run: free-claude-code"
    )
Claude_Code/cli/manager.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CLI Session Manager for Multi-Instance Claude CLI Support
|
| 3 |
+
|
| 4 |
+
Manages a pool of CLISession instances, each handling one conversation.
|
| 5 |
+
This enables true parallel processing where multiple conversations run
|
| 6 |
+
simultaneously in separate CLI processes.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import asyncio
|
| 10 |
+
import uuid
|
| 11 |
+
|
| 12 |
+
from loguru import logger
|
| 13 |
+
|
| 14 |
+
from .session import CLISession
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class CLISessionManager:
    """
    Coordinates a pool of CLISession instances so multiple conversations can
    run in parallel, each in its own Claude CLI subprocess.

    A brand-new conversation is tracked under a temporary "pending_*" id until
    the CLI reports its real session id; replies to a known conversation are
    routed back to the same CLISession instance.
    """

    def __init__(
        self,
        workspace_path: str,
        api_url: str,
        allowed_dirs: list[str] | None = None,
        plans_directory: str | None = None,
    ):
        """
        Initialize the session manager.

        Args:
            workspace_path: Working directory for CLI processes
            api_url: API URL for the proxy
            allowed_dirs: Directories the CLI is allowed to access
            plans_directory: Directory for Claude Code CLI plan files (passed via --settings)
        """
        self.workspace = workspace_path
        self.api_url = api_url
        self.allowed_dirs = allowed_dirs or []
        self.plans_directory = plans_directory

        # real id -> session, temp id -> session, and the id mappings between them
        self._sessions: dict[str, CLISession] = {}
        self._pending_sessions: dict[str, CLISession] = {}
        self._temp_to_real: dict[str, str] = {}
        self._real_to_temp: dict[str, str] = {}
        self._lock = asyncio.Lock()

        logger.info("CLISessionManager initialized")

    async def get_or_create_session(
        self, session_id: str | None = None
    ) -> tuple[CLISession, str, bool]:
        """
        Get an existing session or create a new one.

        Returns:
            Tuple of (CLISession instance, session_id, is_new_session)
        """
        async with self._lock:
            if session_id:
                # Translate a temp id to its real id when one was registered.
                resolved = self._temp_to_real.get(session_id, session_id)
                existing = self._sessions.get(resolved) or self._pending_sessions.get(
                    resolved
                )
                if existing is not None:
                    return existing, resolved, False

            # Unknown id: keep the caller-provided id as a placeholder, or mint one.
            temp_id = session_id or f"pending_{uuid.uuid4().hex[:8]}"

            fresh = CLISession(
                workspace_path=self.workspace,
                api_url=self.api_url,
                allowed_dirs=self.allowed_dirs,
                plans_directory=self.plans_directory,
            )
            self._pending_sessions[temp_id] = fresh
            logger.info(f"Created new session: {temp_id}")

            return fresh, temp_id, True

    async def register_real_session_id(
        self, temp_id: str, real_session_id: str
    ) -> bool:
        """Register the real session ID from CLI output."""
        async with self._lock:
            session = self._pending_sessions.pop(temp_id, None)
            if session is None:
                logger.warning(f"Temp session {temp_id} not found")
                return False

            self._sessions[real_session_id] = session
            self._temp_to_real[temp_id] = real_session_id
            self._real_to_temp[real_session_id] = temp_id

            logger.info(f"Registered session: {temp_id} -> {real_session_id}")
            return True

    async def remove_session(self, session_id: str) -> bool:
        """Remove a session from the manager."""
        async with self._lock:
            pending = self._pending_sessions.pop(session_id, None)
            if pending is not None:
                await pending.stop()
                return True

            active = self._sessions.pop(session_id, None)
            if active is None:
                return False

            await active.stop()
            # Drop both directions of the temp/real id mapping.
            temp_id = self._real_to_temp.pop(session_id, None)
            if temp_id is not None:
                self._temp_to_real.pop(temp_id, None)
            return True

    async def stop_all(self):
        """Stop all sessions."""
        async with self._lock:
            everything = [
                *self._sessions.values(),
                *self._pending_sessions.values(),
            ]
            for session in everything:
                try:
                    await session.stop()
                except Exception as e:
                    logger.error(f"Error stopping session: {e}")

            self._sessions.clear()
            self._pending_sessions.clear()
            self._temp_to_real.clear()
            self._real_to_temp.clear()
            logger.info("All sessions stopped")

    def get_stats(self) -> dict:
        """Get session statistics."""
        busy = sum(1 for s in self._sessions.values() if s.is_busy)
        return {
            "active_sessions": len(self._sessions),
            "pending_sessions": len(self._pending_sessions),
            "busy_count": busy,
        }
|
Claude_Code/cli/process_registry.py
ADDED
|
@@ -0,0 +1,74 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Track and clean up spawned CLI subprocesses.
|
| 2 |
+
|
| 3 |
+
This is a safety net for cases where the server is interrupted (Ctrl+C) and the
|
| 4 |
+
FastAPI lifespan cleanup doesn't run to completion. We only track processes we
|
| 5 |
+
spawn so we don't accidentally kill unrelated system processes.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from __future__ import annotations
|
| 9 |
+
|
| 10 |
+
import atexit
|
| 11 |
+
import os
|
| 12 |
+
import subprocess
|
| 13 |
+
import threading
|
| 14 |
+
|
| 15 |
+
from loguru import logger
|
| 16 |
+
|
| 17 |
+
# Protects _pids and _atexit_registered; registration can happen from
# multiple threads while the atexit hook runs on interpreter shutdown.
_lock = threading.Lock()
# PIDs of CLI subprocesses we spawned and have not yet unregistered.
_pids: set[int] = set()
# Whether kill_all_best_effort has been hooked into atexit yet.
_atexit_registered = False
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def ensure_atexit_registered() -> None:
    """Install the atexit cleanup hook exactly once (thread-safe)."""
    global _atexit_registered
    with _lock:
        if not _atexit_registered:
            atexit.register(kill_all_best_effort)
            _atexit_registered = True
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
def register_pid(pid: int) -> None:
    """Track a spawned CLI subprocess pid for best-effort cleanup at exit."""
    if pid:  # 0/None is never a real child pid
        ensure_atexit_registered()
        with _lock:
            _pids.add(int(pid))
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def unregister_pid(pid: int) -> None:
    """Forget a pid once its process has been reaped."""
    if pid:
        with _lock:
            _pids.discard(int(pid))
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def kill_all_best_effort() -> None:
    """Kill any still-running registered pids (best-effort).

    Invoked from atexit (and callable directly). Snapshots and clears the
    registry under the lock, then terminates each process tree without ever
    raising — failures are logged at DEBUG and otherwise ignored.

    Fix: the original passed %-style lazy args to loguru
    (``logger.debug("... pid=%s: %s", pid, e)``); loguru formats positional
    args with str.format-style ``{}`` placeholders, so the pid and error were
    silently dropped from the log line. Use f-strings, consistent with the
    rest of the file.
    """
    with _lock:
        pids = list(_pids)
        _pids.clear()

    if not pids:
        return

    if os.name == "nt":
        for pid in pids:
            try:
                # /T kills child processes, /F forces termination.
                subprocess.run(
                    ["taskkill", "/PID", str(pid), "/T", "/F"],
                    stdout=subprocess.DEVNULL,
                    stderr=subprocess.DEVNULL,
                    check=False,
                )
            except Exception as e:
                logger.debug(f"process_registry: taskkill failed pid={pid}: {e}")
        return

    # Best-effort fallback for non-Windows: SIGKILL each pid directly.
    for pid in pids:
        try:
            os.kill(pid, 9)
        except Exception as e:
            logger.debug(f"process_registry: kill failed pid={pid}: {e}")
|
Claude_Code/cli/session.py
ADDED
|
@@ -0,0 +1,257 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Claude Code CLI session management."""
|
| 2 |
+
|
| 3 |
+
import asyncio
|
| 4 |
+
import json
|
| 5 |
+
import os
|
| 6 |
+
from collections.abc import AsyncGenerator
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from .process_registry import register_pid, unregister_pid
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class CLISession:
    """Manages a single persistent Claude Code CLI subprocess.

    One instance corresponds to one conversation: start_task launches
    ``claude -p`` (optionally with ``--resume``) and streams the CLI's
    ``stream-json`` output back as event dicts.
    """

    def __init__(
        self,
        workspace_path: str,
        api_url: str,
        allowed_dirs: list[str] | None = None,
        plans_directory: str | None = None,
    ):
        # Normalized absolute working directory for the CLI subprocess.
        self.workspace = os.path.normpath(os.path.abspath(workspace_path))
        # Proxy URL exported via ANTHROPIC_API_URL / ANTHROPIC_BASE_URL.
        self.api_url = api_url
        # Extra directories passed to the CLI via --add-dir.
        self.allowed_dirs = [os.path.normpath(d) for d in (allowed_dirs or [])]
        # Optional plans directory forwarded via --settings.
        self.plans_directory = plans_directory
        self.process: asyncio.subprocess.Process | None = None
        # Real session id extracted from CLI output (None until first seen).
        self.current_session_id: str | None = None
        self._is_busy = False
        # Serializes start_task calls: one CLI run per session at a time.
        self._cli_lock = asyncio.Lock()

    @property
    def is_busy(self) -> bool:
        """Check if a task is currently running."""
        return self._is_busy

    async def start_task(
        self, prompt: str, session_id: str | None = None, fork_session: bool = False
    ) -> AsyncGenerator[dict]:
        """
        Start a new task or continue an existing session.

        Args:
            prompt: The user's message/prompt
            session_id: Optional session ID to resume; ids starting with
                "pending_" are manager placeholders and start a fresh session
            fork_session: When resuming, pass --fork-session to the CLI

        Yields:
            Event dictionaries from the CLI, plus synthetic events:
            {"type": "session_info"}, {"type": "error"}, {"type": "raw"},
            and a final {"type": "exit", "code": ..., "stderr": ...}
        """
        async with self._cli_lock:
            self._is_busy = True
            env = os.environ.copy()

            # The CLI insists on a key being set; the proxy does not use it.
            if "ANTHROPIC_API_KEY" not in env:
                env["ANTHROPIC_API_KEY"] = "sk-placeholder-key-for-proxy"

            # Point the CLI at the local proxy. BASE_URL is the URL without
            # a trailing /v1 suffix.
            env["ANTHROPIC_API_URL"] = self.api_url
            if self.api_url.endswith("/v1"):
                env["ANTHROPIC_BASE_URL"] = self.api_url[:-3]
            else:
                env["ANTHROPIC_BASE_URL"] = self.api_url

            env["TERM"] = "dumb"
            env["PYTHONIOENCODING"] = "utf-8"

            # Build command
            if session_id and not session_id.startswith("pending_"):
                cmd = [
                    "claude",
                    "--resume",
                    session_id,
                ]
                if fork_session:
                    cmd.append("--fork-session")
                cmd += [
                    "-p",
                    prompt,
                    "--output-format",
                    "stream-json",
                    "--dangerously-skip-permissions",
                    "--verbose",
                ]
                logger.info(f"Resuming Claude session {session_id}")
            else:
                cmd = [
                    "claude",
                    "-p",
                    prompt,
                    "--output-format",
                    "stream-json",
                    "--dangerously-skip-permissions",
                    "--verbose",
                ]
                logger.info("Starting new Claude session")

            if self.allowed_dirs:
                for d in self.allowed_dirs:
                    cmd.extend(["--add-dir", d])

            if self.plans_directory is not None:
                settings_json = json.dumps({"plansDirectory": self.plans_directory})
                cmd.extend(["--settings", settings_json])

            try:
                self.process = await asyncio.create_subprocess_exec(
                    *cmd,
                    stdout=asyncio.subprocess.PIPE,
                    stderr=asyncio.subprocess.PIPE,
                    cwd=self.workspace,
                    env=env,
                )
                if self.process and self.process.pid:
                    # Track the pid so an interrupted shutdown can reap it.
                    register_pid(self.process.pid)

                if not self.process or not self.process.stdout:
                    yield {"type": "exit", "code": 1}
                    return

                session_id_extracted = False
                # Raw stdout bytes not yet terminated by a newline.
                buffer = bytearray()

                try:
                    while True:
                        chunk = await self.process.stdout.read(65536)
                        if not chunk:
                            # EOF: flush any trailing, unterminated line.
                            if buffer:
                                line_str = buffer.decode(
                                    "utf-8", errors="replace"
                                ).strip()
                                if line_str:
                                    async for event in self._handle_line_gen(
                                        line_str, session_id_extracted
                                    ):
                                        if event.get("type") == "session_info":
                                            session_id_extracted = True
                                        yield event
                            break

                        buffer.extend(chunk)

                        # Emit one event per complete newline-terminated line.
                        while True:
                            newline_pos = buffer.find(b"\n")
                            if newline_pos == -1:
                                break

                            line = buffer[:newline_pos]
                            buffer = buffer[newline_pos + 1 :]

                            line_str = line.decode("utf-8", errors="replace").strip()
                            if line_str:
                                async for event in self._handle_line_gen(
                                    line_str, session_id_extracted
                                ):
                                    if event.get("type") == "session_info":
                                        session_id_extracted = True
                                    yield event
                except asyncio.CancelledError:
                    # Cancelling the handler task should not leave a Claude CLI
                    # subprocess running in the background.
                    try:
                        await asyncio.shield(self.stop())
                    finally:
                        raise

                stderr_text = None
                if self.process.stderr:
                    stderr_output = await self.process.stderr.read()
                    if stderr_output:
                        stderr_text = stderr_output.decode(
                            "utf-8", errors="replace"
                        ).strip()
                        logger.error(f"Claude CLI Stderr: {stderr_text}")
                        # Yield stderr as error event so it shows in UI
                        if stderr_text:
                            logger.info("CLI_SESSION: Yielding error event from stderr")
                            yield {"type": "error", "error": {"message": stderr_text}}

                return_code = await self.process.wait()
                logger.info(
                    f"Claude CLI exited with code {return_code}, stderr_present={bool(stderr_text)}"
                )
                if return_code != 0 and not stderr_text:
                    logger.warning(
                        f"CLI_SESSION: Process exited with code {return_code} but no stderr captured"
                    )
                yield {
                    "type": "exit",
                    "code": return_code,
                    "stderr": stderr_text,
                }
            finally:
                self._is_busy = False
                if self.process and self.process.pid:
                    unregister_pid(self.process.pid)

    async def _handle_line_gen(
        self, line_str: str, session_id_extracted: bool
    ) -> AsyncGenerator[dict]:
        """Process a single line and yield events.

        Emits a synthetic {"type": "session_info"} event the first time a
        session id is found, then the parsed event itself; lines that are not
        valid JSON are wrapped as {"type": "raw"} events.
        """
        try:
            event = json.loads(line_str)
            if not session_id_extracted:
                extracted_id = self._extract_session_id(event)
                if extracted_id:
                    self.current_session_id = extracted_id
                    logger.info(f"Extracted session ID: {extracted_id}")
                    yield {"type": "session_info", "session_id": extracted_id}

            yield event
        except json.JSONDecodeError:
            logger.debug(f"Non-JSON output: {line_str}")
            yield {"type": "raw", "content": line_str}

    def _extract_session_id(self, event: Any) -> str | None:
        """Extract session ID from CLI event.

        Checks top-level "session_id"/"sessionId", then the same keys nested
        under "init"/"system"/"result"/"metadata", then "conversation"."id".
        Returns None when no id is present or the event is not a dict.
        """
        if not isinstance(event, dict):
            return None

        if "session_id" in event:
            return event["session_id"]
        if "sessionId" in event:
            return event["sessionId"]

        for key in ["init", "system", "result", "metadata"]:
            if key in event and isinstance(event[key], dict):
                nested = event[key]
                if "session_id" in nested:
                    return nested["session_id"]
                if "sessionId" in nested:
                    return nested["sessionId"]

        if "conversation" in event and isinstance(event["conversation"], dict):
            conv = event["conversation"]
            if "id" in conv:
                return conv["id"]

        return None

    async def stop(self):
        """Stop the CLI process.

        Terminates gracefully, escalating to kill() after a 5s timeout.
        Returns True when a running process was stopped, False otherwise.
        """
        if self.process and self.process.returncode is None:
            try:
                logger.info(f"Stopping Claude CLI process {self.process.pid}")
                self.process.terminate()
                try:
                    await asyncio.wait_for(self.process.wait(), timeout=5.0)
                except TimeoutError:
                    self.process.kill()
                    await self.process.wait()
                if self.process and self.process.pid:
                    unregister_pid(self.process.pid)
                return True
            except Exception as e:
                logger.error(f"Error stopping process: {e}")
                return False
        return False
|
Claude_Code/config/__init__.py
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Configuration management."""
|
| 2 |
+
|
| 3 |
+
from .settings import Settings, get_settings
|
| 4 |
+
|
| 5 |
+
__all__ = ["Settings", "get_settings"]
|
Claude_Code/config/env.example
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# NVIDIA NIM Config
|
| 2 |
+
NVIDIA_NIM_API_KEY=""
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
# OpenRouter Config
|
| 6 |
+
OPENROUTER_API_KEY=""
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
# LM Studio Config (local provider, no API key required)
|
| 10 |
+
LM_STUDIO_BASE_URL="http://localhost:1234/v1"
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
# All Claude model requests are mapped to these models, plain model is fallback
|
| 14 |
+
# Format: provider_type/model_name (the model name itself may contain "/", e.g. "nvidia_nim/z-ai/glm4.7")
|
| 15 |
+
# Valid providers: "nvidia_nim" | "open_router" | "lmstudio"
|
| 16 |
+
MODEL_OPUS="nvidia_nim/z-ai/glm4.7"
|
| 17 |
+
MODEL_SONNET="open_router/arcee-ai/trinity-large-preview:free"
|
| 18 |
+
MODEL_HAIKU="open_router/stepfun/step-3.5-flash:free"
|
| 19 |
+
MODEL="nvidia_nim/z-ai/glm4.7"
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Provider config
|
| 23 |
+
PROVIDER_RATE_LIMIT=40
|
| 24 |
+
PROVIDER_RATE_WINDOW=60
|
| 25 |
+
PROVIDER_MAX_CONCURRENCY=5
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
# HTTP client timeouts (seconds) for provider API requests
|
| 29 |
+
HTTP_READ_TIMEOUT=120
|
| 30 |
+
HTTP_WRITE_TIMEOUT=10
|
| 31 |
+
HTTP_CONNECT_TIMEOUT=2
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Messaging Platform: "telegram" | "discord"
|
| 35 |
+
MESSAGING_PLATFORM="discord"
|
| 36 |
+
MESSAGING_RATE_LIMIT=1
|
| 37 |
+
MESSAGING_RATE_WINDOW=1
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
# Voice Note Transcription
|
| 41 |
+
VOICE_NOTE_ENABLED=false
|
| 42 |
+
# WHISPER_DEVICE: "cpu" | "cuda" | "nvidia_nim"
|
| 43 |
+
# - "cpu"/"cuda": Hugging Face transformers Whisper (offline, free; install with: uv sync --extra voice_local)
|
| 44 |
+
# - "nvidia_nim": NVIDIA NIM Whisper via Riva gRPC (requires NVIDIA_NIM_API_KEY; install with: uv sync --extra voice)
|
| 45 |
+
WHISPER_DEVICE="nvidia_nim"
|
| 46 |
+
# WHISPER_MODEL:
|
| 47 |
+
# - For cpu/cuda: Hugging Face ID or short name (tiny, base, small, medium, large-v2, large-v3, large-v3-turbo)
|
| 48 |
+
# - For nvidia_nim: NVIDIA NIM model (e.g., "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3")
|
| 49 |
+
# - For nvidia_nim, default to "openai/whisper-large-v3" for best performance
|
| 50 |
+
WHISPER_MODEL="openai/whisper-large-v3"
|
| 51 |
+
HF_TOKEN=""
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
# Telegram Config
|
| 55 |
+
TELEGRAM_BOT_TOKEN=""
|
| 56 |
+
ALLOWED_TELEGRAM_USER_ID=""
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
# Discord Config
|
| 60 |
+
DISCORD_BOT_TOKEN=""
|
| 61 |
+
ALLOWED_DISCORD_CHANNELS=""
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Agent Config
|
| 65 |
+
CLAUDE_WORKSPACE="./agent_workspace"
|
| 66 |
+
ALLOWED_DIR=""
|
| 67 |
+
FAST_PREFIX_DETECTION=true
|
| 68 |
+
ENABLE_NETWORK_PROBE_MOCK=true
|
| 69 |
+
ENABLE_TITLE_GENERATION_SKIP=true
|
| 70 |
+
ENABLE_SUGGESTION_MODE_SKIP=true
|
| 71 |
+
ENABLE_FILEPATH_EXTRACTION_MOCK=true
|
Claude_Code/config/logging_config.py
ADDED
|
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Loguru-based structured logging configuration.
|
| 2 |
+
|
| 3 |
+
All logs are written to server.log as JSON lines for full traceability.
|
| 4 |
+
Stdlib logging is intercepted and funneled to loguru.
|
| 5 |
+
Context vars (request_id, node_id, chat_id) from contextualize() are
|
| 6 |
+
included at top level for easy grep/filter.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import json
|
| 10 |
+
import logging
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
from loguru import logger
|
| 14 |
+
|
| 15 |
+
_configured = False
|
| 16 |
+
|
| 17 |
+
# Context keys we promote to top-level JSON for traceability
|
| 18 |
+
_CONTEXT_KEYS = ("request_id", "node_id", "chat_id")
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def _serialize_with_context(record) -> str:
|
| 22 |
+
"""Format record as JSON with context vars at top level.
|
| 23 |
+
Returns a format template; we inject _json into record for output.
|
| 24 |
+
"""
|
| 25 |
+
extra = record.get("extra", {})
|
| 26 |
+
out = {
|
| 27 |
+
"time": str(record["time"]),
|
| 28 |
+
"level": record["level"].name,
|
| 29 |
+
"message": record["message"],
|
| 30 |
+
"module": record["name"],
|
| 31 |
+
"function": record["function"],
|
| 32 |
+
"line": record["line"],
|
| 33 |
+
}
|
| 34 |
+
for key in _CONTEXT_KEYS:
|
| 35 |
+
if key in extra and extra[key] is not None:
|
| 36 |
+
out[key] = extra[key]
|
| 37 |
+
record["_json"] = json.dumps(out, default=str)
|
| 38 |
+
return "{_json}\n"
|
| 39 |
+
|
| 40 |
+
|
| 41 |
+
class InterceptHandler(logging.Handler):
    """logging.Handler that forwards stdlib records into loguru."""

    def emit(self, record: logging.LogRecord) -> None:
        # Map the stdlib level name onto loguru's; fall back to the numeric
        # level when loguru has no level of that name.
        try:
            level = logger.level(record.levelname).name
        except ValueError:
            level = record.levelno

        # Walk out of the logging machinery so loguru attributes the message
        # to the real caller rather than this handler.
        depth = 2
        frame = logging.currentframe()
        while frame is not None and frame.f_code.co_filename == logging.__file__:
            depth += 1
            frame = frame.f_back

        logger.opt(depth=depth, exception=record.exc_info).log(
            level, record.getMessage()
        )
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def configure_logging(log_file: str, *, force: bool = False) -> None:
    """Configure loguru with JSON output to log_file and intercept stdlib logging.

    Idempotent: skips if already configured (e.g. hot reload).
    Use force=True to reconfigure (e.g. in tests with a different log path).

    Args:
        log_file: Path of the JSON-lines log file (truncated on fresh start).
        force: Reconfigure even when logging was already set up.
    """
    global _configured
    if _configured and not force:
        return
    _configured = True

    # Remove default loguru handler (writes to stderr)
    logger.remove()

    # Truncate log file on fresh start for clean debugging. Create the parent
    # directory first: write_text raises FileNotFoundError when the log
    # directory does not exist yet (e.g. first run with a nested path).
    log_path = Path(log_file)
    log_path.parent.mkdir(parents=True, exist_ok=True)
    log_path.write_text("")

    # Add file sink: JSON lines, DEBUG level, context vars at top level
    logger.add(
        log_file,
        level="DEBUG",
        format=_serialize_with_context,
        encoding="utf-8",
        mode="a",
        rotation="50 MB",
    )

    # Intercept stdlib logging: route all root logger output to loguru
    intercept = InterceptHandler()
    logging.root.handlers = [intercept]
    logging.root.setLevel(logging.DEBUG)
|
Claude_Code/config/nim.py
ADDED
|
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""NVIDIA NIM settings (fixed values, no env config)."""
|
| 2 |
+
|
| 3 |
+
from pydantic import BaseModel, ConfigDict, Field, field_validator
|
| 4 |
+
|
| 5 |
+
|
| 6 |
+
class NimSettings(BaseModel):
    """Fixed NVIDIA NIM settings (not configurable via env).

    Defaults are validated by the Field constraints below; unknown fields
    are rejected via ``extra="forbid"``.
    """

    # Core sampling parameters.
    temperature: float = Field(1.0, ge=0.0)
    top_p: float = Field(1.0, ge=0.0, le=1.0)
    top_k: int = -1  # -1 disables top-k (enforced by validate_top_k)
    max_tokens: int = Field(81920, ge=1)
    presence_penalty: float = Field(0.0, ge=-2.0, le=2.0)
    frequency_penalty: float = Field(0.0, ge=-2.0, le=2.0)

    # Additional sampling controls.
    min_p: float = Field(0.0, ge=0.0, le=1.0)
    repetition_penalty: float = Field(1.0, ge=0.0)

    # Optional values; empty strings are normalized to None by the
    # mode="before" validators below.
    seed: int | None = None
    stop: str | None = None

    parallel_tool_calls: bool = True
    ignore_eos: bool = False
    enable_thinking: bool = False

    min_tokens: int = Field(0, ge=0)
    chat_template: str | None = None
    request_id: str | None = None

    # Reject unknown fields so typos fail fast at construction time.
    model_config = ConfigDict(extra="forbid")

    @field_validator("top_k")
    @classmethod
    def validate_top_k(cls, v):
        """Allow -1 (disabled) or any non-negative top_k."""
        if v < -1:
            raise ValueError("top_k must be -1 or >= 0")
        return v

    @field_validator("seed", mode="before")
    @classmethod
    def parse_optional_int(cls, v):
        """Treat ""/None as unset; otherwise coerce to int."""
        if v == "" or v is None:
            return None
        return int(v)

    @field_validator("stop", "chat_template", "request_id", mode="before")
    @classmethod
    def parse_optional_str(cls, v):
        """Map empty strings to None so optional fields stay unset."""
        if v == "":
            return None
        return v
|
Claude_Code/config/settings.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Centralized configuration using Pydantic Settings."""
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
from functools import lru_cache
|
| 5 |
+
from pathlib import Path
|
| 6 |
+
|
| 7 |
+
from pydantic import Field, field_validator, model_validator
|
| 8 |
+
from pydantic_settings import BaseSettings, SettingsConfigDict
|
| 9 |
+
|
| 10 |
+
from .nim import NimSettings
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def _env_files() -> tuple[Path, ...]:
|
| 14 |
+
"""Return env file paths in priority order (later overrides earlier)."""
|
| 15 |
+
files: list[Path] = [
|
| 16 |
+
Path.home() / ".config" / "free-claude-code" / ".env",
|
| 17 |
+
Path(".env"),
|
| 18 |
+
]
|
| 19 |
+
if explicit := os.environ.get("FCC_ENV_FILE"):
|
| 20 |
+
files.append(Path(explicit))
|
| 21 |
+
return tuple(files)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class Settings(BaseSettings):
    """Application settings loaded from environment variables.

    Values come from the process environment and the .env files returned by
    ``_env_files()`` (later files override earlier ones). Unknown environment
    keys are ignored (``extra="ignore"`` below).
    """

    # ==================== OpenRouter Config ====================
    open_router_api_key: str = Field(default="", validation_alias="OPENROUTER_API_KEY")

    # ==================== Messaging Platform Selection ====================
    # Valid: "telegram" | "discord"
    messaging_platform: str = Field(
        default="discord", validation_alias="MESSAGING_PLATFORM"
    )

    # ==================== NVIDIA NIM Config ====================
    # No explicit alias: pydantic-settings derives NVIDIA_NIM_API_KEY from the
    # field name.
    nvidia_nim_api_key: str = ""

    # ==================== LM Studio Config ====================
    lm_studio_base_url: str = Field(
        default="http://localhost:1234/v1",
        validation_alias="LM_STUDIO_BASE_URL",
    )

    # ==================== Llama.cpp Config ====================
    llamacpp_base_url: str = Field(
        default="http://localhost:8080/v1",
        validation_alias="LLAMACPP_BASE_URL",
    )

    # ==================== Model ====================
    # All Claude model requests are mapped to this single model (fallback)
    # Format: provider_type/model/name (the model part may itself contain "/")
    model: str = "nvidia_nim/meta/llama3-70b-instruct"

    # Per-model overrides (optional, falls back to MODEL)
    # Each can use a different provider
    model_opus: str | None = Field(default=None, validation_alias="MODEL_OPUS")
    model_sonnet: str | None = Field(default=None, validation_alias="MODEL_SONNET")
    model_haiku: str | None = Field(default=None, validation_alias="MODEL_HAIKU")

    # ==================== Provider Rate Limiting ====================
    # Max requests per rolling window, window length in seconds, and the cap
    # on concurrent in-flight provider requests.
    provider_rate_limit: int = Field(default=40, validation_alias="PROVIDER_RATE_LIMIT")
    provider_rate_window: int = Field(
        default=60, validation_alias="PROVIDER_RATE_WINDOW"
    )
    provider_max_concurrency: int = Field(
        default=5, validation_alias="PROVIDER_MAX_CONCURRENCY"
    )

    # ==================== HTTP Client Timeouts ====================
    # Seconds. The long read timeout presumably accommodates slow streaming
    # LLM responses — confirm against the HTTP client configuration.
    http_read_timeout: float = Field(
        default=300.0, validation_alias="HTTP_READ_TIMEOUT"
    )
    http_write_timeout: float = Field(
        default=10.0, validation_alias="HTTP_WRITE_TIMEOUT"
    )
    http_connect_timeout: float = Field(
        default=2.0, validation_alias="HTTP_CONNECT_TIMEOUT"
    )

    # ==================== Fast Prefix Detection ====================
    fast_prefix_detection: bool = True

    # ==================== Optimizations ====================
    enable_network_probe_mock: bool = True
    enable_title_generation_skip: bool = True
    enable_suggestion_mode_skip: bool = True
    enable_filepath_extraction_mock: bool = True

    # ==================== NIM Settings ====================
    nim: NimSettings = Field(default_factory=NimSettings)
    # Flat flag mirrored into ``nim.enable_thinking`` by ``_inject_nim_thinking``.
    nim_enable_thinking: bool = Field(
        default=False, validation_alias="NIM_ENABLE_THINKING"
    )

    # ==================== Voice Note Transcription ====================
    voice_note_enabled: bool = Field(
        default=True, validation_alias="VOICE_NOTE_ENABLED"
    )
    # Device: "cpu" | "cuda" | "nvidia_nim"
    # - "cpu"/"cuda": local Whisper (requires voice_local extra: uv sync --extra voice_local)
    # - "nvidia_nim": NVIDIA NIM Whisper API (requires voice extra: uv sync --extra voice)
    whisper_device: str = Field(default="cpu", validation_alias="WHISPER_DEVICE")
    # Whisper model ID or short name (for local Whisper) or NVIDIA NIM model (for nvidia_nim)
    # Local Whisper: "tiny", "base", "small", "medium", "large-v2", "large-v3", "large-v3-turbo"
    # NVIDIA NIM: "nvidia/parakeet-ctc-1.1b-asr", "openai/whisper-large-v3", etc.
    whisper_model: str = Field(default="base", validation_alias="WHISPER_MODEL")
    # Hugging Face token for faster model downloads (optional, for local Whisper)
    hf_token: str = Field(default="", validation_alias="HF_TOKEN")

    # ==================== Bot Wrapper Config ====================
    telegram_bot_token: str | None = None
    allowed_telegram_user_id: str | None = None
    discord_bot_token: str | None = Field(
        default=None, validation_alias="DISCORD_BOT_TOKEN"
    )
    allowed_discord_channels: str | None = Field(
        default=None, validation_alias="ALLOWED_DISCORD_CHANNELS"
    )
    claude_workspace: str = "./agent_workspace"
    allowed_dir: str = ""

    # ==================== Server ====================
    host: str = "0.0.0.0"
    port: int = 8082
    log_file: str = "server.log"
    # Optional server API key to protect endpoints (Anthropic-style)
    # Set via env `ANTHROPIC_AUTH_TOKEN`. When empty, no auth is required.
    anthropic_auth_token: str = Field(
        default="", validation_alias="ANTHROPIC_AUTH_TOKEN"
    )

    # Handle empty strings for optional string fields
    @field_validator(
        "telegram_bot_token",
        "allowed_telegram_user_id",
        "discord_bot_token",
        "allowed_discord_channels",
        mode="before",
    )
    @classmethod
    def parse_optional_str(cls, v):
        """Coerce empty-string env values to None for optional fields."""
        if v == "":
            return None
        return v

    @field_validator("whisper_device")
    @classmethod
    def validate_whisper_device(cls, v: str) -> str:
        """Reject transcription devices outside the supported set."""
        if v not in ("cpu", "cuda", "nvidia_nim"):
            raise ValueError(
                f"whisper_device must be 'cpu', 'cuda', or 'nvidia_nim', got {v!r}"
            )
        return v

    @field_validator("model", "model_opus", "model_sonnet", "model_haiku")
    @classmethod
    def validate_model_format(cls, v: str | None) -> str | None:
        """Require every configured model string to carry a known provider prefix."""
        if v is None:
            return None
        valid_providers = ("nvidia_nim", "open_router", "lmstudio", "llamacpp")
        if "/" not in v:
            raise ValueError(
                f"Model must be prefixed with provider type. "
                f"Valid providers: {', '.join(valid_providers)}. "
                f"Format: provider_type/model/name"
            )
        provider = v.split("/", 1)[0]
        if provider not in valid_providers:
            raise ValueError(
                f"Invalid provider: '{provider}'. "
                f"Supported: 'nvidia_nim', 'open_router', 'lmstudio', 'llamacpp'"
            )
        return v

    @model_validator(mode="after")
    def _inject_nim_thinking(self) -> "Settings":
        """Copy the flat NIM_ENABLE_THINKING flag into the nested NIM settings."""
        self.nim = self.nim.model_copy(
            update={"enable_thinking": self.nim_enable_thinking}
        )
        return self

    @model_validator(mode="after")
    def check_nvidia_nim_api_key(self) -> "Settings":
        """Fail fast when NIM-based voice transcription is enabled without a key."""
        if (
            self.voice_note_enabled
            and self.whisper_device == "nvidia_nim"
            and not self.nvidia_nim_api_key.strip()
        ):
            raise ValueError(
                "NVIDIA_NIM_API_KEY is required when WHISPER_DEVICE is 'nvidia_nim'. "
                "Set it in your .env file."
            )
        return self

    @property
    def provider_type(self) -> str:
        """Extract provider type from the default model string."""
        return self.model.split("/", 1)[0]

    @property
    def model_name(self) -> str:
        """Extract the actual model name from the default model string."""
        # Safe: validate_model_format guarantees ``model`` contains "/".
        return self.model.split("/", 1)[1]

    def resolve_model(self, claude_model_name: str) -> str:
        """Resolve a Claude model name to the configured provider/model string.

        Classifies the incoming Claude model (opus/sonnet/haiku) and
        returns the model-specific override if configured, otherwise the fallback MODEL.
        """
        name_lower = claude_model_name.lower()
        if "opus" in name_lower and self.model_opus is not None:
            return self.model_opus
        if "haiku" in name_lower and self.model_haiku is not None:
            return self.model_haiku
        if "sonnet" in name_lower and self.model_sonnet is not None:
            return self.model_sonnet
        return self.model

    @staticmethod
    def parse_provider_type(model_string: str) -> str:
        """Extract provider type from any 'provider/model' string."""
        return model_string.split("/", 1)[0]

    @staticmethod
    def parse_model_name(model_string: str) -> str:
        """Extract model name from any 'provider/model' string."""
        # NOTE(review): raises IndexError for strings without "/"; callers
        # appear to pass validated model strings only — confirm.
        return model_string.split("/", 1)[1]

    # Pydantic settings source configuration; .env candidates are resolved
    # once at class-definition time via _env_files().
    model_config = SettingsConfigDict(
        env_file=_env_files(),
        env_file_encoding="utf-8",
        extra="ignore",
    )
| 237 |
+
|
| 238 |
+
|
| 239 |
+
@lru_cache
def get_settings() -> Settings:
    """Build the application Settings once and return the cached instance."""
    settings = Settings()
    return settings
|
Claude_Code/messaging/__init__.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Platform-agnostic messaging layer."""
|
| 2 |
+
|
| 3 |
+
from .event_parser import parse_cli_event
|
| 4 |
+
from .handler import ClaudeMessageHandler
|
| 5 |
+
from .models import IncomingMessage
|
| 6 |
+
from .platforms.base import CLISession, MessagingPlatform, SessionManagerInterface
|
| 7 |
+
from .session import SessionStore
|
| 8 |
+
from .trees.data import MessageNode, MessageState, MessageTree
|
| 9 |
+
from .trees.queue_manager import TreeQueueManager
|
| 10 |
+
|
| 11 |
+
__all__ = [
|
| 12 |
+
"CLISession",
|
| 13 |
+
"ClaudeMessageHandler",
|
| 14 |
+
"IncomingMessage",
|
| 15 |
+
"MessageNode",
|
| 16 |
+
"MessageState",
|
| 17 |
+
"MessageTree",
|
| 18 |
+
"MessagingPlatform",
|
| 19 |
+
"SessionManagerInterface",
|
| 20 |
+
"SessionStore",
|
| 21 |
+
"TreeQueueManager",
|
| 22 |
+
"parse_cli_event",
|
| 23 |
+
]
|
Claude_Code/messaging/commands.py
ADDED
|
@@ -0,0 +1,283 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Command handlers for messaging platform commands (/stop, /stats, /clear).
|
| 2 |
+
|
| 3 |
+
Extracted from ClaudeMessageHandler to keep handler.py focused on
|
| 4 |
+
core message processing logic.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from __future__ import annotations
|
| 8 |
+
|
| 9 |
+
from typing import TYPE_CHECKING
|
| 10 |
+
|
| 11 |
+
from loguru import logger
|
| 12 |
+
|
| 13 |
+
if TYPE_CHECKING:
|
| 14 |
+
from messaging.handler import ClaudeMessageHandler
|
| 15 |
+
from messaging.models import IncomingMessage
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
async def handle_stop_command(
    handler: ClaudeMessageHandler, incoming: IncomingMessage
) -> None:
    """Handle the /stop command: reply-scoped cancel or global stop-all."""

    async def _reply(detail: str) -> None:
        # Send a "Stopped." status line and record the outgoing message id.
        sent_id = await handler.platform.queue_send_message(
            incoming.chat_id,
            handler.format_status("⏹", "Stopped.", detail),
            fire_and_forget=False,
            message_thread_id=incoming.message_thread_id,
        )
        handler.record_outgoing_message(
            incoming.platform, incoming.chat_id, sent_id, "command"
        )

    # Reply-scoped stop: reply "/stop" to stop only that task.
    if incoming.is_reply() and incoming.reply_to_message_id:
        target_id = incoming.reply_to_message_id
        owning_tree = handler.tree_queue.get_tree_for_node(target_id)
        node_id = None
        if owning_tree:
            node_id = handler.tree_queue.resolve_parent_node_id(target_id)

        if not node_id:
            await _reply("Nothing to stop for that message.")
            return

        cancelled = await handler.stop_task(node_id)
        noun = "request" if cancelled == 1 else "requests"
        await _reply(f"Cancelled {cancelled} {noun}.")
        return

    # Global stop: legacy behavior (stop everything).
    total = await handler.stop_all_tasks()
    await _reply(f"Cancelled {total} pending or active requests.")
| 68 |
+
|
| 69 |
+
|
| 70 |
+
async def handle_stats_command(
    handler: ClaudeMessageHandler, incoming: IncomingMessage
) -> None:
    """Handle the /stats command: report active CLI sessions and tree count."""
    cli_stats = handler.cli_manager.get_stats()
    tree_total = handler.tree_queue.get_tree_count()
    ctx = handler.get_render_ctx()
    report_lines = [
        "📊 " + ctx.bold("Stats"),
        ctx.escape_text(f"• Active CLI: {cli_stats['active_sessions']}"),
        ctx.escape_text(f"• Message Trees: {tree_total}"),
    ]
    msg_id = await handler.platform.queue_send_message(
        incoming.chat_id,
        "\n".join(report_lines),
        fire_and_forget=False,
        message_thread_id=incoming.message_thread_id,
    )
    handler.record_outgoing_message(
        incoming.platform, incoming.chat_id, msg_id, "command"
    )
+
|
| 92 |
+
|
| 93 |
+
async def _delete_message_ids(
    handler: ClaudeMessageHandler, chat_id: str, msg_ids: set[str]
) -> None:
    """Best-effort delete messages by ID.

    Numeric IDs are deleted in descending order so newer messages disappear
    first; non-numeric IDs follow in arbitrary order. Uses the platform's
    bulk-delete API when available, otherwise deletes one by one. Failures
    are logged at debug level and never raised.
    """
    if not msg_ids:
        return

    def _as_int(s: str) -> int | None:
        """Parse *s* as an int; return None for non-numeric IDs."""
        try:
            return int(s)
        except (TypeError, ValueError):
            # Only these can be raised by int() on a bad value; anything
            # else should propagate rather than be silently swallowed.
            return None

    numeric: list[tuple[int, str]] = []
    non_numeric: list[str] = []
    for mid in msg_ids:
        n = _as_int(mid)
        if n is None:
            non_numeric.append(mid)
        else:
            numeric.append((n, mid))
    numeric.sort(reverse=True)
    ordered = [mid for _, mid in numeric] + non_numeric

    # Prefer a platform bulk-delete API when present (duck-typed lookup).
    batch_fn = getattr(handler.platform, "queue_delete_messages", None)
    if callable(batch_fn):
        try:
            CHUNK = 100  # delete in bounded batches per platform bulk limits
            for i in range(0, len(ordered), CHUNK):
                chunk = ordered[i : i + CHUNK]
                await batch_fn(chat_id, chunk, fire_and_forget=False)
        except Exception as e:
            logger.debug(f"Batch delete failed: {type(e).__name__}: {e}")
    else:
        for mid in ordered:
            try:
                await handler.platform.queue_delete_message(
                    chat_id, mid, fire_and_forget=False
                )
            except Exception as e:
                logger.debug(f"Delete failed for msg {mid}: {type(e).__name__}: {e}")
| 134 |
+
|
| 135 |
+
|
| 136 |
+
async def _handle_clear_branch(
    handler: ClaudeMessageHandler,
    incoming: IncomingMessage,
    branch_root_id: str,
) -> None:
    """
    Clear a branch (replied-to node + all descendants).

    Order: cancel tasks, delete messages, remove branch, update session store.
    Cancelling before deleting prevents in-flight tasks from posting to
    messages that are about to be removed; store updates are best-effort.
    """
    tree = handler.tree_queue.get_tree_for_node(branch_root_id)
    if not tree:
        # The node is unknown to any tree: nothing to clear.
        return

    # 1) Cancel branch tasks (no stop_all)
    cancelled = await handler.tree_queue.cancel_branch(branch_root_id)
    handler.update_cancelled_nodes_ui(cancelled)

    # 2) Collect message IDs from branch nodes only
    msg_ids: set[str] = set()
    branch_ids = tree.get_descendants(branch_root_id)
    for nid in branch_ids:
        node = tree.get_node(nid)
        if node:
            if node.incoming.message_id:
                msg_ids.add(str(node.incoming.message_id))
            if node.status_message_id:
                msg_ids.add(str(node.status_message_id))
    # Include the "/clear" command message itself.
    if incoming.message_id:
        msg_ids.add(str(incoming.message_id))

    # 3) Delete messages (best-effort)
    await _delete_message_ids(handler, incoming.chat_id, msg_ids)

    # 4) Remove branch from tree
    removed, root_id, removed_entire_tree = await handler.tree_queue.remove_branch(
        branch_root_id
    )

    # 5) Update session store
    try:
        handler.session_store.remove_node_mappings([n.node_id for n in removed])
        if removed_entire_tree:
            handler.session_store.remove_tree(root_id)
        else:
            # The branch removal reshaped the tree; persist a fresh snapshot.
            updated_tree = handler.tree_queue.get_tree(root_id)
            if updated_tree:
                handler.session_store.save_tree(root_id, updated_tree.to_dict())
    except Exception as e:
        logger.warning(f"Failed to update session store after branch clear: {e}")
| 186 |
+
|
| 187 |
+
|
| 188 |
+
async def handle_clear_command(
    handler: ClaudeMessageHandler, incoming: IncomingMessage
) -> None:
    """
    Handle /clear command.

    Reply-scoped: reply to a message to clear that branch (node + descendants).
    Standalone: global clear (stop all, delete all chat messages, reset store).
    """
    # Imported locally rather than at module top — presumably to avoid a
    # circular import between commands and the trees package; confirm.
    from messaging.trees import TreeQueueManager

    if incoming.is_reply() and incoming.reply_to_message_id:
        reply_id = incoming.reply_to_message_id
        tree = handler.tree_queue.get_tree_for_node(reply_id)
        branch_root_id = (
            handler.tree_queue.resolve_parent_node_id(reply_id) if tree else None
        )
        if not branch_root_id:
            # The replied-to message belongs to no tree. It may be a voice
            # note whose transcription is still pending; cancel it when the
            # platform exposes that capability (duck-typed lookup).
            cancel_fn = getattr(handler.platform, "cancel_pending_voice", None)
            if cancel_fn is not None:
                cancelled = await cancel_fn(incoming.chat_id, reply_id)
                if cancelled is not None:
                    voice_msg_id, status_msg_id = cancelled
                    msg_ids_to_del: set[str] = {voice_msg_id, status_msg_id}
                    if incoming.message_id is not None:
                        msg_ids_to_del.add(str(incoming.message_id))
                    await _delete_message_ids(handler, incoming.chat_id, msg_ids_to_del)
                    msg_id = await handler.platform.queue_send_message(
                        incoming.chat_id,
                        handler.format_status("🗑", "Cleared.", "Voice note cancelled."),
                        fire_and_forget=False,
                        message_thread_id=incoming.message_thread_id,
                    )
                    handler.record_outgoing_message(
                        incoming.platform, incoming.chat_id, msg_id, "command"
                    )
                    return
            # Nothing known about the replied-to message: report and exit.
            msg_id = await handler.platform.queue_send_message(
                incoming.chat_id,
                handler.format_status(
                    "🗑", "Cleared.", "Nothing to clear for that message."
                ),
                fire_and_forget=False,
                message_thread_id=incoming.message_thread_id,
            )
            handler.record_outgoing_message(
                incoming.platform, incoming.chat_id, msg_id, "command"
            )
            return
        await _handle_clear_branch(handler, incoming, branch_root_id)
        return

    # Global clear
    # 1) Stop tasks first (ensures no more work is running).
    await handler.stop_all_tasks()

    # 2) Clear chat: best-effort delete messages we can identify.
    msg_ids: set[str] = set()

    # Add any recorded message IDs for this chat (commands, command replies, etc).
    try:
        for mid in handler.session_store.get_message_ids_for_chat(
            incoming.platform, incoming.chat_id
        ):
            if mid is not None:
                msg_ids.add(str(mid))
    except Exception as e:
        logger.debug(f"Failed to read message log for /clear: {e}")

    # Add message IDs tracked by the in-memory trees for this chat.
    try:
        msg_ids.update(
            handler.tree_queue.get_message_ids_for_chat(
                incoming.platform, incoming.chat_id
            )
        )
    except Exception as e:
        logger.warning(f"Failed to gather messages for /clear: {e}")

    # Also delete the command message itself.
    if incoming.message_id is not None:
        msg_ids.add(str(incoming.message_id))

    await _delete_message_ids(handler, incoming.chat_id, msg_ids)

    # 3) Clear persistent state and reset in-memory queue/tree state.
    try:
        handler.session_store.clear_all()
    except Exception as e:
        logger.warning(f"Failed to clear session store: {e}")

    # A fresh TreeQueueManager wired to the same handler callbacks replaces
    # the old one, discarding all queued/processing node state.
    handler.replace_tree_queue(
        TreeQueueManager(
            queue_update_callback=handler.update_queue_positions,
            node_started_callback=handler.mark_node_processing,
        )
    )
|
Claude_Code/messaging/event_parser.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""CLI event parser for Claude Code CLI output.
|
| 2 |
+
|
| 3 |
+
This parser emits an ordered stream of low-level events suitable for building a
|
| 4 |
+
Claude Code-like transcript in messaging UIs.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def parse_cli_event(event: Any) -> list[dict]:
    """
    Parse a raw CLI event into an ordered list of normalized transcript events.

    Args:
        event: Raw event dictionary from CLI

    Returns:
        List of parsed event dicts. Empty list if not recognized.
    """
    if not isinstance(event, dict):
        return []

    etype = event.get("type")

    # Some CLI/proxy layers emit "system" events with no user-visible
    # transcript content; drop them silently to avoid noisy logs.
    if etype == "system":
        return []

    def _find_message() -> Any:
        """Locate the message payload for full (non-streaming) events."""
        if etype == "assistant" or etype == "user":
            return event.get("message")
        if etype == "result":
            found = None
            res = event.get("result")
            if isinstance(res, dict):
                found = res.get("message")
                # Some variants put content directly on the result.
                if not found and isinstance(res.get("content"), list):
                    found = {"content": res.get("content")}
            if not found:
                found = event.get("message")
            # Some variants put content directly on the event.
            if not found and isinstance(event.get("content"), list):
                found = {"content": event.get("content")}
            return found
        return None

    # 1. Full messages (assistant/user or result)
    msg_obj = _find_message()
    if msg_obj and isinstance(msg_obj, dict):
        parsed: list[dict[str, Any]] = []
        blocks = msg_obj.get("content", [])
        if isinstance(blocks, list):
            # Preserve order exactly as content blocks appear.
            for blk in blocks:
                if not isinstance(blk, dict):
                    continue
                btype = blk.get("type")
                if btype == "text":
                    parsed.append({"type": "text_chunk", "text": blk.get("text", "")})
                elif btype == "thinking":
                    parsed.append(
                        {"type": "thinking_chunk", "text": blk.get("thinking", "")}
                    )
                elif btype == "tool_use":
                    parsed.append(
                        {
                            "type": "tool_use",
                            "id": str(blk.get("id", "") or "").strip(),
                            "name": blk.get("name", ""),
                            "input": blk.get("input"),
                        }
                    )
                elif btype == "tool_result":
                    parsed.append(
                        {
                            "type": "tool_result",
                            "tool_use_id": str(
                                blk.get("tool_use_id", "") or ""
                            ).strip(),
                            "content": blk.get("content"),
                            "is_error": bool(blk.get("is_error", False)),
                        }
                    )
        if parsed:
            return parsed

    idx = event.get("index", -1)

    # 2. Streaming deltas, dispatched by delta type:
    #    (output event type, key read from the delta, key written to output)
    if etype == "content_block_delta":
        delta = event.get("delta", {})
        if isinstance(delta, dict):
            delta_spec = {
                "text_delta": ("text_delta", "text", "text"),
                "thinking_delta": ("thinking_delta", "thinking", "text"),
                "input_json_delta": ("tool_use_delta", "partial_json", "partial_json"),
            }.get(delta.get("type"))
            if delta_spec:
                out_type, src_key, out_key = delta_spec
                return [
                    {"type": out_type, "index": idx, out_key: delta.get(src_key, "")}
                ]

    # 3. Block starts (open a new streaming segment)
    if etype == "content_block_start":
        started = event.get("content_block", {})
        if isinstance(started, dict):
            stype = started.get("type")
            if stype == "thinking":
                return [{"type": "thinking_start", "index": idx}]
            if stype == "text":
                return [{"type": "text_start", "index": idx}]
            if stype == "tool_use":
                return [
                    {
                        "type": "tool_use_start",
                        "index": idx,
                        "id": str(started.get("id", "") or "").strip(),
                        "name": started.get("name", ""),
                        "input": started.get("input"),
                    }
                ]

    # 3.5 Block stop (closes the open streaming segment)
    if etype == "content_block_stop":
        return [{"type": "block_stop", "index": idx}]

    # 4. Errors and process exit
    if etype == "error":
        err = event.get("error")
        msg = err.get("message") if isinstance(err, dict) else str(err)
        logger.info(f"CLI_PARSER: Parsed error event: {msg}")
        return [{"type": "error", "message": msg}]

    if etype == "exit":
        code = event.get("code", 0)
        stderr = event.get("stderr")
        if code == 0:
            logger.debug(f"CLI_PARSER: Successful exit (code={code})")
            return [{"type": "complete", "status": "success"}]
        # Non-zero exit is an error.
        error_msg = stderr if stderr else f"Process exited with code {code}"
        logger.warning(f"CLI_PARSER: Error exit (code={code}): {error_msg}")
        return [
            {"type": "error", "message": error_msg},
            {"type": "complete", "status": "failed"},
        ]

    # Log unrecognized events for debugging.
    if etype:
        logger.debug(f"CLI_PARSER: Unrecognized event type: {etype}")
    return []
|
Claude_Code/messaging/handler.py
ADDED
|
@@ -0,0 +1,770 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Claude Message Handler
|
| 3 |
+
|
| 4 |
+
Platform-agnostic Claude interaction logic.
|
| 5 |
+
Handles the core workflow of processing user messages via Claude CLI.
|
| 6 |
+
Uses tree-based queuing for message ordering.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import asyncio
|
| 10 |
+
import os
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
from loguru import logger
|
| 14 |
+
|
| 15 |
+
from providers.common import get_user_facing_error_message
|
| 16 |
+
|
| 17 |
+
from .commands import (
|
| 18 |
+
handle_clear_command,
|
| 19 |
+
handle_stats_command,
|
| 20 |
+
handle_stop_command,
|
| 21 |
+
)
|
| 22 |
+
from .event_parser import parse_cli_event
|
| 23 |
+
from .models import IncomingMessage
|
| 24 |
+
from .platforms.base import MessagingPlatform, SessionManagerInterface
|
| 25 |
+
from .rendering.discord_markdown import (
|
| 26 |
+
discord_bold,
|
| 27 |
+
discord_code_inline,
|
| 28 |
+
escape_discord,
|
| 29 |
+
escape_discord_code,
|
| 30 |
+
render_markdown_to_discord,
|
| 31 |
+
)
|
| 32 |
+
from .rendering.discord_markdown import (
|
| 33 |
+
format_status as format_status_discord, # (emoji, label, suffix)
|
| 34 |
+
)
|
| 35 |
+
from .rendering.telegram_markdown import (
|
| 36 |
+
escape_md_v2,
|
| 37 |
+
escape_md_v2_code,
|
| 38 |
+
mdv2_bold,
|
| 39 |
+
mdv2_code_inline,
|
| 40 |
+
render_markdown_to_mdv2,
|
| 41 |
+
)
|
| 42 |
+
from .rendering.telegram_markdown import (
|
| 43 |
+
format_status as format_status_telegram,
|
| 44 |
+
)
|
| 45 |
+
from .session import SessionStore
|
| 46 |
+
from .transcript import RenderCtx, TranscriptBuffer
|
| 47 |
+
from .trees.queue_manager import (
|
| 48 |
+
MessageNode,
|
| 49 |
+
MessageState,
|
| 50 |
+
MessageTree,
|
| 51 |
+
TreeQueueManager,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
# Status message prefixes used to filter our own messages (ignore echo).
# Incoming texts starting with any of these emoji are treated as bot-originated
# status lines and dropped in _handle_message_impl.
STATUS_MESSAGE_PREFIXES = ("⏳", "💭", "🔧", "✅", "❌", "🚀", "🤖", "📋", "📊", "🔄")

# Event types that update the transcript (frozenset for O(1) membership).
# Events of these types are forwarded to TranscriptBuffer.apply() in
# _process_parsed_event; everything else only drives status-line updates.
TRANSCRIPT_EVENT_TYPES = frozenset(
    {
        "thinking_start",
        "thinking_delta",
        "thinking_chunk",
        "thinking_stop",
        "text_start",
        "text_delta",
        "text_chunk",
        "text_stop",
        "tool_use_start",
        "tool_use_delta",
        "tool_use_stop",
        "tool_use",
        "tool_result",
        "block_stop",
        "error",
    }
)
|
| 77 |
+
|
| 78 |
+
# Event type -> (emoji, label) for status updates (O(1) lookup)
|
| 79 |
+
_EVENT_STATUS_MAP = {
|
| 80 |
+
"thinking_start": ("🧠", "Claude is thinking..."),
|
| 81 |
+
"thinking_delta": ("🧠", "Claude is thinking..."),
|
| 82 |
+
"thinking_chunk": ("🧠", "Claude is thinking..."),
|
| 83 |
+
"text_start": ("🧠", "Claude is working..."),
|
| 84 |
+
"text_delta": ("🧠", "Claude is working..."),
|
| 85 |
+
"text_chunk": ("🧠", "Claude is working..."),
|
| 86 |
+
"tool_result": ("⏳", "Executing tools..."),
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _get_status_for_event(ptype: str, parsed: dict, format_status_fn) -> str | None:
|
| 91 |
+
"""Return status string for event type, or None if no status update needed."""
|
| 92 |
+
entry = _EVENT_STATUS_MAP.get(ptype)
|
| 93 |
+
if entry is not None:
|
| 94 |
+
emoji, label = entry
|
| 95 |
+
return format_status_fn(emoji, label)
|
| 96 |
+
if ptype in ("tool_use_start", "tool_use_delta", "tool_use"):
|
| 97 |
+
if parsed.get("name") == "Task":
|
| 98 |
+
return format_status_fn("🤖", "Subagent working...")
|
| 99 |
+
return format_status_fn("⏳", "Executing tools...")
|
| 100 |
+
return None
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class ClaudeMessageHandler:
|
| 104 |
+
"""
|
| 105 |
+
Platform-agnostic handler for Claude interactions.
|
| 106 |
+
|
| 107 |
+
Uses a tree-based message queue where:
|
| 108 |
+
- New messages create a tree root
|
| 109 |
+
- Replies become children of the message being replied to
|
| 110 |
+
- Each node has state: PENDING, IN_PROGRESS, COMPLETED, ERROR
|
| 111 |
+
- Per-tree queue ensures ordered processing
|
| 112 |
+
"""
|
| 113 |
+
|
| 114 |
+
def __init__(
|
| 115 |
+
self,
|
| 116 |
+
platform: MessagingPlatform,
|
| 117 |
+
cli_manager: SessionManagerInterface,
|
| 118 |
+
session_store: SessionStore,
|
| 119 |
+
):
|
| 120 |
+
self.platform = platform
|
| 121 |
+
self.cli_manager = cli_manager
|
| 122 |
+
self.session_store = session_store
|
| 123 |
+
self._tree_queue = TreeQueueManager(
|
| 124 |
+
queue_update_callback=self.update_queue_positions,
|
| 125 |
+
node_started_callback=self.mark_node_processing,
|
| 126 |
+
)
|
| 127 |
+
is_discord = platform.name == "discord"
|
| 128 |
+
self._format_status_fn = (
|
| 129 |
+
format_status_discord if is_discord else format_status_telegram
|
| 130 |
+
)
|
| 131 |
+
self._parse_mode_val: str | None = None if is_discord else "MarkdownV2"
|
| 132 |
+
self._render_ctx_val = RenderCtx(
|
| 133 |
+
bold=discord_bold if is_discord else mdv2_bold,
|
| 134 |
+
code_inline=discord_code_inline if is_discord else mdv2_code_inline,
|
| 135 |
+
escape_code=escape_discord_code if is_discord else escape_md_v2_code,
|
| 136 |
+
escape_text=escape_discord if is_discord else escape_md_v2,
|
| 137 |
+
render_markdown=render_markdown_to_discord
|
| 138 |
+
if is_discord
|
| 139 |
+
else render_markdown_to_mdv2,
|
| 140 |
+
)
|
| 141 |
+
self._limit_chars = 1900 if is_discord else 3900
|
| 142 |
+
|
| 143 |
+
def format_status(self, emoji: str, label: str, suffix: str | None = None) -> str:
|
| 144 |
+
return self._format_status_fn(emoji, label, suffix)
|
| 145 |
+
|
| 146 |
+
    def _parse_mode(self) -> str | None:
        """Return the outgoing-message parse mode ("MarkdownV2" for Telegram, None for Discord)."""
        return self._parse_mode_val
|
| 148 |
+
|
| 149 |
+
    def get_render_ctx(self) -> RenderCtx:
        """Return the platform-specific render context built in __init__."""
        return self._render_ctx_val
|
| 151 |
+
|
| 152 |
+
    def _get_limit_chars(self) -> int:
        """Return the per-message character budget (1900 Discord, 3900 Telegram)."""
        return self._limit_chars
|
| 154 |
+
|
| 155 |
+
    @property
    def tree_queue(self) -> TreeQueueManager:
        """Accessor for the current tree queue manager (swappable via replace_tree_queue)."""
        return self._tree_queue
|
| 159 |
+
|
| 160 |
+
def replace_tree_queue(self, tree_queue: TreeQueueManager) -> None:
|
| 161 |
+
"""Replace tree queue manager via explicit API."""
|
| 162 |
+
self._tree_queue = tree_queue
|
| 163 |
+
self._tree_queue.set_queue_update_callback(self.update_queue_positions)
|
| 164 |
+
self._tree_queue.set_node_started_callback(self.mark_node_processing)
|
| 165 |
+
|
| 166 |
+
async def handle_message(self, incoming: IncomingMessage) -> None:
|
| 167 |
+
"""
|
| 168 |
+
Main entry point for handling an incoming message.
|
| 169 |
+
|
| 170 |
+
Determines if this is a new conversation or reply,
|
| 171 |
+
creates/extends the message tree, and queues for processing.
|
| 172 |
+
"""
|
| 173 |
+
text_preview = (incoming.text or "")[:80]
|
| 174 |
+
if len(incoming.text or "") > 80:
|
| 175 |
+
text_preview += "..."
|
| 176 |
+
logger.info(
|
| 177 |
+
"HANDLER_ENTRY: chat_id={} message_id={} reply_to={} text_preview={!r}",
|
| 178 |
+
incoming.chat_id,
|
| 179 |
+
incoming.message_id,
|
| 180 |
+
incoming.reply_to_message_id,
|
| 181 |
+
text_preview,
|
| 182 |
+
)
|
| 183 |
+
|
| 184 |
+
with logger.contextualize(
|
| 185 |
+
chat_id=incoming.chat_id, node_id=incoming.message_id
|
| 186 |
+
):
|
| 187 |
+
await self._handle_message_impl(incoming)
|
| 188 |
+
|
| 189 |
+
    async def _handle_message_impl(self, incoming: IncomingMessage) -> None:
        """
        Implementation of handle_message with context bound.

        Workflow: parse slash commands -> record the inbound message id ->
        drop echoes of our own status messages -> resolve the reply target to
        a tree node -> post/edit a status message -> create or extend the
        message tree -> persist it -> enqueue the node for processing.
        """
        # Check for commands; "/cmd@botname" is normalized to "/cmd".
        parts = (incoming.text or "").strip().split()
        cmd = parts[0] if parts else ""
        cmd_base = cmd.split("@", 1)[0] if cmd else ""

        # Record incoming message ID for best-effort UI clearing (/clear), even if
        # we later ignore this message (status/command/etc).
        try:
            if incoming.message_id is not None:
                kind = "command" if cmd_base.startswith("/") else "content"
                self.session_store.record_message_id(
                    incoming.platform,
                    incoming.chat_id,
                    str(incoming.message_id),
                    direction="in",
                    kind=kind,
                )
        except Exception as e:
            # Best-effort bookkeeping only; never block message handling.
            logger.debug(f"Failed to record incoming message_id: {e}")

        if cmd_base == "/clear":
            await self._handle_clear_command(incoming)
            return

        if cmd_base == "/stop":
            await self._handle_stop_command(incoming)
            return

        if cmd_base == "/stats":
            await self._handle_stats_command(incoming)
            return

        # Filter out status messages (our own messages, recognized by prefix emoji)
        text = incoming.text or ""
        if any(text.startswith(p) for p in STATUS_MESSAGE_PREFIXES):
            return

        # Check if this is a reply to an existing node in a tree
        parent_node_id = None
        tree = None

        if incoming.is_reply() and incoming.reply_to_message_id:
            # Look up if the replied-to message is in any tree (could be a node or status message)
            reply_id = incoming.reply_to_message_id
            tree = self.tree_queue.get_tree_for_node(reply_id)
            if tree:
                # Resolve to actual node ID (handles status message replies)
                parent_node_id = self.tree_queue.resolve_parent_node_id(reply_id)
                if parent_node_id:
                    logger.info(f"Found tree for reply, parent node: {parent_node_id}")
                else:
                    logger.warning(
                        f"Reply to {incoming.reply_to_message_id} found tree but no valid parent node"
                    )
                    tree = None  # Treat as new conversation

        # Generate node ID (the platform message id doubles as the node id)
        node_id = incoming.message_id

        # Use pre-sent status (e.g. voice note) or send new
        status_text = self._get_initial_status(tree, parent_node_id)
        if incoming.status_message_id:
            status_msg_id = incoming.status_message_id
            await self.platform.queue_edit_message(
                incoming.chat_id,
                status_msg_id,
                status_text,
                parse_mode=self._parse_mode(),
                fire_and_forget=False,
            )
        else:
            # fire_and_forget=False: we need the message id back to track it.
            status_msg_id = await self.platform.queue_send_message(
                incoming.chat_id,
                status_text,
                reply_to=incoming.message_id,
                fire_and_forget=False,
                message_thread_id=incoming.message_thread_id,
            )
            self.record_outgoing_message(
                incoming.platform, incoming.chat_id, status_msg_id, "status"
            )

        # Create or extend tree
        if parent_node_id and tree and status_msg_id:
            # Reply to existing node - add as child
            tree, _node = await self.tree_queue.add_to_tree(
                parent_node_id=parent_node_id,
                node_id=node_id,
                incoming=incoming,
                status_message_id=status_msg_id,
            )
            # Register status message as a node too for reply chains
            self.tree_queue.register_node(status_msg_id, tree.root_id)
            self.session_store.register_node(status_msg_id, tree.root_id)
            self.session_store.register_node(node_id, tree.root_id)
        elif status_msg_id:
            # New conversation - create new tree
            tree = await self.tree_queue.create_tree(
                node_id=node_id,
                incoming=incoming,
                status_message_id=status_msg_id,
            )
            # Register status message
            self.tree_queue.register_node(status_msg_id, tree.root_id)
            self.session_store.register_node(node_id, tree.root_id)
            self.session_store.register_node(status_msg_id, tree.root_id)

        # Persist tree
        if tree:
            self.session_store.save_tree(tree.root_id, tree.to_dict())

        # Enqueue for processing
        was_queued = await self.tree_queue.enqueue(
            node_id=node_id,
            processor=self._process_node,
        )

        if was_queued and status_msg_id:
            # Update status to show queue position
            queue_size = self.tree_queue.get_queue_size(node_id)
            await self.platform.queue_edit_message(
                incoming.chat_id,
                status_msg_id,
                self.format_status(
                    "📋", "Queued", f"(position {queue_size}) - waiting..."
                ),
                parse_mode=self._parse_mode(),
            )
|
| 319 |
+
|
| 320 |
+
async def update_queue_positions(self, tree: MessageTree) -> None:
|
| 321 |
+
"""Refresh queued status messages after a dequeue."""
|
| 322 |
+
try:
|
| 323 |
+
queued_ids = await tree.get_queue_snapshot()
|
| 324 |
+
except Exception as e:
|
| 325 |
+
logger.warning(f"Failed to read queue snapshot: {e}")
|
| 326 |
+
return
|
| 327 |
+
|
| 328 |
+
if not queued_ids:
|
| 329 |
+
return
|
| 330 |
+
|
| 331 |
+
position = 0
|
| 332 |
+
for node_id in queued_ids:
|
| 333 |
+
node = tree.get_node(node_id)
|
| 334 |
+
if not node or node.state != MessageState.PENDING:
|
| 335 |
+
continue
|
| 336 |
+
position += 1
|
| 337 |
+
self.platform.fire_and_forget(
|
| 338 |
+
self.platform.queue_edit_message(
|
| 339 |
+
node.incoming.chat_id,
|
| 340 |
+
node.status_message_id,
|
| 341 |
+
self.format_status(
|
| 342 |
+
"📋", "Queued", f"(position {position}) - waiting..."
|
| 343 |
+
),
|
| 344 |
+
parse_mode=self._parse_mode(),
|
| 345 |
+
)
|
| 346 |
+
)
|
| 347 |
+
|
| 348 |
+
async def mark_node_processing(self, tree: MessageTree, node_id: str) -> None:
|
| 349 |
+
"""Update the dequeued node's status to processing immediately."""
|
| 350 |
+
node = tree.get_node(node_id)
|
| 351 |
+
if not node or node.state == MessageState.ERROR:
|
| 352 |
+
return
|
| 353 |
+
self.platform.fire_and_forget(
|
| 354 |
+
self.platform.queue_edit_message(
|
| 355 |
+
node.incoming.chat_id,
|
| 356 |
+
node.status_message_id,
|
| 357 |
+
self.format_status("🔄", "Processing..."),
|
| 358 |
+
parse_mode=self._parse_mode(),
|
| 359 |
+
)
|
| 360 |
+
)
|
| 361 |
+
|
| 362 |
+
def _create_transcript_and_render_ctx(
|
| 363 |
+
self,
|
| 364 |
+
) -> tuple[TranscriptBuffer, RenderCtx]:
|
| 365 |
+
"""Create transcript buffer and render context for node processing."""
|
| 366 |
+
transcript = TranscriptBuffer(show_tool_results=False)
|
| 367 |
+
return transcript, self.get_render_ctx()
|
| 368 |
+
|
| 369 |
+
async def _handle_session_info_event(
|
| 370 |
+
self,
|
| 371 |
+
event_data: dict,
|
| 372 |
+
tree: MessageTree | None,
|
| 373 |
+
node_id: str,
|
| 374 |
+
captured_session_id: str | None,
|
| 375 |
+
temp_session_id: str | None,
|
| 376 |
+
) -> tuple[str | None, str | None]:
|
| 377 |
+
"""Handle session_info event; return updated (captured_session_id, temp_session_id)."""
|
| 378 |
+
if event_data.get("type") != "session_info":
|
| 379 |
+
return captured_session_id, temp_session_id
|
| 380 |
+
|
| 381 |
+
real_session_id = event_data.get("session_id")
|
| 382 |
+
if not real_session_id or not temp_session_id:
|
| 383 |
+
return captured_session_id, temp_session_id
|
| 384 |
+
|
| 385 |
+
await self.cli_manager.register_real_session_id(
|
| 386 |
+
temp_session_id, real_session_id
|
| 387 |
+
)
|
| 388 |
+
if tree and real_session_id:
|
| 389 |
+
await tree.update_state(
|
| 390 |
+
node_id,
|
| 391 |
+
MessageState.IN_PROGRESS,
|
| 392 |
+
session_id=real_session_id,
|
| 393 |
+
)
|
| 394 |
+
self.session_store.save_tree(tree.root_id, tree.to_dict())
|
| 395 |
+
|
| 396 |
+
return real_session_id, None
|
| 397 |
+
|
| 398 |
+
    async def _process_parsed_event(
        self,
        parsed: dict,
        transcript: TranscriptBuffer,
        update_ui,
        last_status: str | None,
        had_transcript_events: bool,
        tree: MessageTree | None,
        node_id: str,
        captured_session_id: str | None,
    ) -> tuple[str | None, bool]:
        """
        Process a single parsed CLI event. Returns (last_status, had_transcript_events).

        The caller threads the returned pair back into the next invocation, so
        this method is effectively a fold step over the event stream.
        """
        ptype = parsed.get("type") or ""

        # Transcript-relevant events are appended regardless of which status
        # branch fires below.
        if ptype in TRANSCRIPT_EVENT_TYPES:
            transcript.apply(parsed)
            had_transcript_events = True

        status = _get_status_for_event(ptype, parsed, self.format_status)
        if status is not None:
            await update_ui(status)
            last_status = status
        elif ptype == "block_stop":
            # Force a flush with the previous status so a finished block is shown.
            await update_ui(last_status, force=True)
        elif ptype == "complete":
            if not had_transcript_events:
                # Guarantee a visible reply even when the CLI produced no text.
                transcript.apply({"type": "text_chunk", "text": "Done."})
            logger.info("HANDLER: Task complete, updating UI")
            await update_ui(self.format_status("✅", "Complete"), force=True)
            if tree and captured_session_id:
                await tree.update_state(
                    node_id,
                    MessageState.COMPLETED,
                    session_id=captured_session_id,
                )
                self.session_store.save_tree(tree.root_id, tree.to_dict())
        elif ptype == "error":
            error_msg = parsed.get("message", "Unknown error")
            logger.error(f"HANDLER: Error event received: {error_msg}")
            logger.info("HANDLER: Updating UI with error status")
            await update_ui(self.format_status("❌", "Error"), force=True)
            if tree:
                # A failed parent invalidates queued children as well.
                await self._propagate_error_to_children(
                    node_id, error_msg, "Parent task failed"
                )

        return last_status, had_transcript_events
|
| 445 |
+
|
| 446 |
+
async def _process_node(
|
| 447 |
+
self,
|
| 448 |
+
node_id: str,
|
| 449 |
+
node: MessageNode,
|
| 450 |
+
) -> None:
|
| 451 |
+
"""Core task processor - handles a single Claude CLI interaction."""
|
| 452 |
+
incoming = node.incoming
|
| 453 |
+
status_msg_id = node.status_message_id
|
| 454 |
+
chat_id = incoming.chat_id
|
| 455 |
+
|
| 456 |
+
with logger.contextualize(node_id=node_id, chat_id=chat_id):
|
| 457 |
+
await self._process_node_impl(node_id, node, chat_id, status_msg_id)
|
| 458 |
+
|
| 459 |
+
    async def _process_node_impl(
        self,
        node_id: str,
        node: MessageNode,
        chat_id: str,
        status_msg_id: str,
    ) -> None:
        """
        Internal implementation of _process_node with context bound.

        Acquires a CLI session, streams task events, updates the status message
        (throttled to ~1/s unless forced), and always releases the session slot
        in the finally block.
        """
        incoming = node.incoming

        tree = self.tree_queue.get_tree_for_node(node_id)
        if tree:
            await tree.update_state(node_id, MessageState.IN_PROGRESS)

        transcript, render_ctx = self._create_transcript_and_render_ctx()

        # Closure state for update_ui (throttling + dedupe of edits).
        last_ui_update = 0.0
        last_displayed_text = None
        had_transcript_events = False
        captured_session_id = None
        temp_session_id = None
        last_status: str | None = None

        parent_session_id = None
        if tree and node.parent_id:
            parent_session_id = tree.get_parent_session_id(node_id)
            if parent_session_id:
                logger.info(f"Will fork from parent session: {parent_session_id}")

        async def update_ui(status: str | None = None, force: bool = False) -> None:
            """Render the transcript and edit the status message; throttled unless forced."""
            nonlocal last_ui_update, last_displayed_text, last_status
            now = time.time()
            # Throttle edits to at most one per second unless forced.
            if not force and now - last_ui_update < 1.0:
                return

            last_ui_update = now
            if status is not None:
                last_status = status
            try:
                display = transcript.render(
                    render_ctx,
                    limit_chars=self._get_limit_chars(),
                    status=status,
                )
            except Exception as e:
                logger.warning(f"Transcript render failed for node {node_id}: {e}")
                return
            # Skip the edit entirely when the rendered text is unchanged.
            if display and display != last_displayed_text:
                logger.debug(
                    "PLATFORM_EDIT: node_id={} chat_id={} msg_id={} force={} status={!r} chars={}",
                    node_id,
                    chat_id,
                    status_msg_id,
                    bool(force),
                    status,
                    len(display),
                )
                if os.getenv("DEBUG_TELEGRAM_EDITS") == "1":
                    logger.debug("PLATFORM_EDIT_TEXT:\n{}", display)
                else:
                    head = display[:500]
                    tail = display[-500:] if len(display) > 500 else ""
                    logger.debug("PLATFORM_EDIT_PREVIEW_HEAD:\n{}", head)
                    if tail:
                        logger.debug("PLATFORM_EDIT_PREVIEW_TAIL:\n{}", tail)
                last_displayed_text = display
                try:
                    await self.platform.queue_edit_message(
                        chat_id,
                        status_msg_id,
                        display,
                        parse_mode=self._parse_mode(),
                    )
                except Exception as e:
                    logger.warning(f"Failed to update platform for node {node_id}: {e}")

        try:
            try:
                (
                    cli_session,
                    session_or_temp_id,
                    is_new,
                ) = await self.cli_manager.get_or_create_session(session_id=None)
                if is_new:
                    temp_session_id = session_or_temp_id
                else:
                    captured_session_id = session_or_temp_id
            except RuntimeError as e:
                # presumably raised when the session manager is at capacity —
                # TODO confirm against cli_manager.get_or_create_session
                error_message = get_user_facing_error_message(e)
                transcript.apply({"type": "error", "message": error_message})
                await update_ui(
                    self.format_status("⏳", "Session limit reached"),
                    force=True,
                )
                if tree:
                    await tree.update_state(
                        node_id,
                        MessageState.ERROR,
                        error_message=error_message,
                    )
                return

            logger.info(f"HANDLER: Starting CLI task processing for node {node_id}")
            event_count = 0
            async for event_data in cli_session.start_task(
                incoming.text,
                session_id=parent_session_id,
                fork_session=bool(parent_session_id),
            ):
                if not isinstance(event_data, dict):
                    logger.warning(
                        f"HANDLER: Non-dict event received: {type(event_data)}"
                    )
                    continue
                event_count += 1
                if event_count % 10 == 0:
                    logger.debug(f"HANDLER: Processed {event_count} events so far")

                # session_info events swap the temp session id for the real one.
                (
                    captured_session_id,
                    temp_session_id,
                ) = await self._handle_session_info_event(
                    event_data, tree, node_id, captured_session_id, temp_session_id
                )
                if event_data.get("type") == "session_info":
                    continue

                parsed_list = parse_cli_event(event_data)
                logger.debug(f"HANDLER: Parsed {len(parsed_list)} events from CLI")

                for parsed in parsed_list:
                    (
                        last_status,
                        had_transcript_events,
                    ) = await self._process_parsed_event(
                        parsed,
                        transcript,
                        update_ui,
                        last_status,
                        had_transcript_events,
                        tree,
                        node_id,
                        captured_session_id,
                    )

        except asyncio.CancelledError:
            # NOTE(review): CancelledError is swallowed here (not re-raised) —
            # presumably intentional so cancellation renders a final status;
            # confirm this does not interfere with outer task cancellation.
            logger.warning(f"HANDLER: Task cancelled for node {node_id}")
            cancel_reason = None
            if isinstance(node.context, dict):
                cancel_reason = node.context.get("cancel_reason")

            if cancel_reason == "stop":
                await update_ui(self.format_status("⏹", "Stopped."), force=True)
            else:
                transcript.apply({"type": "error", "message": "Task was cancelled"})
                await update_ui(self.format_status("❌", "Cancelled"), force=True)

            # Do not propagate cancellation to children; a reply-scoped "/stop"
            # should only stop the targeted task.
            if tree:
                await tree.update_state(
                    node_id, MessageState.ERROR, error_message="Cancelled by user"
                )
        except Exception as e:
            logger.error(
                f"HANDLER: Task failed with exception: {type(e).__name__}: {e}"
            )
            error_msg = get_user_facing_error_message(e)[:200]
            transcript.apply({"type": "error", "message": error_msg})
            await update_ui(self.format_status("💥", "Task Failed"), force=True)
            if tree:
                await self._propagate_error_to_children(
                    node_id, error_msg, "Parent task failed"
                )
        finally:
            logger.info(f"HANDLER: _process_node completed for node {node_id}")
            # Free the session-manager slot. Session IDs are persisted in the tree and
            # can be resumed later by ID; we don't need to keep a CLISession instance
            # around after this node completes.
            try:
                if captured_session_id:
                    await self.cli_manager.remove_session(captured_session_id)
                elif temp_session_id:
                    await self.cli_manager.remove_session(temp_session_id)
            except Exception as e:
                logger.debug(f"Failed to remove session for node {node_id}: {e}")
|
| 645 |
+
|
| 646 |
+
async def _propagate_error_to_children(
|
| 647 |
+
self,
|
| 648 |
+
node_id: str,
|
| 649 |
+
error_msg: str,
|
| 650 |
+
child_status_text: str,
|
| 651 |
+
) -> None:
|
| 652 |
+
"""Mark node as error and propagate to pending children with UI updates."""
|
| 653 |
+
affected = await self.tree_queue.mark_node_error(
|
| 654 |
+
node_id, error_msg, propagate_to_children=True
|
| 655 |
+
)
|
| 656 |
+
# Update status messages for all affected children (skip first = current node)
|
| 657 |
+
for child in affected[1:]:
|
| 658 |
+
self.platform.fire_and_forget(
|
| 659 |
+
self.platform.queue_edit_message(
|
| 660 |
+
child.incoming.chat_id,
|
| 661 |
+
child.status_message_id,
|
| 662 |
+
self.format_status("❌", "Cancelled:", child_status_text),
|
| 663 |
+
parse_mode=self._parse_mode(),
|
| 664 |
+
)
|
| 665 |
+
)
|
| 666 |
+
|
| 667 |
+
def _get_initial_status(
|
| 668 |
+
self,
|
| 669 |
+
tree: object | None,
|
| 670 |
+
parent_node_id: str | None,
|
| 671 |
+
) -> str:
|
| 672 |
+
"""Get initial status message text."""
|
| 673 |
+
if tree and parent_node_id:
|
| 674 |
+
# Reply to existing tree
|
| 675 |
+
if self.tree_queue.is_node_tree_busy(parent_node_id):
|
| 676 |
+
queue_size = self.tree_queue.get_queue_size(parent_node_id) + 1
|
| 677 |
+
return self.format_status(
|
| 678 |
+
"📋", "Queued", f"(position {queue_size}) - waiting..."
|
| 679 |
+
)
|
| 680 |
+
return self.format_status("🔄", "Continuing conversation...")
|
| 681 |
+
|
| 682 |
+
# New conversation
|
| 683 |
+
return self.format_status("⏳", "Launching new Claude CLI instance...")
|
| 684 |
+
|
| 685 |
+
async def stop_all_tasks(self) -> int:
|
| 686 |
+
"""
|
| 687 |
+
Stop all pending and in-progress tasks.
|
| 688 |
+
|
| 689 |
+
Order of operations:
|
| 690 |
+
1. Cancel tree queue tasks (uses internal locking)
|
| 691 |
+
2. Stop CLI sessions
|
| 692 |
+
3. Update UI for all affected nodes
|
| 693 |
+
"""
|
| 694 |
+
# 1. Cancel tree queue tasks using the public async method
|
| 695 |
+
logger.info("Cancelling tree queue tasks...")
|
| 696 |
+
cancelled_nodes = await self.tree_queue.cancel_all()
|
| 697 |
+
logger.info(f"Cancelled {len(cancelled_nodes)} nodes")
|
| 698 |
+
|
| 699 |
+
# 2. Stop CLI sessions - this kills subprocesses and ensures everything is dead
|
| 700 |
+
logger.info("Stopping all CLI sessions...")
|
| 701 |
+
await self.cli_manager.stop_all()
|
| 702 |
+
|
| 703 |
+
# 3. Update UI and persist state for all cancelled nodes
|
| 704 |
+
self.update_cancelled_nodes_ui(cancelled_nodes)
|
| 705 |
+
|
| 706 |
+
return len(cancelled_nodes)
|
| 707 |
+
|
| 708 |
+
async def stop_task(self, node_id: str) -> int:
|
| 709 |
+
"""
|
| 710 |
+
Stop a single queued or in-progress task node.
|
| 711 |
+
|
| 712 |
+
Used when the user replies "/stop" to a specific status/user message.
|
| 713 |
+
"""
|
| 714 |
+
tree = self.tree_queue.get_tree_for_node(node_id)
|
| 715 |
+
if tree:
|
| 716 |
+
node = tree.get_node(node_id)
|
| 717 |
+
if node and node.state not in (MessageState.COMPLETED, MessageState.ERROR):
|
| 718 |
+
# Used by _process_node cancellation path to render "Stopped."
|
| 719 |
+
node.set_context({"cancel_reason": "stop"})
|
| 720 |
+
|
| 721 |
+
cancelled_nodes = await self.tree_queue.cancel_node(node_id)
|
| 722 |
+
self.update_cancelled_nodes_ui(cancelled_nodes)
|
| 723 |
+
return len(cancelled_nodes)
|
| 724 |
+
|
| 725 |
+
def record_outgoing_message(
|
| 726 |
+
self,
|
| 727 |
+
platform: str,
|
| 728 |
+
chat_id: str,
|
| 729 |
+
msg_id: str | None,
|
| 730 |
+
kind: str,
|
| 731 |
+
) -> None:
|
| 732 |
+
"""Record outgoing message ID for /clear. Best-effort, never raises."""
|
| 733 |
+
if not msg_id:
|
| 734 |
+
return
|
| 735 |
+
try:
|
| 736 |
+
self.session_store.record_message_id(
|
| 737 |
+
platform, chat_id, str(msg_id), direction="out", kind=kind
|
| 738 |
+
)
|
| 739 |
+
except Exception as e:
|
| 740 |
+
logger.debug(f"Failed to record message_id: {e}")
|
| 741 |
+
|
| 742 |
+
def update_cancelled_nodes_ui(self, nodes: list[MessageNode]) -> None:
|
| 743 |
+
"""Update status messages and persist tree state for cancelled nodes."""
|
| 744 |
+
trees_to_save: dict[str, MessageTree] = {}
|
| 745 |
+
for node in nodes:
|
| 746 |
+
self.platform.fire_and_forget(
|
| 747 |
+
self.platform.queue_edit_message(
|
| 748 |
+
node.incoming.chat_id,
|
| 749 |
+
node.status_message_id,
|
| 750 |
+
self.format_status("⏹", "Stopped."),
|
| 751 |
+
parse_mode=self._parse_mode(),
|
| 752 |
+
)
|
| 753 |
+
)
|
| 754 |
+
tree = self.tree_queue.get_tree_for_node(node.node_id)
|
| 755 |
+
if tree:
|
| 756 |
+
trees_to_save[tree.root_id] = tree
|
| 757 |
+
for root_id, tree in trees_to_save.items():
|
| 758 |
+
self.session_store.save_tree(root_id, tree.to_dict())
|
| 759 |
+
|
| 760 |
+
    async def _handle_stop_command(self, incoming: IncomingMessage) -> None:
        """Handle /stop command from messaging platform.

        Thin delegator: forwards this handler instance and the incoming
        message to the module-level `handle_stop_command` implementation.
        """
        await handle_stop_command(self, incoming)
|
| 763 |
+
|
| 764 |
+
    async def _handle_stats_command(self, incoming: IncomingMessage) -> None:
        """Handle /stats command.

        Thin delegator to the module-level `handle_stats_command`.
        """
        await handle_stats_command(self, incoming)
|
| 767 |
+
|
| 768 |
+
    async def _handle_clear_command(self, incoming: IncomingMessage) -> None:
        """Handle /clear command.

        Thin delegator to the module-level `handle_clear_command`.
        """
        await handle_clear_command(self, incoming)
|
Claude_Code/messaging/limiter.py
ADDED
|
@@ -0,0 +1,312 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Global Rate Limiter for Messaging Platforms.
|
| 3 |
+
|
| 4 |
+
Centralizes outgoing message requests and ensures compliance with rate limits
|
| 5 |
+
using a strict sliding window algorithm and a task queue.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import asyncio
|
| 9 |
+
import os
|
| 10 |
+
import time
|
| 11 |
+
from collections import deque
|
| 12 |
+
from collections.abc import Awaitable, Callable
|
| 13 |
+
from typing import Any
|
| 14 |
+
|
| 15 |
+
from loguru import logger
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class SlidingWindowLimiter:
|
| 19 |
+
"""Strict sliding window limiter.
|
| 20 |
+
|
| 21 |
+
Guarantees: at most `rate_limit` acquisitions in any interval of length
|
| 22 |
+
`rate_window` (seconds).
|
| 23 |
+
|
| 24 |
+
Implemented as an async context manager so call sites can do:
|
| 25 |
+
async with limiter:
|
| 26 |
+
...
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
def __init__(self, rate_limit: int, rate_window: float) -> None:
|
| 30 |
+
if rate_limit <= 0:
|
| 31 |
+
raise ValueError("rate_limit must be > 0")
|
| 32 |
+
if rate_window <= 0:
|
| 33 |
+
raise ValueError("rate_window must be > 0")
|
| 34 |
+
|
| 35 |
+
self._rate_limit = int(rate_limit)
|
| 36 |
+
self._rate_window = float(rate_window)
|
| 37 |
+
self._times: deque[float] = deque()
|
| 38 |
+
self._lock = asyncio.Lock()
|
| 39 |
+
|
| 40 |
+
async def acquire(self) -> None:
|
| 41 |
+
while True:
|
| 42 |
+
wait_time = 0.0
|
| 43 |
+
async with self._lock:
|
| 44 |
+
now = time.monotonic()
|
| 45 |
+
cutoff = now - self._rate_window
|
| 46 |
+
|
| 47 |
+
while self._times and self._times[0] <= cutoff:
|
| 48 |
+
self._times.popleft()
|
| 49 |
+
|
| 50 |
+
if len(self._times) < self._rate_limit:
|
| 51 |
+
self._times.append(now)
|
| 52 |
+
return
|
| 53 |
+
|
| 54 |
+
oldest = self._times[0]
|
| 55 |
+
wait_time = max(0.0, (oldest + self._rate_window) - now)
|
| 56 |
+
|
| 57 |
+
if wait_time > 0:
|
| 58 |
+
await asyncio.sleep(wait_time)
|
| 59 |
+
else:
|
| 60 |
+
await asyncio.sleep(0)
|
| 61 |
+
|
| 62 |
+
async def __aenter__(self) -> SlidingWindowLimiter:
|
| 63 |
+
await self.acquire()
|
| 64 |
+
return self
|
| 65 |
+
|
| 66 |
+
async def __aexit__(self, exc_type, exc, tb) -> bool:
|
| 67 |
+
return False
|
| 68 |
+
|
| 69 |
+
|
| 70 |
+
class MessagingRateLimiter:
    """
    A thread-safe global rate limiter for messaging.

    Uses a custom queue with task compaction (deduplication) to ensure
    only the latest version of a message update is processed.
    """

    # Annotation quoted: the class name is not bound while the class body
    # executes, and this module has no `from __future__ import annotations`,
    # so a bare `MessagingRateLimiter` here would raise NameError at import.
    _instance: "MessagingRateLimiter | None" = None
    _lock = asyncio.Lock()

    def __new__(cls, *args, **kwargs):
        return super().__new__(cls)

    @classmethod
    async def get_instance(cls) -> "MessagingRateLimiter":
        """Get the singleton instance of the limiter."""
        async with cls._lock:
            if cls._instance is None:
                cls._instance = cls()
                # Start the background worker (tracked for graceful shutdown).
                cls._instance._start_worker()
            return cls._instance

    def __init__(self):
        # Prevent double initialization in singleton
        if hasattr(self, "_initialized"):
            return

        rate_limit = int(os.getenv("MESSAGING_RATE_LIMIT", "1"))
        rate_window = float(os.getenv("MESSAGING_RATE_WINDOW", "2.0"))

        self.limiter = SlidingWindowLimiter(rate_limit, rate_window)
        # Custom queue state - using deque for O(1) popleft
        self._queue_list: deque[str] = deque()  # Deque of dedup_keys in order
        self._queue_map: dict[
            str, tuple[Callable[[], Awaitable[Any]], list[asyncio.Future]]
        ] = {}
        self._condition = asyncio.Condition()
        self._shutdown = asyncio.Event()
        self._worker_task: asyncio.Task | None = None

        self._initialized = True
        # Event-loop time until which the worker sleeps after a FloodWait.
        self._paused_until = 0

        logger.info(
            f"MessagingRateLimiter initialized ({rate_limit} req / {rate_window}s with Task Compaction)"
        )

    def _start_worker(self) -> None:
        """Ensure the worker task exists."""
        if self._worker_task and not self._worker_task.done():
            return
        # Named task helps debugging shutdown hangs.
        self._worker_task = asyncio.create_task(
            self._worker(), name="msg-limiter-worker"
        )

    async def _worker(self):
        """Background worker that processes queued messaging tasks."""
        logger.info("MessagingRateLimiter worker started")
        while not self._shutdown.is_set():
            try:
                # Get a task from the queue
                async with self._condition:
                    while not self._queue_list and not self._shutdown.is_set():
                        await self._condition.wait()

                    if self._shutdown.is_set():
                        break

                    dedup_key = self._queue_list.popleft()
                    func, futures = self._queue_map.pop(dedup_key)

                # Check for manual pause (FloodWait)
                now = asyncio.get_event_loop().time()
                if self._paused_until > now:
                    wait_time = self._paused_until - now
                    logger.warning(
                        f"Limiter worker paused, waiting {wait_time:.1f}s more..."
                    )
                    await asyncio.sleep(wait_time)

                # Wait for rate limit capacity
                async with self.limiter:
                    try:
                        result = await func()
                        for f in futures:
                            if not f.done():
                                f.set_result(result)
                    except Exception as e:
                        # Report error to all futures and log it
                        for f in futures:
                            if not f.done():
                                f.set_exception(e)

                        error_msg = str(e).lower()
                        if "flood" in error_msg or "wait" in error_msg:
                            seconds = 30
                            try:
                                if hasattr(e, "seconds"):
                                    seconds = e.seconds
                                elif "after " in error_msg:
                                    # Try to parse "retry after X"
                                    parts = error_msg.split("after ")
                                    if len(parts) > 1:
                                        seconds = int(parts[1].split()[0])
                            except Exception:
                                pass

                            logger.error(
                                f"FloodWait detected! Pausing worker for {seconds}s"
                            )
                            wait_secs = (
                                float(seconds)
                                if isinstance(seconds, (int, float, str))
                                else 30.0
                            )
                            self._paused_until = (
                                asyncio.get_event_loop().time() + wait_secs
                            )
                        else:
                            logger.error(
                                f"Error in limiter worker for key {dedup_key}: {type(e).__name__}: {e}"
                            )
            except asyncio.CancelledError:
                break
            except Exception as e:
                logger.error(
                    f"MessagingRateLimiter worker critical error: {e}", exc_info=True
                )
                await asyncio.sleep(1)

    async def shutdown(self, timeout: float = 2.0) -> None:
        """Stop the background worker so process shutdown doesn't hang."""
        self._shutdown.set()
        try:
            async with self._condition:
                self._condition.notify_all()
        except Exception:
            # Best-effort: condition may be bound to a closing loop.
            pass

        task = self._worker_task
        if not task or task.done():
            self._worker_task = None
            return

        task.cancel()
        try:
            await asyncio.wait_for(task, timeout=timeout)
        except TimeoutError:
            logger.warning("MessagingRateLimiter worker did not stop before timeout")
        except asyncio.CancelledError:
            pass
        except Exception as e:
            logger.debug(f"MessagingRateLimiter worker shutdown error: {e}")
        finally:
            self._worker_task = None

    @classmethod
    async def shutdown_instance(cls, timeout: float = 2.0) -> None:
        """Shutdown and clear the singleton instance (safe to call multiple times)."""
        inst = cls._instance
        if not inst:
            return
        try:
            await inst.shutdown(timeout=timeout)
        finally:
            cls._instance = None

    async def _enqueue_internal(self, func, future, dedup_key, front=False):
        # Single-future convenience wrapper around the multi-future path.
        await self._enqueue_internal_multi(func, [future], dedup_key, front)

    async def _enqueue_internal_multi(self, func, futures, dedup_key, front=False):
        # Insert (or compact into) the queue under the condition lock and
        # wake the worker.
        async with self._condition:
            if dedup_key in self._queue_map:
                # Compaction: Update existing task with new func, append new futures
                _old_func, old_futures = self._queue_map[dedup_key]
                old_futures.extend(futures)
                self._queue_map[dedup_key] = (func, old_futures)
                logger.debug(
                    f"Compacted task for key: {dedup_key} (now {len(old_futures)} futures)"
                )
            else:
                self._queue_map[dedup_key] = (func, futures)
                if front:
                    self._queue_list.appendleft(dedup_key)
                else:
                    self._queue_list.append(dedup_key)
            self._condition.notify_all()

    async def enqueue(
        self, func: Callable[[], Awaitable[Any]], dedup_key: str | None = None
    ) -> Any:
        """
        Enqueue a messaging task and return its future result.
        If dedup_key is provided, subsequent tasks with the same key will replace this one.
        """
        if dedup_key is None:
            # Unique key to avoid deduplication
            dedup_key = f"task_{id(func)}_{asyncio.get_event_loop().time()}"

        future = asyncio.get_event_loop().create_future()
        await self._enqueue_internal(func, future, dedup_key)
        return await future

    def fire_and_forget(
        self, func: Callable[[], Awaitable[Any]], dedup_key: str | None = None
    ):
        """Enqueue a task without waiting for the result."""
        if dedup_key is None:
            dedup_key = f"task_{id(func)}_{asyncio.get_event_loop().time()}"

        # NOTE(review): this future is created for error reporting only;
        # nothing awaits it, so a set_exception may surface as an
        # "exception never retrieved" warning — confirm if intentional.
        future = asyncio.get_event_loop().create_future()

        async def _wrapped():
            max_retries = 2
            for attempt in range(max_retries + 1):
                try:
                    return await self.enqueue(func, dedup_key)
                except Exception as e:
                    error_msg = str(e).lower()
                    # Only retry transient connectivity issues that might have slipped through
                    # or occurred between platform checks.
                    if attempt < max_retries and any(
                        x in error_msg for x in ["connect", "timeout", "broken"]
                    ):
                        wait = 2**attempt
                        logger.warning(
                            f"Limiter fire_and_forget transient error (attempt {attempt + 1}): {e}. Retrying in {wait}s..."
                        )
                        await asyncio.sleep(wait)
                        continue

                    logger.error(
                        f"Final error in fire_and_forget for key {dedup_key}: {type(e).__name__}: {e}"
                    )
                    if not future.done():
                        future.set_exception(e)
                    break

        _ = asyncio.create_task(_wrapped())
|
Claude_Code/messaging/models.py
ADDED
|
@@ -0,0 +1,36 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Platform-agnostic message models."""
|
| 2 |
+
|
| 3 |
+
from dataclasses import dataclass, field
|
| 4 |
+
from datetime import UTC, datetime
|
| 5 |
+
from typing import Any
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
@dataclass
|
| 9 |
+
class IncomingMessage:
|
| 10 |
+
"""
|
| 11 |
+
Platform-agnostic incoming message.
|
| 12 |
+
|
| 13 |
+
Adapters convert platform-specific events to this format.
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
text: str
|
| 17 |
+
chat_id: str
|
| 18 |
+
user_id: str
|
| 19 |
+
message_id: str
|
| 20 |
+
platform: str # "telegram", "discord", "slack", etc.
|
| 21 |
+
|
| 22 |
+
# Optional fields
|
| 23 |
+
reply_to_message_id: str | None = None
|
| 24 |
+
# Forum topic ID (Telegram); required when replying in forum supergroups
|
| 25 |
+
message_thread_id: str | None = None
|
| 26 |
+
username: str | None = None
|
| 27 |
+
# Pre-sent status message ID (e.g. "Transcribing voice note..."); handler edits in place
|
| 28 |
+
status_message_id: str | None = None
|
| 29 |
+
timestamp: datetime = field(default_factory=lambda: datetime.now(UTC))
|
| 30 |
+
|
| 31 |
+
# Platform-specific raw event for edge cases
|
| 32 |
+
raw_event: Any = None
|
| 33 |
+
|
| 34 |
+
def is_reply(self) -> bool:
|
| 35 |
+
"""Check if this message is a reply to another message."""
|
| 36 |
+
return self.reply_to_message_id is not None
|
Claude_Code/messaging/platforms/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Messaging platform adapters (Telegram, Discord, etc.)."""
|
| 2 |
+
|
| 3 |
+
from .base import CLISession, MessagingPlatform, SessionManagerInterface
|
| 4 |
+
from .factory import create_messaging_platform
|
| 5 |
+
|
| 6 |
+
__all__ = [
|
| 7 |
+
"CLISession",
|
| 8 |
+
"MessagingPlatform",
|
| 9 |
+
"SessionManagerInterface",
|
| 10 |
+
"create_messaging_platform",
|
| 11 |
+
]
|
Claude_Code/messaging/platforms/base.py
ADDED
|
@@ -0,0 +1,218 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Abstract base class for messaging platforms."""
|
| 2 |
+
|
| 3 |
+
from abc import ABC, abstractmethod
|
| 4 |
+
from collections.abc import AsyncGenerator, Awaitable, Callable
|
| 5 |
+
from typing import (
|
| 6 |
+
Any,
|
| 7 |
+
Protocol,
|
| 8 |
+
runtime_checkable,
|
| 9 |
+
)
|
| 10 |
+
|
| 11 |
+
from ..models import IncomingMessage
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
@runtime_checkable
class CLISession(Protocol):
    """Protocol for CLI session - avoid circular import from cli package.

    Structural type: any object exposing `start_task` and `is_busy`
    satisfies it; `@runtime_checkable` lets isinstance() check method
    names (not signatures) at runtime.
    """

    def start_task(
        self, prompt: str, session_id: str | None = None, fork_session: bool = False
    ) -> AsyncGenerator[dict, Any]:
        """Start a task in the CLI session.

        Yields event dicts produced by the underlying CLI process.
        """
        ...

    @property
    @abstractmethod
    def is_busy(self) -> bool:
        """Check if session is busy."""
        pass
|
| 29 |
+
|
| 30 |
+
|
| 31 |
+
@runtime_checkable
class SessionManagerInterface(Protocol):
    """
    Protocol for session managers to avoid tight coupling with cli package.

    Implementations: CLISessionManager
    """

    async def get_or_create_session(
        self, session_id: str | None = None
    ) -> tuple[CLISession, str, bool]:
        """
        Get an existing session or create a new one.

        Returns: Tuple of (session, session_id, is_new_session)
        """
        ...

    async def register_real_session_id(
        self, temp_id: str, real_session_id: str
    ) -> bool:
        """Register the real session ID from CLI output.

        Returns True on success (per the declared signature).
        """
        ...

    async def stop_all(self) -> None:
        """Stop all sessions."""
        ...

    async def remove_session(self, session_id: str) -> bool:
        """Remove a session from the manager."""
        ...

    def get_stats(self) -> dict:
        """Get session statistics."""
        ...
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class MessagingPlatform(ABC):
    """
    Base class for all messaging platform adapters.

    Implement this to add support for Telegram, Discord, Slack, etc.

    Two families of send APIs are declared: the direct `send_message` /
    `edit_message` / `delete_message` calls, and their `queue_*`
    counterparts which route through a rate limiter and may be
    fire-and-forget.
    """

    # Platform identifier; subclasses override (e.g. "telegram", "discord").
    name: str = "base"

    @abstractmethod
    async def start(self) -> None:
        """Initialize and connect to the messaging platform."""
        pass

    @abstractmethod
    async def stop(self) -> None:
        """Disconnect and cleanup resources."""
        pass

    @abstractmethod
    async def send_message(
        self,
        chat_id: str,
        text: str,
        reply_to: str | None = None,
        parse_mode: str | None = None,
        message_thread_id: str | None = None,
    ) -> str:
        """
        Send a message to a chat.

        Args:
            chat_id: The chat/channel ID to send to
            text: Message content
            reply_to: Optional message ID to reply to
            parse_mode: Optional formatting mode ("markdown", "html")
            message_thread_id: Optional forum topic ID (Telegram)

        Returns:
            The message ID of the sent message
        """
        pass

    @abstractmethod
    async def edit_message(
        self,
        chat_id: str,
        message_id: str,
        text: str,
        parse_mode: str | None = None,
    ) -> None:
        """
        Edit an existing message.

        Args:
            chat_id: The chat/channel ID
            message_id: The message ID to edit
            text: New message content
            parse_mode: Optional formatting mode
        """
        pass

    @abstractmethod
    async def delete_message(
        self,
        chat_id: str,
        message_id: str,
    ) -> None:
        """
        Delete a message from a chat.

        Args:
            chat_id: The chat/channel ID
            message_id: The message ID to delete
        """
        pass

    @abstractmethod
    async def queue_send_message(
        self,
        chat_id: str,
        text: str,
        reply_to: str | None = None,
        parse_mode: str | None = None,
        fire_and_forget: bool = True,
        message_thread_id: str | None = None,
    ) -> str | None:
        """
        Enqueue a message to be sent.

        If fire_and_forget is True, returns None immediately.
        Otherwise, waits for the rate limiter and returns message ID.
        """
        pass

    @abstractmethod
    async def queue_edit_message(
        self,
        chat_id: str,
        message_id: str,
        text: str,
        parse_mode: str | None = None,
        fire_and_forget: bool = True,
    ) -> None:
        """
        Enqueue a message edit.

        If fire_and_forget is True, returns immediately.
        Otherwise, waits for the rate limiter.
        """
        pass

    @abstractmethod
    async def queue_delete_message(
        self,
        chat_id: str,
        message_id: str,
        fire_and_forget: bool = True,
    ) -> None:
        """
        Enqueue a message deletion.

        If fire_and_forget is True, returns immediately.
        Otherwise, waits for the rate limiter.
        """
        pass

    @abstractmethod
    def on_message(
        self,
        handler: Callable[[IncomingMessage], Awaitable[None]],
    ) -> None:
        """
        Register a message handler callback.

        The handler will be called for each incoming message.

        Args:
            handler: Async function that processes incoming messages
        """
        pass

    @abstractmethod
    def fire_and_forget(self, task: Awaitable[Any]) -> None:
        """Execute a coroutine without awaiting it."""
        pass

    @property
    def is_connected(self) -> bool:
        """Check if the platform is connected.

        Default is False; adapters override to reflect real connection state.
        """
        return False
|
Claude_Code/messaging/platforms/discord.py
ADDED
|
@@ -0,0 +1,561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Discord Platform Adapter
|
| 3 |
+
|
| 4 |
+
Implements MessagingPlatform for Discord using discord.py.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import contextlib
|
| 9 |
+
import os
|
| 10 |
+
import tempfile
|
| 11 |
+
from collections.abc import Awaitable, Callable
|
| 12 |
+
from pathlib import Path
|
| 13 |
+
from typing import Any, cast
|
| 14 |
+
|
| 15 |
+
from loguru import logger
|
| 16 |
+
|
| 17 |
+
from providers.common import get_user_facing_error_message
|
| 18 |
+
|
| 19 |
+
from ..models import IncomingMessage
|
| 20 |
+
from ..rendering.discord_markdown import format_status_discord
|
| 21 |
+
from .base import MessagingPlatform
|
| 22 |
+
|
| 23 |
+
AUDIO_EXTENSIONS = (".ogg", ".mp4", ".mp3", ".wav", ".m4a")
|
| 24 |
+
|
| 25 |
+
_discord_module: Any = None
|
| 26 |
+
try:
|
| 27 |
+
import discord as _discord_import
|
| 28 |
+
|
| 29 |
+
_discord_module = _discord_import
|
| 30 |
+
DISCORD_AVAILABLE = True
|
| 31 |
+
except ImportError:
|
| 32 |
+
DISCORD_AVAILABLE = False
|
| 33 |
+
|
| 34 |
+
DISCORD_MESSAGE_LIMIT = 2000
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _get_discord() -> Any:
    """Return the imported ``discord`` module.

    Raises:
        ImportError: If discord.py could not be imported at module load time.
    """
    if _discord_module is None or not DISCORD_AVAILABLE:
        raise ImportError(
            "discord.py is required. Install with: pip install discord.py"
        )
    return _discord_module
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def _parse_allowed_channels(raw: str | None) -> set[str]:
|
| 47 |
+
"""Parse comma-separated channel IDs into a set of strings."""
|
| 48 |
+
if not raw or not raw.strip():
|
| 49 |
+
return set()
|
| 50 |
+
return {s.strip() for s in raw.split(",") if s.strip()}
|
| 51 |
+
|
| 52 |
+
|
| 53 |
+
if DISCORD_AVAILABLE and _discord_module is not None:
    _discord = _discord_module

    class _DiscordClient(_discord.Client):
        """Internal Discord client that forwards events to DiscordPlatform."""

        def __init__(
            self,
            # Quoted forward reference: DiscordPlatform is defined later in this
            # module. Signature annotations are evaluated eagerly on Python < 3.14,
            # so an unquoted name here would raise NameError at import time.
            platform: "DiscordPlatform",
            intents: _discord.Intents,
        ) -> None:
            super().__init__(intents=intents)
            # Back-reference used to forward gateway events to the adapter.
            self._platform = platform

        async def on_ready(self) -> None:
            """Called when the bot is ready."""
            self._platform._connected = True
            logger.info("Discord platform connected")

        async def on_message(self, message: Any) -> None:
            """Handle incoming Discord messages."""
            await self._platform._handle_client_message(message)
else:
    # Sentinel so DiscordPlatform.__init__ can assert availability at runtime
    # when discord.py is not installed.
    _DiscordClient = None
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class DiscordPlatform(MessagingPlatform):
    """
    Discord messaging platform adapter.

    Uses discord.py for Discord access.
    Requires a Bot Token from Discord Developer Portal and message_content intent.
    """

    name = "discord"

    def __init__(
        self,
        bot_token: str | None = None,
        allowed_channel_ids: str | None = None,
    ):
        if not DISCORD_AVAILABLE:
            raise ImportError(
                "discord.py is required. Install with: pip install discord.py"
            )

        self.bot_token = bot_token or os.getenv("DISCORD_BOT_TOKEN")
        raw_channels = allowed_channel_ids or os.getenv("ALLOWED_DISCORD_CHANNELS")
        self.allowed_channel_ids = _parse_allowed_channels(raw_channels)

        if not self.bot_token:
            # Don't raise here so the object can be constructed for
            # testing/conditional logic; start() enforces the token.
            logger.warning("DISCORD_BOT_TOKEN not set")

        discord = _get_discord()
        intents = discord.Intents.default()
        intents.message_content = True

        assert _DiscordClient is not None
        self._client = _DiscordClient(self, intents)
        self._message_handler: Callable[[IncomingMessage], Awaitable[None]] | None = (
            None
        )
        self._connected = False
        self._limiter: Any | None = None  # MessagingRateLimiter, set in start()
        self._start_task: asyncio.Task | None = None
        # Pending voice transcriptions, indexed under BOTH the voice message id
        # and the status message id: (chat_id, msg_id) -> (voice_msg_id, status_msg_id)
        self._pending_voice: dict[tuple[str, str], tuple[str, str]] = {}
        self._pending_voice_lock = asyncio.Lock()

    async def _handle_client_message(self, message: Any) -> None:
        """Adapter entry point used by the internal discord client."""
        await self._on_discord_message(message)

    async def _register_pending_voice(
        self, chat_id: str, voice_msg_id: str, status_msg_id: str
    ) -> None:
        """Register a voice note as pending transcription."""
        async with self._pending_voice_lock:
            # Double-index so a reply to either message can cancel the job.
            self._pending_voice[(chat_id, voice_msg_id)] = (voice_msg_id, status_msg_id)
            self._pending_voice[(chat_id, status_msg_id)] = (
                voice_msg_id,
                status_msg_id,
            )

    async def cancel_pending_voice(
        self, chat_id: str, reply_id: str
    ) -> tuple[str, str] | None:
        """Cancel a pending voice transcription. Returns (voice_msg_id, status_msg_id) if found."""
        async with self._pending_voice_lock:
            entry = self._pending_voice.pop((chat_id, reply_id), None)
            if entry is None:
                return None
            voice_msg_id, status_msg_id = entry
            # Remove both index entries (one of them was already popped above).
            self._pending_voice.pop((chat_id, voice_msg_id), None)
            self._pending_voice.pop((chat_id, status_msg_id), None)
            return (voice_msg_id, status_msg_id)

    async def _is_voice_still_pending(self, chat_id: str, voice_msg_id: str) -> bool:
        """Check if a voice note is still pending (not cancelled)."""
        async with self._pending_voice_lock:
            return (chat_id, voice_msg_id) in self._pending_voice

    def _get_audio_attachment(self, message: Any) -> Any | None:
        """Return first audio attachment, or None."""
        for att in message.attachments:
            ct = (att.content_type or "").lower()
            fn = (att.filename or "").lower()
            # str.endswith accepts a tuple of suffixes directly.
            if ct.startswith("audio/") or fn.endswith(AUDIO_EXTENSIONS):
                return att
        return None

    async def _handle_voice_note(
        self, message: Any, attachment: Any, channel_id: str
    ) -> bool:
        """Handle voice/audio attachment. Returns True if handled."""
        from config.settings import get_settings

        settings = get_settings()
        if not settings.voice_note_enabled:
            await message.reply("Voice notes are disabled.")
            return True

        if not self._message_handler:
            return False

        status_msg_id = await self.queue_send_message(
            channel_id,
            format_status_discord("Transcribing voice note..."),
            reply_to=str(message.id),
            fire_and_forget=False,
        )

        user_id = str(message.author.id)
        message_id = str(message.id)
        await self._register_pending_voice(channel_id, message_id, str(status_msg_id))
        reply_to = (
            str(message.reference.message_id)
            if message.reference and message.reference.message_id
            else None
        )

        # Pick a file extension from the filename first, then the content type.
        ext = ".ogg"
        fn = (attachment.filename or "").lower()
        for e in AUDIO_EXTENSIONS:
            if fn.endswith(e):
                ext = e
                break
        ct = attachment.content_type or "audio/ogg"
        if "mp4" in ct or "m4a" in fn:
            ext = ".m4a" if "m4a" in fn else ".mp4"
        elif "mp3" in ct or fn.endswith(".mp3"):
            ext = ".mp3"

        # Create the temp file first so the finally-block can always unlink it.
        with tempfile.NamedTemporaryFile(suffix=ext, delete=False) as tmp:
            tmp_path = Path(tmp.name)

        try:
            await attachment.save(str(tmp_path))

            from ..transcription import transcribe_audio

            # Transcription is blocking; run it off the event loop.
            transcribed = await asyncio.to_thread(
                transcribe_audio,
                tmp_path,
                ct,
                whisper_model=settings.whisper_model,
                whisper_device=settings.whisper_device,
            )

            if not await self._is_voice_still_pending(channel_id, message_id):
                # Cancelled while transcribing: drop the status message and bail.
                await self.queue_delete_message(channel_id, str(status_msg_id))
                return True

            async with self._pending_voice_lock:
                self._pending_voice.pop((channel_id, message_id), None)
                self._pending_voice.pop((channel_id, str(status_msg_id)), None)

            incoming = IncomingMessage(
                text=transcribed,
                chat_id=channel_id,
                user_id=user_id,
                message_id=message_id,
                platform="discord",
                reply_to_message_id=reply_to,
                username=message.author.display_name,
                raw_event=message,
                status_message_id=status_msg_id,
            )

            logger.info(
                "DISCORD_VOICE: chat_id={} message_id={} transcribed={!r}",
                channel_id,
                message_id,
                (transcribed[:80] + "..." if len(transcribed) > 80 else transcribed),
            )

            await self._message_handler(incoming)
            return True
        except (ValueError, ImportError) as e:
            # Both arms had identical bodies; merged into one handler.
            await message.reply(get_user_facing_error_message(e)[:200])
            return True
        except Exception as e:
            logger.error(f"Voice transcription failed: {e}")
            await message.reply(
                "Could not transcribe voice note. Please try again or send text."
            )
            return True
        finally:
            with contextlib.suppress(OSError):
                tmp_path.unlink(missing_ok=True)

    async def _on_discord_message(self, message: Any) -> None:
        """Handle incoming Discord messages."""
        if message.author.bot:
            return

        channel_id = str(message.channel.id)

        # Allowlist is mandatory: an empty allowlist means no channel is served.
        if not self.allowed_channel_ids or channel_id not in self.allowed_channel_ids:
            return

        # Handle voice/audio attachments when message has no text content
        if not message.content:
            audio_att = self._get_audio_attachment(message)
            if audio_att:
                await self._handle_voice_note(message, audio_att, channel_id)
                return
            return

        user_id = str(message.author.id)
        message_id = str(message.id)
        reply_to = (
            str(message.reference.message_id)
            if message.reference and message.reference.message_id
            else None
        )

        text_preview = (message.content or "")[:80]
        if len(message.content or "") > 80:
            text_preview += "..."
        logger.info(
            "DISCORD_MSG: chat_id={} message_id={} reply_to={} text_preview={!r}",
            channel_id,
            message_id,
            reply_to,
            text_preview,
        )

        if not self._message_handler:
            return

        incoming = IncomingMessage(
            text=message.content,
            chat_id=channel_id,
            user_id=user_id,
            message_id=message_id,
            platform="discord",
            reply_to_message_id=reply_to,
            username=message.author.display_name,
            raw_event=message,
        )

        try:
            await self._message_handler(incoming)
        except Exception as e:
            logger.error(f"Error handling message: {e}")
            # Best-effort error notification; never let it raise further.
            with contextlib.suppress(Exception):
                await self.send_message(
                    channel_id,
                    format_status_discord(
                        "Error:", get_user_facing_error_message(e)[:200]
                    ),
                    reply_to=message_id,
                )

    def _truncate(self, text: str, limit: int = DISCORD_MESSAGE_LIMIT) -> str:
        """Truncate text to Discord's message limit."""
        if len(text) <= limit:
            return text
        return text[: limit - 3] + "..."

    async def start(self) -> None:
        """Initialize and connect to Discord.

        Raises:
            ValueError: If no bot token is configured.
            RuntimeError: If the client does not report ready within 30s.
        """
        if not self.bot_token:
            raise ValueError("DISCORD_BOT_TOKEN is required")

        from ..limiter import MessagingRateLimiter

        self._limiter = await MessagingRateLimiter.get_instance()

        self._start_task = asyncio.create_task(
            self._client.start(self.bot_token),
            name="discord-client-start",
        )

        # Poll until on_ready flips _connected, or give up after max_wait seconds.
        max_wait = 30
        waited = 0.0
        while not self._connected and waited < max_wait:
            await asyncio.sleep(0.5)
            waited += 0.5

        if not self._connected:
            raise RuntimeError("Discord client failed to connect within timeout")

        logger.info("Discord platform started")

    async def stop(self) -> None:
        """Stop the bot."""
        if self._client.is_closed():
            self._connected = False
            return

        await self._client.close()
        if self._start_task and not self._start_task.done():
            try:
                await asyncio.wait_for(self._start_task, timeout=5.0)
            # Fixed: the original used Python 2 syntax
            # (`except TimeoutError, asyncio.CancelledError:`), a SyntaxError in
            # Python 3. asyncio.TimeoutError is used because wait_for raises it,
            # and it is distinct from builtin TimeoutError before Python 3.11.
            except (asyncio.TimeoutError, asyncio.CancelledError):
                self._start_task.cancel()
                with contextlib.suppress(asyncio.CancelledError):
                    await self._start_task

        self._connected = False
        logger.info("Discord platform stopped")

    async def send_message(
        self,
        chat_id: str,
        text: str,
        reply_to: str | None = None,
        parse_mode: str | None = None,
        message_thread_id: str | None = None,
    ) -> str:
        """Send a message to a channel. Returns the new message id."""
        channel = self._client.get_channel(int(chat_id))
        if not channel or not hasattr(channel, "send"):
            raise RuntimeError(f"Channel {chat_id} not found")

        text = self._truncate(text)
        channel = cast(Any, channel)

        discord = _get_discord()
        if reply_to:
            ref = discord.MessageReference(
                message_id=int(reply_to),
                channel_id=int(chat_id),
            )
            msg = await channel.send(content=text, reference=ref)
        else:
            msg = await channel.send(content=text)

        return str(msg.id)

    async def edit_message(
        self,
        chat_id: str,
        message_id: str,
        text: str,
        parse_mode: str | None = None,
    ) -> None:
        """Edit an existing message."""
        channel = self._client.get_channel(int(chat_id))
        if not channel or not hasattr(channel, "fetch_message"):
            raise RuntimeError(f"Channel {chat_id} not found")

        discord = _get_discord()
        channel = cast(Any, channel)
        try:
            msg = await channel.fetch_message(int(message_id))
        except discord.NotFound:
            # Message already gone; nothing to edit.
            return

        text = self._truncate(text)
        await msg.edit(content=text)

    async def delete_message(
        self,
        chat_id: str,
        message_id: str,
    ) -> None:
        """Delete a message from a channel (best-effort)."""
        channel = self._client.get_channel(int(chat_id))
        if not channel or not hasattr(channel, "fetch_message"):
            return

        discord = _get_discord()
        channel = cast(Any, channel)
        try:
            msg = await channel.fetch_message(int(message_id))
            await msg.delete()
        # Fixed: the original used Python 2 syntax
        # (`except discord.NotFound, discord.Forbidden:`), a SyntaxError in
        # Python 3; the exception types must be a parenthesized tuple.
        except (discord.NotFound, discord.Forbidden):
            pass

    async def delete_messages(self, chat_id: str, message_ids: list[str]) -> None:
        """Delete multiple messages (best-effort)."""
        for mid in message_ids:
            await self.delete_message(chat_id, mid)

    async def queue_send_message(
        self,
        chat_id: str,
        text: str,
        reply_to: str | None = None,
        parse_mode: str | None = None,
        fire_and_forget: bool = True,
        message_thread_id: str | None = None,
    ) -> str | None:
        """Enqueue a message to be sent. Returns the message id unless fire-and-forget."""
        if not self._limiter:
            # No limiter yet (start() not called): fall back to a direct send.
            return await self.send_message(
                chat_id, text, reply_to, parse_mode, message_thread_id
            )

        async def _send():
            return await self.send_message(
                chat_id, text, reply_to, parse_mode, message_thread_id
            )

        if fire_and_forget:
            self._limiter.fire_and_forget(_send)
            return None
        return await self._limiter.enqueue(_send)

    async def queue_edit_message(
        self,
        chat_id: str,
        message_id: str,
        text: str,
        parse_mode: str | None = None,
        fire_and_forget: bool = True,
    ) -> None:
        """Enqueue a message edit."""
        if not self._limiter:
            await self.edit_message(chat_id, message_id, text, parse_mode)
            return

        async def _edit():
            await self.edit_message(chat_id, message_id, text, parse_mode)

        # Dedup key collapses repeated edits of the same message in the queue.
        dedup_key = f"edit:{chat_id}:{message_id}"
        if fire_and_forget:
            self._limiter.fire_and_forget(_edit, dedup_key=dedup_key)
        else:
            await self._limiter.enqueue(_edit, dedup_key=dedup_key)

    async def queue_delete_message(
        self,
        chat_id: str,
        message_id: str,
        fire_and_forget: bool = True,
    ) -> None:
        """Enqueue a message delete."""
        if not self._limiter:
            await self.delete_message(chat_id, message_id)
            return

        async def _delete():
            await self.delete_message(chat_id, message_id)

        dedup_key = f"del:{chat_id}:{message_id}"
        if fire_and_forget:
            self._limiter.fire_and_forget(_delete, dedup_key=dedup_key)
        else:
            await self._limiter.enqueue(_delete, dedup_key=dedup_key)

    async def queue_delete_messages(
        self,
        chat_id: str,
        message_ids: list[str],
        fire_and_forget: bool = True,
    ) -> None:
        """Enqueue a bulk delete."""
        if not message_ids:
            return

        if not self._limiter:
            await self.delete_messages(chat_id, message_ids)
            return

        async def _bulk():
            await self.delete_messages(chat_id, message_ids)

        dedup_key = f"del_bulk:{chat_id}:{hash(tuple(message_ids))}"
        if fire_and_forget:
            self._limiter.fire_and_forget(_bulk, dedup_key=dedup_key)
        else:
            await self._limiter.enqueue(_bulk, dedup_key=dedup_key)

    def fire_and_forget(self, task: Awaitable[Any]) -> None:
        """Execute a coroutine without awaiting it."""
        if asyncio.iscoroutine(task):
            _ = asyncio.create_task(task)
        else:
            _ = asyncio.ensure_future(task)

    def on_message(
        self,
        handler: Callable[[IncomingMessage], Awaitable[None]],
    ) -> None:
        """Register a message handler callback."""
        self._message_handler = handler

    @property
    def is_connected(self) -> bool:
        """Check if connected."""
        return self._connected
|
Claude_Code/messaging/platforms/factory.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Messaging platform factory.
|
| 2 |
+
|
| 3 |
+
Creates the appropriate messaging platform adapter based on configuration.
|
| 4 |
+
To add a new platform (e.g. Discord, Slack):
|
| 5 |
+
1. Create a new class implementing MessagingPlatform in messaging/platforms/
|
| 6 |
+
2. Add a case to create_messaging_platform() below
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from .base import MessagingPlatform
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def create_messaging_platform(
    platform_type: str,
    **kwargs,
) -> MessagingPlatform | None:
    """Build the messaging platform adapter selected by ``platform_type``.

    Args:
        platform_type: Platform identifier ("telegram", "discord", etc.)
        **kwargs: Platform-specific configuration passed to the constructor.

    Returns:
        Configured MessagingPlatform instance, or None if not configured.
    """
    if platform_type == "telegram":
        token = kwargs.get("bot_token")
        if token:
            from .telegram import TelegramPlatform

            return TelegramPlatform(
                bot_token=token,
                allowed_user_id=kwargs.get("allowed_user_id"),
            )
        logger.info("No Telegram bot token configured, skipping platform setup")
        return None

    if platform_type == "discord":
        token = kwargs.get("discord_bot_token")
        if token:
            from .discord import DiscordPlatform

            return DiscordPlatform(
                bot_token=token,
                allowed_channel_ids=kwargs.get("allowed_discord_channels"),
            )
        logger.info("No Discord bot token configured, skipping platform setup")
        return None

    logger.warning(
        f"Unknown messaging platform: '{platform_type}'. Supported: 'telegram', 'discord'"
    )
    return None
|
Claude_Code/messaging/platforms/telegram.py
ADDED
|
@@ -0,0 +1,661 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Telegram Platform Adapter
|
| 3 |
+
|
| 4 |
+
Implements MessagingPlatform for Telegram using python-telegram-bot.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import asyncio
|
| 8 |
+
import contextlib
|
| 9 |
+
import os
|
| 10 |
+
import tempfile
|
| 11 |
+
from pathlib import Path
|
| 12 |
+
|
| 13 |
+
# Opt-in to future behavior for python-telegram-bot (retry_after as timedelta)
|
| 14 |
+
# This must be set BEFORE importing telegram.error
|
| 15 |
+
os.environ["PTB_TIMEDELTA"] = "1"
|
| 16 |
+
|
| 17 |
+
from collections.abc import Awaitable, Callable
|
| 18 |
+
from typing import TYPE_CHECKING, Any
|
| 19 |
+
|
| 20 |
+
from loguru import logger
|
| 21 |
+
|
| 22 |
+
from providers.common import get_user_facing_error_message
|
| 23 |
+
|
| 24 |
+
if TYPE_CHECKING:
|
| 25 |
+
from telegram import Update
|
| 26 |
+
from telegram.ext import ContextTypes
|
| 27 |
+
|
| 28 |
+
from ..models import IncomingMessage
|
| 29 |
+
from ..rendering.telegram_markdown import escape_md_v2, format_status
|
| 30 |
+
from .base import MessagingPlatform
|
| 31 |
+
|
| 32 |
+
# Optional import - python-telegram-bot may not be installed
|
| 33 |
+
try:
|
| 34 |
+
from telegram import Update
|
| 35 |
+
from telegram.error import NetworkError, RetryAfter, TelegramError
|
| 36 |
+
from telegram.ext import (
|
| 37 |
+
Application,
|
| 38 |
+
CommandHandler,
|
| 39 |
+
ContextTypes,
|
| 40 |
+
MessageHandler,
|
| 41 |
+
filters,
|
| 42 |
+
)
|
| 43 |
+
from telegram.request import HTTPXRequest
|
| 44 |
+
|
| 45 |
+
TELEGRAM_AVAILABLE = True
|
| 46 |
+
except ImportError:
|
| 47 |
+
TELEGRAM_AVAILABLE = False
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
class TelegramPlatform(MessagingPlatform):
|
| 51 |
+
"""
|
| 52 |
+
Telegram messaging platform adapter.
|
| 53 |
+
|
| 54 |
+
Uses python-telegram-bot (Bot API) for Telegram access.
|
| 55 |
+
Requires a Bot Token from @BotFather.
|
| 56 |
+
"""
|
| 57 |
+
|
| 58 |
+
name = "telegram"
|
| 59 |
+
|
| 60 |
+
def __init__(
|
| 61 |
+
self,
|
| 62 |
+
bot_token: str | None = None,
|
| 63 |
+
allowed_user_id: str | None = None,
|
| 64 |
+
):
|
| 65 |
+
if not TELEGRAM_AVAILABLE:
|
| 66 |
+
raise ImportError(
|
| 67 |
+
"python-telegram-bot is required. Install with: pip install python-telegram-bot"
|
| 68 |
+
)
|
| 69 |
+
|
| 70 |
+
self.bot_token = bot_token or os.getenv("TELEGRAM_BOT_TOKEN")
|
| 71 |
+
self.allowed_user_id = allowed_user_id or os.getenv("ALLOWED_TELEGRAM_USER_ID")
|
| 72 |
+
|
| 73 |
+
if not self.bot_token:
|
| 74 |
+
# We don't raise here to allow instantiation for testing/conditional logic,
|
| 75 |
+
# but start() will fail.
|
| 76 |
+
logger.warning("TELEGRAM_BOT_TOKEN not set")
|
| 77 |
+
|
| 78 |
+
self._application: Application | None = None
|
| 79 |
+
self._message_handler: Callable[[IncomingMessage], Awaitable[None]] | None = (
|
| 80 |
+
None
|
| 81 |
+
)
|
| 82 |
+
self._connected = False
|
| 83 |
+
self._limiter: Any | None = None # Will be MessagingRateLimiter
|
| 84 |
+
# Pending voice transcriptions: (chat_id, msg_id) -> (voice_msg_id, status_msg_id)
|
| 85 |
+
self._pending_voice: dict[tuple[str, str], tuple[str, str]] = {}
|
| 86 |
+
self._pending_voice_lock = asyncio.Lock()
|
| 87 |
+
|
| 88 |
+
async def _register_pending_voice(
|
| 89 |
+
self, chat_id: str, voice_msg_id: str, status_msg_id: str
|
| 90 |
+
) -> None:
|
| 91 |
+
"""Register a voice note as pending transcription (for /clear reply during transcription)."""
|
| 92 |
+
async with self._pending_voice_lock:
|
| 93 |
+
self._pending_voice[(chat_id, voice_msg_id)] = (voice_msg_id, status_msg_id)
|
| 94 |
+
self._pending_voice[(chat_id, status_msg_id)] = (
|
| 95 |
+
voice_msg_id,
|
| 96 |
+
status_msg_id,
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
async def cancel_pending_voice(
|
| 100 |
+
self, chat_id: str, reply_id: str
|
| 101 |
+
) -> tuple[str, str] | None:
|
| 102 |
+
"""Cancel a pending voice transcription. Returns (voice_msg_id, status_msg_id) if found."""
|
| 103 |
+
async with self._pending_voice_lock:
|
| 104 |
+
entry = self._pending_voice.pop((chat_id, reply_id), None)
|
| 105 |
+
if entry is None:
|
| 106 |
+
return None
|
| 107 |
+
voice_msg_id, status_msg_id = entry
|
| 108 |
+
self._pending_voice.pop((chat_id, voice_msg_id), None)
|
| 109 |
+
self._pending_voice.pop((chat_id, status_msg_id), None)
|
| 110 |
+
return (voice_msg_id, status_msg_id)
|
| 111 |
+
|
| 112 |
+
async def _is_voice_still_pending(self, chat_id: str, voice_msg_id: str) -> bool:
|
| 113 |
+
"""Check if a voice note is still pending (not cancelled)."""
|
| 114 |
+
async with self._pending_voice_lock:
|
| 115 |
+
return (chat_id, voice_msg_id) in self._pending_voice
|
| 116 |
+
|
| 117 |
+
async def start(self) -> None:
    """Initialize the Bot API application, start polling, and notify the owner.

    Raises:
        ValueError: if no bot token is configured.
        Exception: the last connection error when all retry attempts fail.
    """
    if not self.bot_token:
        raise ValueError("TELEGRAM_BOT_TOKEN is required")

    # Configure request with longer timeouts
    request = HTTPXRequest(
        connection_pool_size=8, connect_timeout=30.0, read_timeout=30.0
    )

    # Build Application
    builder = Application.builder().token(self.bot_token).request(request)
    self._application = builder.build()

    # Register internal handlers: plain text first, then /start, then a
    # catch-all for other commands, then voice notes.
    self._application.add_handler(
        MessageHandler(filters.TEXT & (~filters.COMMAND), self._on_telegram_message)
    )
    self._application.add_handler(CommandHandler("start", self._on_start_command))
    self._application.add_handler(
        MessageHandler(filters.COMMAND, self._on_telegram_message)
    )
    self._application.add_handler(
        MessageHandler(filters.VOICE, self._on_telegram_voice)
    )

    # Initialize internal components with retry logic
    max_retries = 3
    for attempt in range(max_retries):
        try:
            await self._application.initialize()
            await self._application.start()

            # Start polling (non-blocking way for integration)
            if self._application.updater:
                await self._application.updater.start_polling(
                    drop_pending_updates=False
                )

            self._connected = True
            break
        # FIX: was `except (NetworkError, Exception)`; NetworkError is a
        # subclass of Exception, so the tuple was redundant (flake8 B014).
        except Exception as e:
            if attempt < max_retries - 1:
                wait_time = 2 * (attempt + 1)  # linear backoff: 2s, 4s
                logger.warning(
                    f"Connection failed (attempt {attempt + 1}/{max_retries}): {e}. Retrying in {wait_time}s..."
                )
                await asyncio.sleep(wait_time)
            else:
                logger.error(f"Failed to connect after {max_retries} attempts")
                raise

    # Initialize rate limiter (imported lazily to avoid an import cycle).
    from ..limiter import MessagingRateLimiter

    self._limiter = await MessagingRateLimiter.get_instance()

    # Send startup notification (best-effort; never fail startup over it).
    try:
        target = self.allowed_user_id
        if target:
            startup_text = (
                f"🚀 *{escape_md_v2('Claude Code Proxy is online!')}* "
                f"{escape_md_v2('(Bot API)')}"
            )
            await self.send_message(
                target,
                startup_text,
            )
    except Exception as e:
        logger.warning(f"Could not send startup message: {e}")

    logger.info("Telegram platform started (Bot API)")
|
| 193 |
+
|
| 194 |
+
async def stop(self) -> None:
    """Shut down polling and the underlying Application, then mark disconnected."""
    app = self._application
    if app and app.updater:
        await app.updater.stop()
        await app.stop()
        await app.shutdown()

    self._connected = False
    logger.info("Telegram platform stopped")
|
| 203 |
+
|
| 204 |
+
async def _with_retry(
    self, func: Callable[..., Awaitable[Any]], *args, **kwargs
) -> Any:
    """Helper to execute a function with exponential backoff on network errors.

    Retry policy:
      - TimeoutError/NetworkError: up to 3 attempts with 1s/2s/4s backoff,
        then re-raise.
      - RetryAfter: sleep exactly as long as Telegram asks, then retry once
        without consuming an attempt; a second failure propagates.
      - Other TelegramError: several "already gone"/no-op errors are treated
        as success (return None); entity-parse failures are retried once
        without parse_mode; anything else is re-raised.
    """
    max_retries = 3
    for attempt in range(max_retries):
        try:
            return await func(*args, **kwargs)
        except (TimeoutError, NetworkError) as e:
            # An edit that changes nothing surfaces as an error; treat it
            # as success rather than retrying.
            if "Message is not modified" in str(e):
                return None
            if attempt < max_retries - 1:
                wait_time = 2**attempt  # 1s, 2s, 4s
                logger.warning(
                    f"Telegram API network error (attempt {attempt + 1}/{max_retries}): {e}. Retrying in {wait_time}s..."
                )
                await asyncio.sleep(wait_time)
            else:
                logger.error(
                    f"Telegram API failed after {max_retries} attempts: {e}"
                )
                raise
        except RetryAfter as e:
            # Telegram explicitly tells us to wait (PTB_TIMEDELTA: retry_after is timedelta)
            from datetime import timedelta

            retry_after = e.retry_after
            if isinstance(retry_after, timedelta):
                wait_secs = retry_after.total_seconds()
            else:
                wait_secs = float(retry_after)

            logger.warning(f"Rate limited by Telegram, waiting {wait_secs}s...")
            await asyncio.sleep(wait_secs)
            # We don't increment attempt here, as this is a specific instruction
            return await func(*args, **kwargs)
        except TelegramError as e:
            # Non-network Telegram errors
            err_lower = str(e).lower()
            if "message is not modified" in err_lower:
                return None
            # Best-effort no-op cases (common during chat cleanup / /clear).
            if any(
                x in err_lower
                for x in [
                    "message to edit not found",
                    "message to delete not found",
                    "message can't be deleted",
                    "message can't be edited",
                    "not enough rights to delete",
                ]
            ):
                return None
            # NOTE(review): this check is case-sensitive against str(e) while
            # the no-op checks above use err_lower — presumably intentional;
            # verify Telegram's exact error casing.
            if "Can't parse entities" in str(e) and kwargs.get("parse_mode"):
                logger.warning("Markdown failed, retrying without parse_mode")
                kwargs["parse_mode"] = None
                return await func(*args, **kwargs)
            raise
|
| 262 |
+
|
| 263 |
+
async def send_message(
|
| 264 |
+
self,
|
| 265 |
+
chat_id: str,
|
| 266 |
+
text: str,
|
| 267 |
+
reply_to: str | None = None,
|
| 268 |
+
parse_mode: str | None = "MarkdownV2",
|
| 269 |
+
message_thread_id: str | None = None,
|
| 270 |
+
) -> str:
|
| 271 |
+
"""Send a message to a chat."""
|
| 272 |
+
app = self._application
|
| 273 |
+
if not app or not app.bot:
|
| 274 |
+
raise RuntimeError("Telegram application or bot not initialized")
|
| 275 |
+
|
| 276 |
+
async def _do_send(parse_mode=parse_mode):
|
| 277 |
+
bot = app.bot
|
| 278 |
+
kwargs: dict[str, Any] = {
|
| 279 |
+
"chat_id": chat_id,
|
| 280 |
+
"text": text,
|
| 281 |
+
"reply_to_message_id": int(reply_to) if reply_to else None,
|
| 282 |
+
"parse_mode": parse_mode,
|
| 283 |
+
}
|
| 284 |
+
if message_thread_id is not None:
|
| 285 |
+
kwargs["message_thread_id"] = int(message_thread_id)
|
| 286 |
+
msg = await bot.send_message(**kwargs)
|
| 287 |
+
return str(msg.message_id)
|
| 288 |
+
|
| 289 |
+
return await self._with_retry(_do_send, parse_mode=parse_mode)
|
| 290 |
+
|
| 291 |
+
async def edit_message(
|
| 292 |
+
self,
|
| 293 |
+
chat_id: str,
|
| 294 |
+
message_id: str,
|
| 295 |
+
text: str,
|
| 296 |
+
parse_mode: str | None = "MarkdownV2",
|
| 297 |
+
) -> None:
|
| 298 |
+
"""Edit an existing message."""
|
| 299 |
+
app = self._application
|
| 300 |
+
if not app or not app.bot:
|
| 301 |
+
raise RuntimeError("Telegram application or bot not initialized")
|
| 302 |
+
|
| 303 |
+
async def _do_edit(parse_mode=parse_mode):
|
| 304 |
+
bot = app.bot
|
| 305 |
+
await bot.edit_message_text(
|
| 306 |
+
chat_id=chat_id,
|
| 307 |
+
message_id=int(message_id),
|
| 308 |
+
text=text,
|
| 309 |
+
parse_mode=parse_mode,
|
| 310 |
+
)
|
| 311 |
+
|
| 312 |
+
await self._with_retry(_do_edit, parse_mode=parse_mode)
|
| 313 |
+
|
| 314 |
+
async def delete_message(
    self,
    chat_id: str,
    message_id: str,
) -> None:
    """Remove a single message from a chat."""
    app = self._application
    if not app or not app.bot:
        raise RuntimeError("Telegram application or bot not initialized")

    async def _delete_once():
        await app.bot.delete_message(chat_id=chat_id, message_id=int(message_id))

    await self._with_retry(_delete_once)
|
| 329 |
+
|
| 330 |
+
async def delete_messages(self, chat_id: str, message_ids: list[str]) -> None:
    """Delete several messages (best-effort), bulk when the API supports it."""
    if not message_ids:
        return
    app = self._application
    if not app or not app.bot:
        raise RuntimeError("Telegram application or bot not initialized")

    bot = app.bot
    if not hasattr(bot, "delete_messages"):
        # Older PTB without bulk deletion: fall back to one-by-one.
        for raw_id in message_ids:
            await self.delete_message(chat_id, raw_id)
        return

    async def _bulk_delete():
        numeric_ids: list[int] = []
        for raw_id in message_ids:
            try:
                numeric_ids.append(int(raw_id))
            except Exception:
                continue
        if not numeric_ids:
            return None
        # delete_messages accepts a sequence of ints (up to 100).
        await bot.delete_messages(chat_id=chat_id, message_ids=numeric_ids)

    await self._with_retry(_bulk_delete)
|
| 359 |
+
|
| 360 |
+
async def queue_send_message(
|
| 361 |
+
self,
|
| 362 |
+
chat_id: str,
|
| 363 |
+
text: str,
|
| 364 |
+
reply_to: str | None = None,
|
| 365 |
+
parse_mode: str | None = "MarkdownV2",
|
| 366 |
+
fire_and_forget: bool = True,
|
| 367 |
+
message_thread_id: str | None = None,
|
| 368 |
+
) -> str | None:
|
| 369 |
+
"""Enqueue a message to be sent (using limiter)."""
|
| 370 |
+
# Note: Bot API handles limits better, but we still use our limiter for nice queuing
|
| 371 |
+
if not self._limiter:
|
| 372 |
+
return await self.send_message(
|
| 373 |
+
chat_id, text, reply_to, parse_mode, message_thread_id
|
| 374 |
+
)
|
| 375 |
+
|
| 376 |
+
async def _send():
|
| 377 |
+
return await self.send_message(
|
| 378 |
+
chat_id, text, reply_to, parse_mode, message_thread_id
|
| 379 |
+
)
|
| 380 |
+
|
| 381 |
+
if fire_and_forget:
|
| 382 |
+
self._limiter.fire_and_forget(_send)
|
| 383 |
+
return None
|
| 384 |
+
else:
|
| 385 |
+
return await self._limiter.enqueue(_send)
|
| 386 |
+
|
| 387 |
+
async def queue_edit_message(
|
| 388 |
+
self,
|
| 389 |
+
chat_id: str,
|
| 390 |
+
message_id: str,
|
| 391 |
+
text: str,
|
| 392 |
+
parse_mode: str | None = "MarkdownV2",
|
| 393 |
+
fire_and_forget: bool = True,
|
| 394 |
+
) -> None:
|
| 395 |
+
"""Enqueue a message edit."""
|
| 396 |
+
if not self._limiter:
|
| 397 |
+
return await self.edit_message(chat_id, message_id, text, parse_mode)
|
| 398 |
+
|
| 399 |
+
async def _edit():
|
| 400 |
+
return await self.edit_message(chat_id, message_id, text, parse_mode)
|
| 401 |
+
|
| 402 |
+
dedup_key = f"edit:{chat_id}:{message_id}"
|
| 403 |
+
if fire_and_forget:
|
| 404 |
+
self._limiter.fire_and_forget(_edit, dedup_key=dedup_key)
|
| 405 |
+
else:
|
| 406 |
+
await self._limiter.enqueue(_edit, dedup_key=dedup_key)
|
| 407 |
+
|
| 408 |
+
async def queue_delete_message(
    self,
    chat_id: str,
    message_id: str,
    fire_and_forget: bool = True,
) -> None:
    """Enqueue a single delete; direct when no limiter. Deduplicated per message."""
    if not self._limiter:
        return await self.delete_message(chat_id, message_id)

    async def _job():
        return await self.delete_message(chat_id, message_id)

    key = f"del:{chat_id}:{message_id}"
    if not fire_and_forget:
        await self._limiter.enqueue(_job, dedup_key=key)
    else:
        self._limiter.fire_and_forget(_job, dedup_key=key)
|
| 426 |
+
|
| 427 |
+
async def queue_delete_messages(
    self,
    chat_id: str,
    message_ids: list[str],
    fire_and_forget: bool = True,
) -> None:
    """Enqueue a bulk delete (or direct when no limiter); no-op on empty input."""
    if not message_ids:
        return

    if not self._limiter:
        return await self.delete_messages(chat_id, message_ids)

    async def _job():
        return await self.delete_messages(chat_id, message_ids)

    # Dedup by the chunk content; okay to be coarse here.
    key = f"del_bulk:{chat_id}:{hash(tuple(message_ids))}"
    if not fire_and_forget:
        await self._limiter.enqueue(_job, dedup_key=key)
    else:
        self._limiter.fire_and_forget(_job, dedup_key=key)
|
| 449 |
+
|
| 450 |
+
def fire_and_forget(self, task: Awaitable[Any]) -> None:
    """Schedule *task* on the running event loop without awaiting its result."""
    if not asyncio.iscoroutine(task):
        # Generic awaitable (e.g. a Future): wrap via ensure_future.
        _ = asyncio.ensure_future(task)
    else:
        _ = asyncio.create_task(task)
|
| 456 |
+
|
| 457 |
+
def on_message(
    self,
    handler: Callable[[IncomingMessage], Awaitable[None]],
) -> None:
    """Register the coroutine that is invoked for each accepted incoming message."""
    self._message_handler = handler
|
| 463 |
+
|
| 464 |
+
@property
def is_connected(self) -> bool:
    """True once start() has completed successfully and polling is active."""
    return self._connected
|
| 468 |
+
|
| 469 |
+
async def _on_start_command(
    self, update: Update, context: ContextTypes.DEFAULT_TYPE
) -> None:
    """Greet the user on /start, then forward the update as a normal message."""
    if not update.message:
        return
    await update.message.reply_text("👋 Hello! I am the Claude Code Proxy Bot.")
    # Also forward /start through the regular pipeline so it can trigger logic.
    await self._on_telegram_message(update, context)
|
| 477 |
+
|
| 478 |
+
async def _on_telegram_message(
    self, update: Update, context: ContextTypes.DEFAULT_TYPE
) -> None:
    """Validate, log, and forward a text/command update to the registered handler."""
    msg = update.message
    if (
        not msg
        or not msg.text
        or not update.effective_user
        or not update.effective_chat
    ):
        return

    sender_id = str(update.effective_user.id)
    chat_id = str(update.effective_chat.id)

    # Single-user security gate.
    if self.allowed_user_id and sender_id != str(self.allowed_user_id).strip():
        logger.warning(f"Unauthorized access attempt from {sender_id}")
        return

    message_id = str(msg.message_id)
    reply_to = (
        str(msg.reply_to_message.message_id) if msg.reply_to_message else None
    )
    raw_thread = getattr(msg, "message_thread_id", None)
    thread_id = str(raw_thread) if raw_thread is not None else None

    full_text = msg.text or ""
    text_preview = full_text[:80] + ("..." if len(full_text) > 80 else "")
    logger.info(
        "TELEGRAM_MSG: chat_id={} message_id={} reply_to={} text_preview={!r}",
        chat_id,
        message_id,
        reply_to,
        text_preview,
    )

    handler = self._message_handler
    if not handler:
        return

    incoming = IncomingMessage(
        text=msg.text,
        chat_id=chat_id,
        user_id=sender_id,
        message_id=message_id,
        platform="telegram",
        reply_to_message_id=reply_to,
        message_thread_id=thread_id,
        raw_event=update,
    )

    try:
        await handler(incoming)
    except Exception as e:
        logger.error(f"Error handling message: {e}")
        # Best-effort error notice back to the chat; never re-raise here.
        with contextlib.suppress(Exception):
            await self.send_message(
                chat_id,
                f"❌ *{escape_md_v2('Error:')}* {escape_md_v2(get_user_facing_error_message(e)[:200])}",
                reply_to=incoming.message_id,
                message_thread_id=thread_id,
                parse_mode="MarkdownV2",
            )
|
| 546 |
+
|
| 547 |
+
async def _on_telegram_voice(
    self, update: Update, context: ContextTypes.DEFAULT_TYPE
) -> None:
    """Handle incoming voice messages.

    Flow: post a "Transcribing..." status message, register the voice note
    as pending (so a /clear reply can cancel it), download the audio to a
    temp file, transcribe off the event loop, then forward the transcript
    through the normal message pipeline unless it was cancelled meanwhile.
    """
    if (
        not update.message
        or not update.message.voice
        or not update.effective_user
        or not update.effective_chat
    ):
        return

    # Imported lazily; settings may not be loadable at module import time.
    from config.settings import get_settings

    settings = get_settings()
    if not settings.voice_note_enabled:
        await update.message.reply_text("Voice notes are disabled.")
        return

    user_id = str(update.effective_user.id)
    chat_id = str(update.effective_chat.id)

    # Single-user security gate (same policy as text messages).
    if self.allowed_user_id and user_id != str(self.allowed_user_id).strip():
        logger.warning(f"Unauthorized voice access attempt from {user_id}")
        return

    if not self._message_handler:
        return

    thread_id = (
        str(update.message.message_thread_id)
        if getattr(update.message, "message_thread_id", None) is not None
        else None
    )
    # fire_and_forget=False: we need the status message id to edit/delete it.
    status_msg_id = await self.queue_send_message(
        chat_id,
        format_status("⏳", "Transcribing voice note..."),
        reply_to=str(update.message.message_id),
        parse_mode="MarkdownV2",
        fire_and_forget=False,
        message_thread_id=thread_id,
    )

    message_id = str(update.message.message_id)
    # Register BEFORE the slow transcription so a /clear reply can cancel it.
    await self._register_pending_voice(chat_id, message_id, str(status_msg_id))
    reply_to = (
        str(update.message.reply_to_message.message_id)
        if update.message.reply_to_message
        else None
    )

    # Pick a file suffix from the MIME type; Telegram voice notes default to OGG.
    voice = update.message.voice
    suffix = ".ogg"
    if voice.mime_type and "mpeg" in voice.mime_type:
        suffix = ".mp3"
    elif voice.mime_type and "mp4" in voice.mime_type:
        suffix = ".mp4"

    # delete=False: we only reserve a path here; the download writes it later.
    with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
        tmp_path = Path(tmp.name)

    try:
        tg_file = await context.bot.get_file(voice.file_id)
        await tg_file.download_to_drive(custom_path=str(tmp_path))

        # Imported lazily; pulls in the (heavy) whisper dependency chain.
        from ..transcription import transcribe_audio

        # Run blocking transcription off the event loop.
        transcribed = await asyncio.to_thread(
            transcribe_audio,
            tmp_path,
            voice.mime_type or "audio/ogg",
            whisper_model=settings.whisper_model,
            whisper_device=settings.whisper_device,
        )

        # Cancelled during transcription: clean up the status message and bail.
        if not await self._is_voice_still_pending(chat_id, message_id):
            await self.queue_delete_message(chat_id, str(status_msg_id))
            return

        # Consume the pending entries (both index keys) before forwarding.
        async with self._pending_voice_lock:
            self._pending_voice.pop((chat_id, message_id), None)
            self._pending_voice.pop((chat_id, str(status_msg_id)), None)

        incoming = IncomingMessage(
            text=transcribed,
            chat_id=chat_id,
            user_id=user_id,
            message_id=message_id,
            platform="telegram",
            reply_to_message_id=reply_to,
            message_thread_id=thread_id,
            raw_event=update,
            status_message_id=status_msg_id,
        )

        logger.info(
            "TELEGRAM_VOICE: chat_id={} message_id={} transcribed={!r}",
            chat_id,
            message_id,
            (transcribed[:80] + "..." if len(transcribed) > 80 else transcribed),
        )

        await self._message_handler(incoming)
    except ValueError as e:
        await update.message.reply_text(get_user_facing_error_message(e)[:200])
    except ImportError as e:
        # transcription module / whisper not installed.
        await update.message.reply_text(get_user_facing_error_message(e)[:200])
    except Exception as e:
        logger.error(f"Voice transcription failed: {e}")
        await update.message.reply_text(
            "Could not transcribe voice note. Please try again or send text."
        )
    finally:
        # Always remove the temp audio file; ignore races on deletion.
        with contextlib.suppress(OSError):
            tmp_path.unlink(missing_ok=True)
|
Claude_Code/messaging/rendering/__init__.py
ADDED
|
@@ -0,0 +1,41 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Markdown rendering utilities for messaging platforms."""
|
| 2 |
+
|
| 3 |
+
from .discord_markdown import (
|
| 4 |
+
discord_bold,
|
| 5 |
+
discord_code_inline,
|
| 6 |
+
escape_discord,
|
| 7 |
+
escape_discord_code,
|
| 8 |
+
format_status_discord,
|
| 9 |
+
render_markdown_to_discord,
|
| 10 |
+
)
|
| 11 |
+
from .discord_markdown import (
|
| 12 |
+
format_status as format_status_discord_fn,
|
| 13 |
+
)
|
| 14 |
+
from .telegram_markdown import (
|
| 15 |
+
escape_md_v2,
|
| 16 |
+
escape_md_v2_code,
|
| 17 |
+
escape_md_v2_link_url,
|
| 18 |
+
mdv2_bold,
|
| 19 |
+
mdv2_code_inline,
|
| 20 |
+
render_markdown_to_mdv2,
|
| 21 |
+
)
|
| 22 |
+
from .telegram_markdown import (
|
| 23 |
+
format_status as format_status_telegram_fn,
|
| 24 |
+
)
|
| 25 |
+
|
| 26 |
+
__all__ = [
|
| 27 |
+
"discord_bold",
|
| 28 |
+
"discord_code_inline",
|
| 29 |
+
"escape_discord",
|
| 30 |
+
"escape_discord_code",
|
| 31 |
+
"escape_md_v2",
|
| 32 |
+
"escape_md_v2_code",
|
| 33 |
+
"escape_md_v2_link_url",
|
| 34 |
+
"format_status_discord",
|
| 35 |
+
"format_status_discord_fn",
|
| 36 |
+
"format_status_telegram_fn",
|
| 37 |
+
"mdv2_bold",
|
| 38 |
+
"mdv2_code_inline",
|
| 39 |
+
"render_markdown_to_discord",
|
| 40 |
+
"render_markdown_to_mdv2",
|
| 41 |
+
]
|
Claude_Code/messaging/rendering/discord_markdown.py
ADDED
|
@@ -0,0 +1,365 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Discord markdown utilities.
|
| 2 |
+
|
| 3 |
+
Discord uses standard markdown: **bold**, *italic*, `code`, ```code block```.
|
| 4 |
+
Used by the message handler and Discord platform adapter.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import re
|
| 8 |
+
|
| 9 |
+
from markdown_it import MarkdownIt
|
| 10 |
+
|
| 11 |
+
# Discord escapes: \ * _ ` ~ | >
DISCORD_SPECIAL = set("\\*_`~|>")

# Shared CommonMark parser: HTML disabled, hard line breaks off,
# GFM strikethrough and tables enabled.
_MD = MarkdownIt("commonmark", {"html": False, "breaks": False})
_MD.enable("strikethrough")
_MD.enable("table")

# Matches a GFM table separator row, e.g. `| --- | :---: |`.
_TABLE_SEP_RE = re.compile(r"^\s*\|?\s*:?-{3,}:?\s*(\|\s*:?-{3,}:?\s*)+\|?\s*$")
# Matches the opening/closing line of a ``` fenced code block.
_FENCE_RE = re.compile(r"^\s*```")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _is_gfm_table_header_line(line: str) -> bool:
    """Return True when *line* looks like a GFM table header row (>= 2 cells)."""
    if "|" not in line:
        return False
    if _TABLE_SEP_RE.match(line):
        # A separator row is not a header row.
        return False
    cells = [
        cell.strip()
        for cell in line.strip().strip("|").split("|")
        if cell.strip()
    ]
    return len(cells) >= 2
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _normalize_gfm_tables(text: str) -> str:
|
| 35 |
+
"""Insert blank line before detected tables outside code blocks."""
|
| 36 |
+
lines = text.splitlines()
|
| 37 |
+
if len(lines) < 2:
|
| 38 |
+
return text
|
| 39 |
+
|
| 40 |
+
out_lines: list[str] = []
|
| 41 |
+
in_fence = False
|
| 42 |
+
|
| 43 |
+
for idx, line in enumerate(lines):
|
| 44 |
+
if _FENCE_RE.match(line):
|
| 45 |
+
in_fence = not in_fence
|
| 46 |
+
out_lines.append(line)
|
| 47 |
+
continue
|
| 48 |
+
|
| 49 |
+
if (
|
| 50 |
+
not in_fence
|
| 51 |
+
and idx + 1 < len(lines)
|
| 52 |
+
and _is_gfm_table_header_line(line)
|
| 53 |
+
and _TABLE_SEP_RE.match(lines[idx + 1])
|
| 54 |
+
and out_lines
|
| 55 |
+
and out_lines[-1].strip() != ""
|
| 56 |
+
):
|
| 57 |
+
m = re.match(r"^(\s*)", line)
|
| 58 |
+
indent = m.group(1) if m else ""
|
| 59 |
+
out_lines.append(indent)
|
| 60 |
+
|
| 61 |
+
out_lines.append(line)
|
| 62 |
+
|
| 63 |
+
return "\n".join(out_lines)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
def escape_discord(text: str) -> str:
    """Escape text for Discord markdown (bold, italic, etc.)."""
    pieces: list[str] = []
    for ch in text:
        if ch in DISCORD_SPECIAL:
            pieces.append("\\")
        pieces.append(ch)
    return "".join(pieces)
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def escape_discord_code(text: str) -> str:
    """Escape text for Discord code spans/blocks."""
    # Single-pass translation; equivalent to escaping backslashes first,
    # then backticks.
    return text.translate({ord("\\"): "\\\\", ord("`"): "\\`"})
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def discord_bold(text: str) -> str:
    """Format text as bold in Discord (uses **)."""
    return "**" + escape_discord(text) + "**"
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def discord_code_inline(text: str) -> str:
    """Format text as inline code in Discord."""
    return "`" + escape_discord_code(text) + "`"
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
def format_status_discord(label: str, suffix: str | None = None) -> str:
    """Format a status message for Discord (label in bold, optional suffix)."""
    head = discord_bold(label)
    return f"{head} {escape_discord(suffix)}" if suffix else head
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
def format_status(emoji: str, label: str, suffix: str | None = None) -> str:
    """Format a status message with emoji for Discord (matches Telegram API)."""
    head = f"{emoji} {discord_bold(label)}"
    return f"{head} {escape_discord(suffix)}" if suffix else head
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
def render_markdown_to_discord(text: str) -> str:
|
| 103 |
+
"""Render common Markdown into Discord-compatible format."""
|
| 104 |
+
if not text:
|
| 105 |
+
return ""
|
| 106 |
+
|
| 107 |
+
text = _normalize_gfm_tables(text)
|
| 108 |
+
tokens = _MD.parse(text)
|
| 109 |
+
|
| 110 |
+
def render_inline_table_plain(children) -> str:
|
| 111 |
+
out: list[str] = []
|
| 112 |
+
for tok in children:
|
| 113 |
+
if tok.type == "text" or tok.type == "code_inline":
|
| 114 |
+
out.append(tok.content)
|
| 115 |
+
elif tok.type in {"softbreak", "hardbreak"}:
|
| 116 |
+
out.append(" ")
|
| 117 |
+
elif tok.type == "image" and tok.content:
|
| 118 |
+
out.append(tok.content)
|
| 119 |
+
return "".join(out)
|
| 120 |
+
|
| 121 |
+
def render_inline(children) -> str:
|
| 122 |
+
out: list[str] = []
|
| 123 |
+
i = 0
|
| 124 |
+
while i < len(children):
|
| 125 |
+
tok = children[i]
|
| 126 |
+
t = tok.type
|
| 127 |
+
if t == "text":
|
| 128 |
+
out.append(escape_discord(tok.content))
|
| 129 |
+
elif t in {"softbreak", "hardbreak"}:
|
| 130 |
+
out.append("\n")
|
| 131 |
+
elif t == "em_open" or t == "em_close":
|
| 132 |
+
out.append("*")
|
| 133 |
+
elif t == "strong_open" or t == "strong_close":
|
| 134 |
+
out.append("**")
|
| 135 |
+
elif t == "s_open" or t == "s_close":
|
| 136 |
+
out.append("~~")
|
| 137 |
+
elif t == "code_inline":
|
| 138 |
+
out.append(f"`{escape_discord_code(tok.content)}`")
|
| 139 |
+
elif t == "link_open":
|
| 140 |
+
href = ""
|
| 141 |
+
if tok.attrs:
|
| 142 |
+
if isinstance(tok.attrs, dict):
|
| 143 |
+
href = tok.attrs.get("href", "")
|
| 144 |
+
else:
|
| 145 |
+
for key, val in tok.attrs:
|
| 146 |
+
if key == "href":
|
| 147 |
+
href = val
|
| 148 |
+
break
|
| 149 |
+
inner_tokens = []
|
| 150 |
+
i += 1
|
| 151 |
+
while i < len(children) and children[i].type != "link_close":
|
| 152 |
+
inner_tokens.append(children[i])
|
| 153 |
+
i += 1
|
| 154 |
+
link_text = ""
|
| 155 |
+
for child in inner_tokens:
|
| 156 |
+
if child.type == "text" or child.type == "code_inline":
|
| 157 |
+
link_text += child.content
|
| 158 |
+
out.append(f"[{escape_discord(link_text)}]({href})")
|
| 159 |
+
elif t == "image":
|
| 160 |
+
href = ""
|
| 161 |
+
alt = tok.content or ""
|
| 162 |
+
if tok.attrs:
|
| 163 |
+
if isinstance(tok.attrs, dict):
|
| 164 |
+
href = tok.attrs.get("src", "")
|
| 165 |
+
else:
|
| 166 |
+
for key, val in tok.attrs:
|
| 167 |
+
if key == "src":
|
| 168 |
+
href = val
|
| 169 |
+
break
|
| 170 |
+
if alt:
|
| 171 |
+
out.append(f"{escape_discord(alt)} ({href})")
|
| 172 |
+
else:
|
| 173 |
+
out.append(href)
|
| 174 |
+
else:
|
| 175 |
+
out.append(escape_discord(tok.content or ""))
|
| 176 |
+
i += 1
|
| 177 |
+
return "".join(out)
|
| 178 |
+
|
| 179 |
+
out: list[str] = []
|
| 180 |
+
list_stack: list[dict] = []
|
| 181 |
+
pending_prefix: str | None = None
|
| 182 |
+
blockquote_level = 0
|
| 183 |
+
in_heading = False
|
| 184 |
+
|
| 185 |
+
def apply_blockquote(val: str) -> str:
|
| 186 |
+
if blockquote_level <= 0:
|
| 187 |
+
return val
|
| 188 |
+
prefix = "> " * blockquote_level
|
| 189 |
+
return prefix + val.replace("\n", "\n" + prefix)
|
| 190 |
+
|
| 191 |
+
i = 0
|
| 192 |
+
while i < len(tokens):
|
| 193 |
+
tok = tokens[i]
|
| 194 |
+
t = tok.type
|
| 195 |
+
if t == "paragraph_open":
|
| 196 |
+
pass
|
| 197 |
+
elif t == "paragraph_close":
|
| 198 |
+
out.append("\n")
|
| 199 |
+
elif t == "heading_open":
|
| 200 |
+
in_heading = True
|
| 201 |
+
elif t == "heading_close":
|
| 202 |
+
in_heading = False
|
| 203 |
+
out.append("\n")
|
| 204 |
+
elif t == "bullet_list_open":
|
| 205 |
+
list_stack.append({"type": "bullet", "index": 1})
|
| 206 |
+
elif t == "bullet_list_close":
|
| 207 |
+
if list_stack:
|
| 208 |
+
list_stack.pop()
|
| 209 |
+
out.append("\n")
|
| 210 |
+
elif t == "ordered_list_open":
|
| 211 |
+
start = 1
|
| 212 |
+
if tok.attrs:
|
| 213 |
+
if isinstance(tok.attrs, dict):
|
| 214 |
+
val = tok.attrs.get("start")
|
| 215 |
+
if val is not None:
|
| 216 |
+
try:
|
| 217 |
+
start = int(val)
|
| 218 |
+
except TypeError, ValueError:
|
| 219 |
+
start = 1
|
| 220 |
+
else:
|
| 221 |
+
for key, val in tok.attrs:
|
| 222 |
+
if key == "start":
|
| 223 |
+
try:
|
| 224 |
+
start = int(val)
|
| 225 |
+
except TypeError, ValueError:
|
| 226 |
+
start = 1
|
| 227 |
+
break
|
| 228 |
+
list_stack.append({"type": "ordered", "index": start})
|
| 229 |
+
elif t == "ordered_list_close":
|
| 230 |
+
if list_stack:
|
| 231 |
+
list_stack.pop()
|
| 232 |
+
out.append("\n")
|
| 233 |
+
elif t == "list_item_open":
|
| 234 |
+
if list_stack:
|
| 235 |
+
top = list_stack[-1]
|
| 236 |
+
if top["type"] == "bullet":
|
| 237 |
+
pending_prefix = "- "
|
| 238 |
+
else:
|
| 239 |
+
pending_prefix = f"{top['index']}. "
|
| 240 |
+
top["index"] += 1
|
| 241 |
+
elif t == "list_item_close":
|
| 242 |
+
out.append("\n")
|
| 243 |
+
elif t == "blockquote_open":
|
| 244 |
+
blockquote_level += 1
|
| 245 |
+
elif t == "blockquote_close":
|
| 246 |
+
blockquote_level = max(0, blockquote_level - 1)
|
| 247 |
+
out.append("\n")
|
| 248 |
+
elif t == "table_open":
|
| 249 |
+
if pending_prefix:
|
| 250 |
+
out.append(apply_blockquote(pending_prefix.rstrip()))
|
| 251 |
+
out.append("\n")
|
| 252 |
+
pending_prefix = None
|
| 253 |
+
|
| 254 |
+
rows: list[list[str]] = []
|
| 255 |
+
row_is_header: list[bool] = []
|
| 256 |
+
|
| 257 |
+
j = i + 1
|
| 258 |
+
in_thead = False
|
| 259 |
+
in_row = False
|
| 260 |
+
current_row: list[str] = []
|
| 261 |
+
current_row_header = False
|
| 262 |
+
|
| 263 |
+
in_cell = False
|
| 264 |
+
cell_parts: list[str] = []
|
| 265 |
+
|
| 266 |
+
while j < len(tokens):
|
| 267 |
+
tt = tokens[j].type
|
| 268 |
+
if tt == "thead_open":
|
| 269 |
+
in_thead = True
|
| 270 |
+
elif tt == "thead_close":
|
| 271 |
+
in_thead = False
|
| 272 |
+
elif tt == "tr_open":
|
| 273 |
+
in_row = True
|
| 274 |
+
current_row = []
|
| 275 |
+
current_row_header = in_thead
|
| 276 |
+
elif tt in {"th_open", "td_open"}:
|
| 277 |
+
in_cell = True
|
| 278 |
+
cell_parts = []
|
| 279 |
+
elif tt == "inline" and in_cell:
|
| 280 |
+
cell_parts.append(
|
| 281 |
+
render_inline_table_plain(tokens[j].children or [])
|
| 282 |
+
)
|
| 283 |
+
elif tt in {"th_close", "td_close"} and in_cell:
|
| 284 |
+
cell = " ".join(cell_parts).strip()
|
| 285 |
+
current_row.append(cell)
|
| 286 |
+
in_cell = False
|
| 287 |
+
cell_parts = []
|
| 288 |
+
elif tt == "tr_close" and in_row:
|
| 289 |
+
rows.append(current_row)
|
| 290 |
+
row_is_header.append(bool(current_row_header))
|
| 291 |
+
in_row = False
|
| 292 |
+
elif tt == "table_close":
|
| 293 |
+
break
|
| 294 |
+
j += 1
|
| 295 |
+
|
| 296 |
+
if rows:
|
| 297 |
+
col_count = max((len(r) for r in rows), default=0)
|
| 298 |
+
norm_rows: list[list[str]] = []
|
| 299 |
+
for r in rows:
|
| 300 |
+
if len(r) < col_count:
|
| 301 |
+
r = r + [""] * (col_count - len(r))
|
| 302 |
+
norm_rows.append(r)
|
| 303 |
+
|
| 304 |
+
widths: list[int] = []
|
| 305 |
+
for c in range(col_count):
|
| 306 |
+
w = max((len(r[c]) for r in norm_rows), default=0)
|
| 307 |
+
widths.append(max(w, 3))
|
| 308 |
+
|
| 309 |
+
def fmt_row(
|
| 310 |
+
r: list[str], _w: list[int] = widths, _c: int = col_count
|
| 311 |
+
) -> str:
|
| 312 |
+
cells = [r[c].ljust(_w[c]) for c in range(_c)]
|
| 313 |
+
return "| " + " | ".join(cells) + " |"
|
| 314 |
+
|
| 315 |
+
def fmt_sep(_w: list[int] = widths, _c: int = col_count) -> str:
|
| 316 |
+
cells = ["-" * _w[c] for c in range(_c)]
|
| 317 |
+
return "| " + " | ".join(cells) + " |"
|
| 318 |
+
|
| 319 |
+
last_header_idx = -1
|
| 320 |
+
for idx, is_h in enumerate(row_is_header):
|
| 321 |
+
if is_h:
|
| 322 |
+
last_header_idx = idx
|
| 323 |
+
|
| 324 |
+
lines: list[str] = []
|
| 325 |
+
for idx, r in enumerate(norm_rows):
|
| 326 |
+
lines.append(fmt_row(r))
|
| 327 |
+
if idx == last_header_idx:
|
| 328 |
+
lines.append(fmt_sep())
|
| 329 |
+
|
| 330 |
+
table_text = "\n".join(lines).rstrip()
|
| 331 |
+
out.append(f"```\n{escape_discord_code(table_text)}\n```")
|
| 332 |
+
out.append("\n")
|
| 333 |
+
|
| 334 |
+
i = j + 1
|
| 335 |
+
continue
|
| 336 |
+
elif t in {"code_block", "fence"}:
|
| 337 |
+
code = escape_discord_code(tok.content.rstrip("\n"))
|
| 338 |
+
out.append(f"```\n{code}\n```")
|
| 339 |
+
out.append("\n")
|
| 340 |
+
elif t == "inline":
|
| 341 |
+
rendered = render_inline(tok.children or [])
|
| 342 |
+
if in_heading:
|
| 343 |
+
rendered = f"**{render_inline(tok.children or [])}**"
|
| 344 |
+
if pending_prefix:
|
| 345 |
+
rendered = pending_prefix + rendered
|
| 346 |
+
pending_prefix = None
|
| 347 |
+
rendered = apply_blockquote(rendered)
|
| 348 |
+
out.append(rendered)
|
| 349 |
+
else:
|
| 350 |
+
if tok.content:
|
| 351 |
+
out.append(escape_discord(tok.content))
|
| 352 |
+
i += 1
|
| 353 |
+
|
| 354 |
+
return "".join(out).rstrip()
|
| 355 |
+
|
| 356 |
+
|
| 357 |
+
# Public API of this module (Discord Markdown rendering helpers).
__all__ = [
    "discord_bold",
    "discord_code_inline",
    "escape_discord",
    "escape_discord_code",
    "format_status",
    "format_status_discord",
    "render_markdown_to_discord",
]
|
Claude_Code/messaging/rendering/telegram_markdown.py
ADDED
|
@@ -0,0 +1,380 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Telegram MarkdownV2 utilities.
|
| 2 |
+
|
| 3 |
+
Renders common Markdown into Telegram MarkdownV2 format.
|
| 4 |
+
Used by the message handler and Telegram platform adapter.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import re
|
| 8 |
+
|
| 9 |
+
from markdown_it import MarkdownIt
|
| 10 |
+
|
| 11 |
+
# Characters Telegram requires to be backslash-escaped in MarkdownV2 text.
MDV2_SPECIAL_CHARS = set("\\_*[]()~`>#+-=|{}.!")
# Characters that must be escaped inside a MarkdownV2 link destination.
MDV2_LINK_ESCAPE = set("\\)")

# Shared CommonMark parser with GFM strikethrough and tables enabled;
# HTML is disabled so raw tags pass through as plain text.
_MD = MarkdownIt("commonmark", {"html": False, "breaks": False})
_MD.enable("strikethrough")
_MD.enable("table")

# Matches a GFM table separator row such as "|---|:---:|".
_TABLE_SEP_RE = re.compile(r"^\s*\|?\s*:?-{3,}:?\s*(\|\s*:?-{3,}:?\s*)+\|?\s*$")
# Matches an opening/closing ``` fence line.
_FENCE_RE = re.compile(r"^\s*```")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _is_gfm_table_header_line(line: str) -> bool:
    """Check if line is a GFM table header (pipe-delimited, not a separator row)."""
    if "|" not in line:
        return False
    if _TABLE_SEP_RE.match(line):
        # Separator rows ("|---|---|") are not headers.
        return False
    body = line.strip().strip("|")
    cells = [cell.strip() for cell in body.split("|")]
    non_empty = [cell for cell in cells if cell]
    # A plausible header has at least two non-empty cells.
    return len(non_empty) >= 2
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _normalize_gfm_tables(text: str) -> str:
    """Insert a blank line before GFM tables that directly follow prose.

    Many LLMs emit a table immediately after a paragraph line with no blank
    line in between; markdown-it then treats the table header as a softbreak
    inside the paragraph and the table extension never triggers. Fenced code
    blocks are copied verbatim and never modified.
    """
    lines = text.splitlines()
    if len(lines) < 2:
        return text

    result: list[str] = []
    inside_fence = False

    for pos, line in enumerate(lines):
        if _FENCE_RE.match(line):
            # Fence delimiters toggle code-block state.
            inside_fence = not inside_fence
            result.append(line)
            continue

        next_is_separator = (
            pos + 1 < len(lines)
            and _TABLE_SEP_RE.match(lines[pos + 1]) is not None
        )
        starts_table = (
            not inside_fence
            and next_is_separator
            and _is_gfm_table_header_line(line)
        )
        if starts_table and result and result[-1].strip() != "":
            # Insert a "blank" line that preserves the table's indentation.
            indent_match = re.match(r"^(\s*)", line)
            result.append(indent_match.group(1) if indent_match else "")

        result.append(line)

    return "\n".join(result)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
def escape_md_v2(text: str) -> str:
    """Escape text for Telegram MarkdownV2 by backslash-prefixing special chars."""
    pieces: list[str] = []
    for ch in text:
        if ch in MDV2_SPECIAL_CHARS:
            pieces.append("\\")
        pieces.append(ch)
    return "".join(pieces)
|
| 75 |
+
|
| 76 |
+
|
| 77 |
+
def escape_md_v2_code(text: str) -> str:
    """Escape text for Telegram MarkdownV2 code spans/blocks.

    Only backslash and backtick are significant inside code contexts;
    a single-pass character translation escapes both.
    """
    escapes = str.maketrans({"\\": "\\\\", "`": "\\`"})
    return text.translate(escapes)
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
def escape_md_v2_link_url(text: str) -> str:
    """Escape URL for a Telegram MarkdownV2 link destination."""
    pieces: list[str] = []
    for ch in text:
        if ch in MDV2_LINK_ESCAPE:
            pieces.append("\\")
        pieces.append(ch)
    return "".join(pieces)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def mdv2_bold(text: str) -> str:
    """Format text as bold in MarkdownV2 (escaping the content first)."""
    return "*" + escape_md_v2(text) + "*"
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def mdv2_code_inline(text: str) -> str:
    """Format text as inline code in MarkdownV2 (code-escaping the content)."""
    return "`" + escape_md_v2_code(text) + "`"
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def format_status(emoji: str, label: str, suffix: str | None = None) -> str:
    """Format a status line: emoji, bold label, and an optional escaped suffix."""
    parts = [f"{emoji} {mdv2_bold(label)}"]
    if suffix:
        parts.append(escape_md_v2(suffix))
    return " ".join(parts)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
def render_markdown_to_mdv2(text: str) -> str:
    """Render common Markdown into Telegram MarkdownV2.

    Parses *text* with markdown-it (CommonMark + strikethrough + tables)
    and re-emits it using MarkdownV2 syntax. Tables and code blocks become
    fenced code blocks; headings become bold lines; lists and blockquotes
    are rendered with escaped prefixes.

    Args:
        text: Source Markdown (may be empty).

    Returns:
        The MarkdownV2-formatted string, right-stripped; "" for empty input.
    """
    if not text:
        return ""

    text = _normalize_gfm_tables(text)
    tokens = _MD.parse(text)

    def render_inline_table_plain(children) -> str:
        """Render inline tokens as unescaped plain text (for table cells)."""
        out: list[str] = []
        for tok in children:
            if tok.type == "text" or tok.type == "code_inline":
                out.append(tok.content)
            elif tok.type in {"softbreak", "hardbreak"}:
                out.append(" ")
            elif tok.type == "image" and tok.content:
                out.append(tok.content)
        return "".join(out)

    def render_inline_plain(children) -> str:
        """Render inline tokens as escaped text without styling (for headings)."""
        out: list[str] = []
        for tok in children:
            if tok.type == "text" or tok.type == "code_inline":
                out.append(escape_md_v2(tok.content))
            elif tok.type in {"softbreak", "hardbreak"}:
                out.append("\n")
        return "".join(out)

    def render_inline(children) -> str:
        """Render inline tokens with full MarkdownV2 styling."""
        out: list[str] = []
        i = 0
        while i < len(children):
            tok = children[i]
            t = tok.type
            if t == "text":
                out.append(escape_md_v2(tok.content))
            elif t in {"softbreak", "hardbreak"}:
                out.append("\n")
            elif t == "em_open" or t == "em_close":
                out.append("_")
            elif t == "strong_open" or t == "strong_close":
                out.append("*")
            elif t == "s_open" or t == "s_close":
                out.append("~")
            elif t == "code_inline":
                out.append(f"`{escape_md_v2_code(tok.content)}`")
            elif t == "link_open":
                # token.attrs may be a dict (markdown-it-py >= 2) or a list
                # of (key, value) pairs (older versions); handle both.
                href = ""
                if tok.attrs:
                    if isinstance(tok.attrs, dict):
                        href = tok.attrs.get("href", "")
                    else:
                        for key, val in tok.attrs:
                            if key == "href":
                                href = val
                                break
                # Collect the link's inner tokens up to link_close.
                inner_tokens = []
                i += 1
                while i < len(children) and children[i].type != "link_close":
                    inner_tokens.append(children[i])
                    i += 1
                link_text = ""
                for child in inner_tokens:
                    if child.type == "text" or child.type == "code_inline":
                        link_text += child.content
                out.append(
                    f"[{escape_md_v2(link_text)}]({escape_md_v2_link_url(href)})"
                )
            elif t == "image":
                href = ""
                alt = tok.content or ""
                if tok.attrs:
                    if isinstance(tok.attrs, dict):
                        href = tok.attrs.get("src", "")
                    else:
                        for key, val in tok.attrs:
                            if key == "src":
                                href = val
                                break
                # Images have no MarkdownV2 equivalent: show alt + URL.
                if alt:
                    out.append(f"{escape_md_v2(alt)} ({escape_md_v2_link_url(href)})")
                else:
                    out.append(escape_md_v2_link_url(href))
            else:
                out.append(escape_md_v2(tok.content or ""))
            i += 1
        return "".join(out)

    out: list[str] = []
    list_stack: list[dict] = []  # nesting of {"type": "bullet"|"ordered", "index": n}
    pending_prefix: str | None = None  # list-item prefix awaiting its first inline
    blockquote_level = 0
    in_heading = False

    def apply_blockquote(val: str) -> str:
        """Prefix every line of *val* with the current blockquote markers."""
        if blockquote_level <= 0:
            return val
        prefix = "> " * blockquote_level
        return prefix + val.replace("\n", "\n" + prefix)

    i = 0
    while i < len(tokens):
        tok = tokens[i]
        t = tok.type
        if t == "paragraph_open":
            pass
        elif t == "paragraph_close":
            out.append("\n")
        elif t == "heading_open":
            in_heading = True
        elif t == "heading_close":
            in_heading = False
            out.append("\n")
        elif t == "bullet_list_open":
            list_stack.append({"type": "bullet", "index": 1})
        elif t == "bullet_list_close":
            if list_stack:
                list_stack.pop()
            out.append("\n")
        elif t == "ordered_list_open":
            start = 1
            if tok.attrs:
                if isinstance(tok.attrs, dict):
                    val = tok.attrs.get("start")
                    if val is not None:
                        try:
                            start = int(val)
                        # FIX: Python 3 requires a parenthesized tuple here;
                        # "except TypeError, ValueError:" is a SyntaxError.
                        except (TypeError, ValueError):
                            start = 1
                else:
                    for key, val in tok.attrs:
                        if key == "start":
                            try:
                                start = int(val)
                            except (TypeError, ValueError):
                                start = 1
                            break
            list_stack.append({"type": "ordered", "index": start})
        elif t == "ordered_list_close":
            if list_stack:
                list_stack.pop()
            out.append("\n")
        elif t == "list_item_open":
            if list_stack:
                top = list_stack[-1]
                if top["type"] == "bullet":
                    pending_prefix = "\\- "
                else:
                    # "-" and "." are MarkdownV2 special chars, hence the escapes.
                    pending_prefix = f"{top['index']}\\."
                    top["index"] += 1
                    pending_prefix += " "
        elif t == "list_item_close":
            out.append("\n")
        elif t == "blockquote_open":
            blockquote_level += 1
        elif t == "blockquote_close":
            blockquote_level = max(0, blockquote_level - 1)
            out.append("\n")
        elif t == "table_open":
            # Flush a dangling list prefix so the table starts on its own line.
            if pending_prefix:
                out.append(apply_blockquote(pending_prefix.rstrip()))
                out.append("\n")
                pending_prefix = None

            rows: list[list[str]] = []
            row_is_header: list[bool] = []

            j = i + 1
            in_thead = False
            in_row = False
            current_row: list[str] = []
            current_row_header = False

            in_cell = False
            cell_parts: list[str] = []

            # Walk the table's tokens, collecting rows of plain-text cells.
            while j < len(tokens):
                tt = tokens[j].type
                if tt == "thead_open":
                    in_thead = True
                elif tt == "thead_close":
                    in_thead = False
                elif tt == "tr_open":
                    in_row = True
                    current_row = []
                    current_row_header = in_thead
                elif tt in {"th_open", "td_open"}:
                    in_cell = True
                    cell_parts = []
                elif tt == "inline" and in_cell:
                    cell_parts.append(
                        render_inline_table_plain(tokens[j].children or [])
                    )
                elif tt in {"th_close", "td_close"} and in_cell:
                    cell = " ".join(cell_parts).strip()
                    current_row.append(cell)
                    in_cell = False
                    cell_parts = []
                elif tt == "tr_close" and in_row:
                    rows.append(current_row)
                    row_is_header.append(bool(current_row_header))
                    in_row = False
                elif tt == "table_close":
                    break
                j += 1

            if rows:
                # Pad ragged rows to a common column count.
                col_count = max((len(r) for r in rows), default=0)
                norm_rows: list[list[str]] = []
                for r in rows:
                    if len(r) < col_count:
                        r = r + [""] * (col_count - len(r))
                    norm_rows.append(r)

                # Column widths for an aligned monospace rendering (min 3).
                widths: list[int] = []
                for c in range(col_count):
                    w = max((len(r[c]) for r in norm_rows), default=0)
                    widths.append(max(w, 3))

                def fmt_row(
                    r: list[str], _w: list[int] = widths, _c: int = col_count
                ) -> str:
                    cells = [r[c].ljust(_w[c]) for c in range(_c)]
                    return "| " + " | ".join(cells) + " |"

                def fmt_sep(_w: list[int] = widths, _c: int = col_count) -> str:
                    cells = ["-" * _w[c] for c in range(_c)]
                    return "| " + " | ".join(cells) + " |"

                last_header_idx = -1
                for idx, is_h in enumerate(row_is_header):
                    if is_h:
                        last_header_idx = idx

                lines: list[str] = []
                for idx, r in enumerate(norm_rows):
                    lines.append(fmt_row(r))
                    if idx == last_header_idx:
                        lines.append(fmt_sep())

                # Telegram has no table markup; ship it as a code block.
                table_text = "\n".join(lines).rstrip()
                out.append(f"```\n{escape_md_v2_code(table_text)}\n```")
                out.append("\n")

            i = j + 1
            continue
        elif t in {"code_block", "fence"}:
            code = escape_md_v2_code(tok.content.rstrip("\n"))
            out.append(f"```\n{code}\n```")
            out.append("\n")
        elif t == "inline":
            rendered = render_inline(tok.children or [])
            if in_heading:
                # Headings become bold plain text (no nested styling).
                rendered = f"*{render_inline_plain(tok.children or [])}*"
            if pending_prefix:
                rendered = pending_prefix + rendered
                pending_prefix = None
            rendered = apply_blockquote(rendered)
            out.append(rendered)
        else:
            if tok.content:
                out.append(escape_md_v2(tok.content))
        i += 1

    return "".join(out).rstrip()
|
| 370 |
+
|
| 371 |
+
|
| 372 |
+
# Public API of this module (Telegram MarkdownV2 rendering helpers).
__all__ = [
    "escape_md_v2",
    "escape_md_v2_code",
    "escape_md_v2_link_url",
    "format_status",
    "mdv2_bold",
    "mdv2_code_inline",
    "render_markdown_to_mdv2",
]
|
Claude_Code/messaging/session.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Session Store for Messaging Platforms
|
| 3 |
+
|
| 4 |
+
Provides persistent storage for mapping platform messages to Claude CLI session IDs
|
| 5 |
+
and message trees for conversation continuation.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import json
|
| 9 |
+
import os
|
| 10 |
+
import threading
|
| 11 |
+
from datetime import UTC, datetime
|
| 12 |
+
from typing import Any
|
| 13 |
+
|
| 14 |
+
from loguru import logger
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class SessionStore:
|
| 18 |
+
"""
|
| 19 |
+
Persistent storage for message ↔ Claude session mappings and message trees.
|
| 20 |
+
|
| 21 |
+
Uses a JSON file for storage with thread-safe operations.
|
| 22 |
+
Platform-agnostic: works with any messaging platform.
|
| 23 |
+
"""
|
| 24 |
+
|
| 25 |
+
    def __init__(self, storage_path: str = "sessions.json"):
        """Initialize the store and load any previously persisted state.

        Args:
            storage_path: Path of the JSON file used for persistence.
        """
        self.storage_path = storage_path
        # Guards all mutable state below; disk writes happen outside it.
        self._lock = threading.Lock()
        self._trees: dict[str, dict] = {}  # root_id -> tree data
        self._node_to_tree: dict[str, str] = {}  # node_id -> root_id
        # Per-chat message ID log used to support best-effort UI clearing (/clear).
        # Key: "{platform}:{chat_id}" -> list of records
        self._message_log: dict[str, list[dict[str, Any]]] = {}
        # Per-chat dedup sets mirroring the IDs already in _message_log.
        self._message_log_ids: dict[str, set[str]] = {}
        # Debounced-save bookkeeping: _dirty flags unsaved in-memory changes.
        self._dirty = False
        self._save_timer: threading.Timer | None = None
        self._save_debounce_secs = 0.5
        # Optional per-chat cap on logged IDs; unset or non-integer -> unlimited.
        cap_raw = os.getenv("MAX_MESSAGE_LOG_ENTRIES_PER_CHAT", "").strip()
        try:
            self._message_log_cap: int | None = int(cap_raw) if cap_raw else None
        except ValueError:
            self._message_log_cap = None
        self._load()
|
| 43 |
+
|
| 44 |
+
def _make_chat_key(self, platform: str, chat_id: str) -> str:
|
| 45 |
+
return f"{platform}:{chat_id}"
|
| 46 |
+
|
| 47 |
+
    def _load(self) -> None:
        """Load sessions and trees from disk.

        Missing files are ignored (fresh store). Any parse/IO error is
        logged and leaves the in-memory state as-is. The message log is
        sanitized entry-by-entry: non-dict items, missing IDs, and
        duplicate IDs within a chat are dropped.
        """
        if not os.path.exists(self.storage_path):
            return

        try:
            with open(self.storage_path, encoding="utf-8") as f:
                data = json.load(f)

            # Load trees
            self._trees = data.get("trees", {})
            self._node_to_tree = data.get("node_to_tree", {})

            # Load message log (optional/backward compatible)
            raw_log = data.get("message_log", {}) or {}
            if isinstance(raw_log, dict):
                self._message_log = {}
                self._message_log_ids = {}
                for chat_key, items in raw_log.items():
                    if not isinstance(chat_key, str) or not isinstance(items, list):
                        continue
                    cleaned: list[dict[str, Any]] = []
                    seen: set[str] = set()
                    for it in items:
                        if not isinstance(it, dict):
                            continue
                        mid = it.get("message_id")
                        if mid is None:
                            continue
                        # Normalize IDs to strings and drop duplicates.
                        mid_s = str(mid)
                        if mid_s in seen:
                            continue
                        seen.add(mid_s)
                        cleaned.append(
                            {
                                "message_id": mid_s,
                                "ts": str(it.get("ts") or ""),
                                "direction": str(it.get("direction") or ""),
                                "kind": str(it.get("kind") or ""),
                            }
                        )
                    self._message_log[chat_key] = cleaned
                    # Keep the dedup set in sync with the cleaned log.
                    self._message_log_ids[chat_key] = seen

            logger.info(
                f"Loaded {len(self._trees)} trees and "
                f"{sum(len(v) for v in self._message_log.values())} msg_ids from {self.storage_path}"
            )
        except Exception as e:
            logger.error(f"Failed to load sessions: {e}")
|
| 97 |
+
|
| 98 |
+
def _snapshot(self) -> dict:
|
| 99 |
+
"""Snapshot current state for serialization. Caller must hold self._lock."""
|
| 100 |
+
return {
|
| 101 |
+
"trees": dict(self._trees),
|
| 102 |
+
"node_to_tree": dict(self._node_to_tree),
|
| 103 |
+
"message_log": {k: list(v) for k, v in self._message_log.items()},
|
| 104 |
+
}
|
| 105 |
+
|
| 106 |
+
def _write_data(self, data: dict) -> None:
|
| 107 |
+
"""Write data dict to disk. Must be called WITHOUT holding self._lock."""
|
| 108 |
+
with open(self.storage_path, "w", encoding="utf-8") as f:
|
| 109 |
+
json.dump(data, f, indent=2)
|
| 110 |
+
|
| 111 |
+
def _schedule_save(self) -> None:
|
| 112 |
+
"""Schedule a debounced save. Caller must hold self._lock."""
|
| 113 |
+
self._dirty = True
|
| 114 |
+
if self._save_timer is not None:
|
| 115 |
+
self._save_timer.cancel()
|
| 116 |
+
self._save_timer = None
|
| 117 |
+
self._save_timer = threading.Timer(
|
| 118 |
+
self._save_debounce_secs, self._save_from_timer
|
| 119 |
+
)
|
| 120 |
+
self._save_timer.daemon = True
|
| 121 |
+
self._save_timer.start()
|
| 122 |
+
|
| 123 |
+
    def _save_from_timer(self) -> None:
        """Timer callback: persist state if dirty. Runs in the timer thread.

        Snapshots under the lock, then writes to disk outside it so the
        (slow) write does not block other store operations. On failure the
        dirty flag is restored so a later save attempt can retry.
        """
        with self._lock:
            if not self._dirty:
                # Nothing changed since the timer was armed; just clear it.
                self._save_timer = None
                return
            snapshot = self._snapshot()
            self._dirty = False
            self._save_timer = None
        try:
            self._write_data(snapshot)
        except Exception as e:
            logger.error(f"Failed to save sessions: {e}")
            with self._lock:
                # Write failed: re-flag dirty so the data is not silently lost.
                self._dirty = True
|
| 138 |
+
|
| 139 |
+
def _flush_save(self) -> dict:
|
| 140 |
+
"""Cancel pending timer and snapshot current state. Caller must hold self._lock.
|
| 141 |
+
Returns snapshot dict; caller must call _write_data(snapshot) outside the lock."""
|
| 142 |
+
if self._save_timer is not None:
|
| 143 |
+
self._save_timer.cancel()
|
| 144 |
+
self._save_timer = None
|
| 145 |
+
self._dirty = False
|
| 146 |
+
return self._snapshot()
|
| 147 |
+
|
| 148 |
+
    def flush_pending_save(self) -> None:
        """Flush any pending debounced save. Call on shutdown to avoid losing data."""
        with self._lock:
            snapshot = self._flush_save()
        try:
            # Disk write happens outside the lock, mirroring _save_from_timer.
            self._write_data(snapshot)
        except Exception as e:
            logger.error(f"Failed to save sessions: {e}")
            with self._lock:
                # Keep the dirty flag set so a later save can retry.
                self._dirty = True
|
| 158 |
+
|
| 159 |
+
    def record_message_id(
        self,
        platform: str,
        chat_id: str,
        message_id: str,
        direction: str,
        kind: str,
    ) -> None:
        """Record a message_id for later best-effort deletion (/clear).

        Args:
            platform: Messaging platform name (e.g. "telegram").
            chat_id: Platform chat identifier.
            message_id: Platform message identifier to remember.
            direction: Free-form tag for the message direction.
            kind: Free-form tag for the message type.
        """
        if message_id is None:
            return

        chat_key = self._make_chat_key(str(platform), str(chat_id))
        mid = str(message_id)

        with self._lock:
            seen = self._message_log_ids.setdefault(chat_key, set())
            if mid in seen:
                # Already recorded; skip duplicate log entries.
                return

            rec = {
                "message_id": mid,
                "ts": datetime.now(UTC).isoformat(),
                "direction": str(direction),
                "kind": str(kind),
            }
            self._message_log.setdefault(chat_key, []).append(rec)
            seen.add(mid)

            # Optional cap to prevent unbounded growth if configured.
            if self._message_log_cap is not None and self._message_log_cap > 0:
                items = self._message_log.get(chat_key, [])
                if len(items) > self._message_log_cap:
                    # Keep only the newest entries and rebuild the dedup set.
                    self._message_log[chat_key] = items[-self._message_log_cap :]
                    self._message_log_ids[chat_key] = {
                        str(x.get("message_id")) for x in self._message_log[chat_key]
                    }

            self._schedule_save()
|
| 198 |
+
|
| 199 |
+
def get_message_ids_for_chat(self, platform: str, chat_id: str) -> list[str]:
|
| 200 |
+
"""Get all recorded message IDs for a chat (in insertion order)."""
|
| 201 |
+
chat_key = self._make_chat_key(str(platform), str(chat_id))
|
| 202 |
+
with self._lock:
|
| 203 |
+
items = self._message_log.get(chat_key, [])
|
| 204 |
+
return [
|
| 205 |
+
str(x.get("message_id"))
|
| 206 |
+
for x in items
|
| 207 |
+
if x.get("message_id") is not None
|
| 208 |
+
]
|
| 209 |
+
|
| 210 |
+
    def clear_all(self) -> None:
        """Clear all stored sessions/trees/mappings and persist an empty store."""
        with self._lock:
            # Wipe every in-memory structure so the snapshot below reflects
            # an empty store.
            self._trees.clear()
            self._node_to_tree.clear()
            self._message_log.clear()
            self._message_log_ids.clear()
        # NOTE(review): _flush_save/_write_data appear to manage their own
        # locking — confirm; calling them while holding self._lock would
        # deadlock if self._lock is a plain (non-reentrant) Lock.
        snapshot = self._flush_save()
        try:
            self._write_data(snapshot)
        except Exception as e:
            logger.error(f"Failed to save sessions: {e}")
            # Persist failed: mark dirty so a later scheduled save retries.
            with self._lock:
                self._dirty = True
|
| 224 |
+
|
| 225 |
+
# ==================== Tree Methods ====================
|
| 226 |
+
|
| 227 |
+
def save_tree(self, root_id: str, tree_data: dict) -> None:
|
| 228 |
+
"""
|
| 229 |
+
Save a message tree.
|
| 230 |
+
|
| 231 |
+
Args:
|
| 232 |
+
root_id: Root node ID of the tree
|
| 233 |
+
tree_data: Serialized tree data from tree.to_dict()
|
| 234 |
+
"""
|
| 235 |
+
with self._lock:
|
| 236 |
+
self._trees[root_id] = tree_data
|
| 237 |
+
|
| 238 |
+
# Update node-to-tree mapping
|
| 239 |
+
for node_id in tree_data.get("nodes", {}):
|
| 240 |
+
self._node_to_tree[node_id] = root_id
|
| 241 |
+
|
| 242 |
+
self._schedule_save()
|
| 243 |
+
logger.debug(f"Saved tree {root_id}")
|
| 244 |
+
|
| 245 |
+
def get_tree(self, root_id: str) -> dict | None:
|
| 246 |
+
"""Get a tree by its root ID."""
|
| 247 |
+
with self._lock:
|
| 248 |
+
return self._trees.get(root_id)
|
| 249 |
+
|
| 250 |
+
def register_node(self, node_id: str, root_id: str) -> None:
|
| 251 |
+
"""Register a node ID to a tree root."""
|
| 252 |
+
with self._lock:
|
| 253 |
+
self._node_to_tree[node_id] = root_id
|
| 254 |
+
self._schedule_save()
|
| 255 |
+
|
| 256 |
+
def remove_node_mappings(self, node_ids: list[str]) -> None:
|
| 257 |
+
"""Remove node IDs from the node-to-tree mapping."""
|
| 258 |
+
with self._lock:
|
| 259 |
+
for nid in node_ids:
|
| 260 |
+
self._node_to_tree.pop(nid, None)
|
| 261 |
+
self._schedule_save()
|
| 262 |
+
|
| 263 |
+
def remove_tree(self, root_id: str) -> None:
|
| 264 |
+
"""Remove a tree and all its node mappings from the store."""
|
| 265 |
+
with self._lock:
|
| 266 |
+
tree_data = self._trees.pop(root_id, None)
|
| 267 |
+
if tree_data:
|
| 268 |
+
for node_id in tree_data.get("nodes", {}):
|
| 269 |
+
self._node_to_tree.pop(node_id, None)
|
| 270 |
+
self._schedule_save()
|
| 271 |
+
|
| 272 |
+
def get_all_trees(self) -> dict[str, dict]:
|
| 273 |
+
"""Get all stored trees (public accessor)."""
|
| 274 |
+
with self._lock:
|
| 275 |
+
return dict(self._trees)
|
| 276 |
+
|
| 277 |
+
def get_node_mapping(self) -> dict[str, str]:
|
| 278 |
+
"""Get the node-to-tree mapping (public accessor)."""
|
| 279 |
+
with self._lock:
|
| 280 |
+
return dict(self._node_to_tree)
|
| 281 |
+
|
| 282 |
+
def sync_from_tree_data(
|
| 283 |
+
self, trees: dict[str, dict], node_to_tree: dict[str, str]
|
| 284 |
+
) -> None:
|
| 285 |
+
"""Sync internal tree state from external data and persist."""
|
| 286 |
+
with self._lock:
|
| 287 |
+
self._trees = trees
|
| 288 |
+
self._node_to_tree = node_to_tree
|
| 289 |
+
self._schedule_save()
|
Claude_Code/messaging/transcript.py
ADDED
|
@@ -0,0 +1,577 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Ordered transcript builder for messaging UIs (Telegram, etc.).
|
| 2 |
+
|
| 3 |
+
This module maintains an ordered list of "segments" that represent what the user
|
| 4 |
+
should see in the chat transcript: thinking, tool calls, tool results, subagent
|
| 5 |
+
headers, and assistant text. It is designed for in-place message editing where
|
| 6 |
+
the transcript grows over time and older content must be truncated.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import json
|
| 12 |
+
import os
|
| 13 |
+
from abc import ABC, abstractmethod
|
| 14 |
+
from collections import deque
|
| 15 |
+
from collections.abc import Callable, Iterable
|
| 16 |
+
from dataclasses import dataclass, field
|
| 17 |
+
from typing import Any
|
| 18 |
+
|
| 19 |
+
from loguru import logger
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _safe_json_dumps(obj: Any) -> str:
|
| 23 |
+
try:
|
| 24 |
+
return json.dumps(obj, indent=2, ensure_ascii=False, sort_keys=True)
|
| 25 |
+
except Exception:
|
| 26 |
+
return str(obj)
|
| 27 |
+
|
| 28 |
+
|
| 29 |
+
@dataclass
class Segment(ABC):
    """Base class for one transcript entry; subclasses implement render()."""

    # Discriminator naming the concrete segment type (e.g. "thinking", "text").
    kind: str

    @abstractmethod
    def render(self, ctx: RenderCtx) -> str: ...
| 35 |
+
|
| 36 |
+
|
| 37 |
+
@dataclass
class ThinkingSegment(Segment):
    """Accumulates streamed "thinking" text; rendered as a fenced code block."""

    def __init__(self) -> None:
        super().__init__(kind="thinking")
        self._parts: list[str] = []

    def append(self, t: str) -> None:
        if not t:
            return
        self._parts.append(t)

    @property
    def text(self) -> str:
        return "".join(self._parts)

    def render(self, ctx: RenderCtx) -> str:
        body = self.text or ""
        limit = ctx.thinking_tail_max
        if limit is not None and len(body) > limit:
            # Keep only the tail; prefix with an ellipsis marker.
            body = "..." + body[-(limit - 3):]
        escaped = ctx.escape_code(body)
        return f"💭 {ctx.bold('Thinking')}\n```\n{escaped}\n```"
|
| 57 |
+
|
| 58 |
+
|
| 59 |
+
@dataclass
class TextSegment(Segment):
    """Accumulates streamed assistant text; rendered through markdown."""

    def __init__(self) -> None:
        super().__init__(kind="text")
        self._parts: list[str] = []

    def append(self, t: str) -> None:
        if not t:
            return
        self._parts.append(t)

    @property
    def text(self) -> str:
        return "".join(self._parts)

    def render(self, ctx: RenderCtx) -> str:
        body = self.text or ""
        limit = ctx.text_tail_max
        if limit is not None and len(body) > limit:
            # Keep only the tail; prefix with an ellipsis marker.
            body = "..." + body[-(limit - 3):]
        return ctx.render_markdown(body)
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
@dataclass
class ToolCallSegment(Segment):
    """A single tool invocation; only the tool name is shown (never args)."""

    tool_use_id: str
    name: str
    closed: bool = False
    indent_level: int = 0

    def __init__(self, tool_use_id: str, name: str, *, indent_level: int = 0) -> None:
        super().__init__(kind="tool_call")
        self.tool_use_id = str(tool_use_id or "")
        self.name = str(name or "tool")
        self.indent_level = max(0, int(indent_level))

    def render(self, ctx: RenderCtx) -> str:
        # Per UX requirement: do not display tool args/results, only the tool call.
        indent = " " * self.indent_level
        label = ctx.code_inline(self.name)
        return f"{indent}🛠 {ctx.bold('Tool call:')} {label}"
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
@dataclass
class ToolResultSegment(Segment):
    """The output (or error) of a tool invocation, rendered as a code block."""

    tool_use_id: str
    name: str | None
    content_text: str
    is_error: bool = False

    def __init__(
        self,
        tool_use_id: str,
        content: Any,
        *,
        name: str | None = None,
        is_error: bool = False,
    ) -> None:
        super().__init__(kind="tool_result")
        self.tool_use_id = str(tool_use_id or "")
        self.name = None if name is None else str(name)
        self.is_error = bool(is_error)
        # Non-string payloads are serialized once, at construction time.
        if isinstance(content, str):
            self.content_text = content
        else:
            self.content_text = _safe_json_dumps(content)

    def render(self, ctx: RenderCtx) -> str:
        body = self.content_text or ""
        limit = ctx.tool_output_tail_max
        if limit is not None and len(body) > limit:
            body = "..." + body[-(limit - 3):]
        escaped = ctx.escape_code(body)
        header = "Tool error:" if self.is_error else "Tool result:"
        name_part = f" {ctx.code_inline(self.name)}" if self.name else ""
        return f"📤 {ctx.bold(header)}{name_part}\n```\n{escaped}\n```"
|
| 132 |
+
|
| 133 |
+
|
| 134 |
+
@dataclass
class SubagentSegment(Segment):
    """Header for a Task/subagent run; shows tool usage stats, never content."""

    description: str
    tool_calls: int = 0
    tools_used: set[str] = field(default_factory=set)
    current_tool: ToolCallSegment | None = None

    def __init__(self, description: str) -> None:
        super().__init__(kind="subagent")
        self.description = str(description or "Subagent")
        self.tool_calls = 0
        self.tools_used = set()
        self.current_tool = None

    def set_current_tool_call(self, tool_use_id: str, name: str) -> ToolCallSegment:
        """Record one tool invocation made by this subagent; return its segment."""
        tool_name = str(name or "tool")
        self.tools_used.add(tool_name)
        self.tool_calls += 1
        self.current_tool = ToolCallSegment(
            str(tool_use_id or ""), tool_name, indent_level=1
        )
        return self.current_tool

    def render(self, ctx: RenderCtx) -> str:
        pad = " "

        lines: list[str] = [
            f"🤖 {ctx.bold('Subagent:')} {ctx.code_inline(self.description)}"
        ]

        if self.current_tool is not None:
            try:
                tool_line = self.current_tool.render(ctx)
            except Exception:
                tool_line = ""
            if tool_line:
                lines.append(tool_line)

        names = sorted(self.tools_used)
        # Keep braces inside a code entity so MarkdownV2 doesn't require escaping them.
        tools_set_raw = "{" + ", ".join(names) + "}"

        lines.append(f"{pad}{ctx.bold('Tools used:')} {ctx.code_inline(tools_set_raw)}")
        lines.append(
            f"{pad}{ctx.bold('Tool calls:')} {ctx.code_inline(str(self.tool_calls))}"
        )
        return "\n".join(lines)
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
@dataclass
class ErrorSegment(Segment):
    """A provider/stream error surfaced inline in the transcript."""

    message: str

    def __init__(self, message: str) -> None:
        super().__init__(kind="error")
        self.message = str(message or "Unknown error")

    def render(self, ctx: RenderCtx) -> str:
        return f"⚠️ {ctx.bold('Error:')} {ctx.code_inline(self.message)}"
|
| 194 |
+
|
| 195 |
+
|
| 196 |
+
@dataclass
class RenderCtx:
    """Platform-specific rendering hooks plus per-segment truncation budgets.

    The callables wrap/escape text in the target platform's markup; the
    *_tail_max budgets bound how many characters of each segment kind
    survive tail-truncation (None disables truncation for that kind).
    """

    bold: Callable[[str], str]  # wrap text as bold
    code_inline: Callable[[str], str]  # wrap text as inline code
    escape_code: Callable[[str], str]  # escape text destined for a code block
    escape_text: Callable[[str], str]  # escape plain (non-markup) text
    render_markdown: Callable[[str], str]  # convert markdown to platform markup

    thinking_tail_max: int | None = 1000
    tool_input_tail_max: int | None = 1200
    tool_output_tail_max: int | None = 1600
    text_tail_max: int | None = 2000
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
class TranscriptBuffer:
    """Maintains an ordered, truncatable transcript of events.

    Parsed stream events are folded into typed segments (thinking, text,
    tool calls/results, subagent headers, errors). The buffer renders to a
    character budget, dropping the oldest segments first so in-place
    message edits always show the latest tail of the conversation.
    """

    def __init__(self, *, show_tool_results: bool = True) -> None:
        self._segments: list[Segment] = []
        # content_block index -> currently-open streaming segment
        self._open_thinking_by_index: dict[int, ThinkingSegment] = {}
        self._open_text_by_index: dict[int, TextSegment] = {}

        # content_block index -> tool call segment (for streaming tool args)
        self._open_tools_by_index: dict[int, ToolCallSegment] = {}

        # tool_use_id -> tool name (for tool_result labeling)
        self._tool_name_by_id: dict[str, str] = {}

        self._show_tool_results = bool(show_tool_results)

        # subagent context stack. Each entry is the Task tool_use_id we are waiting to close.
        self._subagent_stack: list[str] = []
        # Parallel stack of segments for rendering nested subagents.
        self._subagent_segments: list[SubagentSegment] = []
        self._debug_subagent_stack = os.getenv("DEBUG_SUBAGENT_STACK") == "1"

    def _in_subagent(self) -> bool:
        """True while any Task/subagent context is open."""
        return bool(self._subagent_stack)

    def _subagent_current(self) -> SubagentSegment | None:
        """Innermost open subagent segment, if any."""
        return self._subagent_segments[-1] if self._subagent_segments else None

    def _task_heading_from_input(self, inp: Any) -> str:
        """Derive a short subagent heading from the Task tool input.

        We never display full JSON args; only extract a short heading.
        """
        if isinstance(inp, dict):
            desc = str(inp.get("description", "") or "").strip()
            if desc:
                return desc
            subagent_type = str(inp.get("subagent_type", "") or "").strip()
            if subagent_type:
                return subagent_type
            typ = str(inp.get("type", "") or "").strip()
            if typ:
                return typ
        return "Subagent"

    def _subagent_push(self, tool_id: str, seg: SubagentSegment) -> None:
        """Open a subagent context keyed by the Task tool_use_id."""
        # Some providers can omit ids; still track depth for UI suppression.
        tool_id = (
            str(tool_id or "").strip() or f"__task_{len(self._subagent_stack) + 1}"
        )
        self._subagent_stack.append(tool_id)
        self._subagent_segments.append(seg)
        if self._debug_subagent_stack:
            # BUGFIX: loguru formats with str.format-style braces, not printf
            # "%"-placeholders; the previous "%r"/"%d" message logged the
            # literal placeholders and dropped the arguments. Use an f-string,
            # consistent with the rest of this module.
            logger.debug(
                f"SUBAGENT_STACK: push id={tool_id!r} "
                f"depth={len(self._subagent_stack)} "
                f"heading={getattr(seg, 'description', None)!r}"
            )

    def _subagent_pop(self, tool_id: str) -> bool:
        """Close the subagent context matching *tool_id*; return True if popped."""
        tool_id = str(tool_id or "").strip()
        if not self._subagent_stack:
            return False

        def _ids_roughly_match(stack_id: str, result_id: str) -> bool:
            if not stack_id or not result_id:
                return False
            if stack_id == result_id:
                return True
            # Some providers emit Task result ids with a suffix/prefix variant.
            # Treat those as the same logical Task invocation.
            return stack_id.startswith(result_id) or result_id.startswith(stack_id)

        if tool_id:
            # O(1) common case: LIFO - top of stack matches.
            if _ids_roughly_match(self._subagent_stack[-1], tool_id):
                self._subagent_stack.pop()
                if self._subagent_segments:
                    self._subagent_segments.pop()
                if self._debug_subagent_stack:
                    # BUGFIX: f-string instead of printf-style args (see _subagent_push).
                    logger.debug(
                        f"SUBAGENT_STACK: pop id={tool_id!r} "
                        f"depth={len(self._subagent_stack)} (LIFO)"
                    )
                return True
            # Pop to the matching id (defensive against non-LIFO emissions).
            idx = -1
            for i in range(len(self._subagent_stack) - 1, -1, -1):
                if _ids_roughly_match(self._subagent_stack[i], tool_id):
                    idx = i
                    break
            if idx < 0:
                return False
            while len(self._subagent_stack) > idx:
                popped = self._subagent_stack.pop()
                if self._subagent_segments:
                    self._subagent_segments.pop()
                if self._debug_subagent_stack:
                    # BUGFIX: f-string instead of printf-style args (see _subagent_push).
                    logger.debug(
                        f"SUBAGENT_STACK: pop id={popped!r} "
                        f"depth={len(self._subagent_stack)} (matched={tool_id!r})"
                    )
            return True

        # No id in result; only close if we have a synthetic top marker.
        if self._subagent_stack and self._subagent_stack[-1].startswith("__task_"):
            popped = self._subagent_stack.pop()
            if self._subagent_segments:
                self._subagent_segments.pop()
            if self._debug_subagent_stack:
                # BUGFIX: f-string instead of printf-style args (see _subagent_push).
                logger.debug(
                    f"SUBAGENT_STACK: pop id={popped!r} "
                    f"depth={len(self._subagent_stack)} (synthetic)"
                )
            return True
        return False

    def _ensure_thinking(self) -> ThinkingSegment:
        """Append and return a fresh thinking segment."""
        seg = ThinkingSegment()
        self._segments.append(seg)
        return seg

    def _ensure_text(self) -> TextSegment:
        """Append and return a fresh text segment."""
        seg = TextSegment()
        self._segments.append(seg)
        return seg

    def apply(self, ev: dict[str, Any]) -> None:
        """Apply a parsed event to the transcript."""
        et = ev.get("type")

        # Subagent rules: inside a Task/subagent, we only show tool calls/results.
        if self._in_subagent() and et in (
            "thinking_start",
            "thinking_delta",
            "thinking_chunk",
            "text_start",
            "text_delta",
            "text_chunk",
        ):
            return

        if et == "thinking_start":
            idx = int(ev.get("index", -1))
            if idx >= 0:
                # Defensive: if a provider reuses indices without emitting a stop,
                # close the previous open segment first.
                self.apply({"type": "block_stop", "index": idx})
            seg = self._ensure_thinking()
            if idx >= 0:
                self._open_thinking_by_index[idx] = seg
            return
        if et in ("thinking_delta", "thinking_chunk"):
            idx = int(ev.get("index", -1))
            seg = self._open_thinking_by_index.get(idx)
            if seg is None:
                seg = self._ensure_thinking()
                if idx >= 0:
                    self._open_thinking_by_index[idx] = seg
            seg.append(str(ev.get("text", "")))
            return
        if et == "thinking_stop":
            idx = int(ev.get("index", -1))
            if idx >= 0:
                self._open_thinking_by_index.pop(idx, None)
            return

        if et == "text_start":
            idx = int(ev.get("index", -1))
            if idx >= 0:
                self.apply({"type": "block_stop", "index": idx})
            seg = self._ensure_text()
            if idx >= 0:
                self._open_text_by_index[idx] = seg
            return
        if et in ("text_delta", "text_chunk"):
            idx = int(ev.get("index", -1))
            seg = self._open_text_by_index.get(idx)
            if seg is None:
                seg = self._ensure_text()
                if idx >= 0:
                    self._open_text_by_index[idx] = seg
            seg.append(str(ev.get("text", "")))
            return
        if et == "text_stop":
            idx = int(ev.get("index", -1))
            if idx >= 0:
                self._open_text_by_index.pop(idx, None)
            return

        if et == "tool_use_start":
            idx = int(ev.get("index", -1))
            if idx >= 0:
                self.apply({"type": "block_stop", "index": idx})
            tool_id = str(ev.get("id", "") or "").strip()
            name = str(ev.get("name", "") or "tool")
            if tool_id:
                self._tool_name_by_id[tool_id] = name

            # Task tool indicates subagent.
            if name == "Task":
                heading = self._task_heading_from_input(ev.get("input"))
                seg = SubagentSegment(heading)
                self._segments.append(seg)
                self._subagent_push(tool_id, seg)
                return

            # Normal tool call.
            if self._in_subagent():
                parent = self._subagent_current()
                if parent is not None:
                    seg = parent.set_current_tool_call(tool_id, name)
                else:
                    seg = ToolCallSegment(tool_id, name)
                    self._segments.append(seg)
            else:
                seg = ToolCallSegment(tool_id, name)
                self._segments.append(seg)

            if idx >= 0:
                self._open_tools_by_index[idx] = seg
            return

        if et == "tool_use_delta":
            # Args are never displayed, so streamed input is intentionally dropped.
            return

        if et == "tool_use_stop":
            idx = int(ev.get("index", -1))
            seg = self._open_tools_by_index.pop(idx, None)
            if seg is not None:
                seg.closed = True
            return

        if et == "block_stop":
            # Generic close: dispatch to the specific stop handler by index.
            idx = int(ev.get("index", -1))
            if idx in self._open_tools_by_index:
                self.apply({"type": "tool_use_stop", "index": idx})
                return
            if idx in self._open_thinking_by_index:
                self.apply({"type": "thinking_stop", "index": idx})
                return
            if idx in self._open_text_by_index:
                self.apply({"type": "text_stop", "index": idx})
                return
            return

        if et == "tool_use":
            # Non-streaming (complete) tool_use block.
            tool_id = str(ev.get("id", "") or "").strip()
            name = str(ev.get("name", "") or "tool")
            if tool_id:
                self._tool_name_by_id[tool_id] = name

            if name == "Task":
                heading = self._task_heading_from_input(ev.get("input"))
                seg = SubagentSegment(heading)
                self._segments.append(seg)
                self._subagent_push(tool_id, seg)
                return

            if self._in_subagent():
                parent = self._subagent_current()
                if parent is not None:
                    seg = parent.set_current_tool_call(tool_id, name)
                else:
                    seg = ToolCallSegment(tool_id, name)
                    self._segments.append(seg)
            else:
                seg = ToolCallSegment(tool_id, name)
                self._segments.append(seg)

            seg.closed = True
            return

        if et == "tool_result":
            tool_id = str(ev.get("tool_use_id", "") or "").strip()
            name = self._tool_name_by_id.get(tool_id)

            # If this was the Task tool result, close subagent context.
            if self._subagent_stack:
                popped = self._subagent_pop(tool_id)
                top = self._subagent_stack[-1] if self._subagent_stack else ""
                looks_like_task_id = "task" in tool_id.lower()
                # Some streams omit Task tool_use ids (synthetic stack ids), but include
                # a real Task id on tool_result (e.g. "functions.Task:0"). Reconcile that.
                if (
                    not popped
                    and tool_id
                    and top.startswith("__task_")
                    and (name in (None, "Task"))
                    and looks_like_task_id
                ):
                    self._subagent_pop("")

            if not self._show_tool_results:
                return

            seg = ToolResultSegment(
                tool_id,
                ev.get("content"),
                name=name,
                is_error=bool(ev.get("is_error", False)),
            )
            self._segments.append(seg)
            return

        if et == "error":
            self._segments.append(ErrorSegment(str(ev.get("message", ""))))
            return

    def render(self, ctx: RenderCtx, *, limit_chars: int, status: str | None) -> str:
        """Render transcript with truncation (drop oldest segments)."""
        # Filter out empty rendered segments.
        rendered: list[str] = []
        for seg in self._segments:
            try:
                out = seg.render(ctx)
            except Exception:
                continue
            if out:
                rendered.append(out)

        status_text = f"\n\n{status}" if status else ""
        prefix_marker = ctx.escape_text("... (truncated)\n")

        def _join(parts: Iterable[str], add_marker: bool) -> str:
            body = "\n".join(parts)
            if add_marker and body:
                body = prefix_marker + body
            return body + status_text if (body or status_text) else status_text

        # Fast path.
        candidate = _join(rendered, add_marker=False)
        if len(candidate) <= limit_chars:
            return candidate

        # Drop oldest segments until under limit (keep the tail).
        # Use deque for O(1) popleft; list.pop(0) would be O(n) per iteration.
        parts: deque[str] = deque(rendered)
        dropped = False
        last_part: str | None = None
        while parts:
            candidate = _join(parts, add_marker=True)
            if len(candidate) <= limit_chars:
                return candidate
            last_part = parts.popleft()
            dropped = True

        # Nothing fits - preserve tail of last segment instead of only marker+status.
        if dropped and last_part:
            budget = limit_chars - len(prefix_marker) - len(status_text)
            if budget > 20:
                if len(last_part) > budget:
                    tail = "..." + last_part[-(budget - 3):]
                else:
                    tail = last_part
                candidate = prefix_marker + tail + status_text
                if len(candidate) <= limit_chars:
                    return candidate

        # Fallback: marker + status only.
        if dropped:
            minimal = prefix_marker + status_text.lstrip("\n")
            if len(minimal) <= limit_chars:
                return minimal
        return status or ""
|
Claude_Code/messaging/transcription.py
ADDED
|
@@ -0,0 +1,228 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Voice note transcription for messaging platforms.
|
| 2 |
+
|
| 3 |
+
Supports:
|
| 4 |
+
- Local Whisper (cpu/cuda): Hugging Face transformers pipeline
|
| 5 |
+
- NVIDIA NIM: NVIDIA NIM Whisper/Parakeet
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import os
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Any
|
| 11 |
+
|
| 12 |
+
from loguru import logger
|
| 13 |
+
|
| 14 |
+
from config.settings import get_settings
|
| 15 |
+
|
| 16 |
+
# Max file size in bytes (25 MB)
MAX_AUDIO_SIZE_BYTES = 25 * 1024 * 1024

# NVIDIA NIM Whisper model mapping: (function_id, language_code)
# function_id selects the hosted NIM function; language_code is the tag passed
# along with the request ("" appears to mean auto/multilingual — TODO confirm).
_NIM_MODEL_MAP: dict[str, tuple[str, str]] = {
    "nvidia/parakeet-ctc-0.6b-zh-tw": ("8473f56d-51ef-473c-bb26-efd4f5def2bf", "zh-TW"),
    "nvidia/parakeet-ctc-0.6b-zh-cn": ("9add5ef7-322e-47e0-ad7a-5653fb8d259b", "zh-CN"),
    # NOTE(review): the literal string "None" looks like a placeholder rather
    # than a real function id — confirm before routing Spanish audio via NIM.
    "nvidia/parakeet-ctc-0.6b-es": ("None", "es-US"),
    "nvidia/parakeet-ctc-0.6b-vi": ("f3dff2bb-99f9-403d-a5f1-f574a757deb0", "vi-VN"),
    "nvidia/parakeet-ctc-1.1b-asr": ("1598d209-5e27-4d3c-8079-4751568b1081", "en-US"),
    "nvidia/parakeet-ctc-0.6b-asr": ("d8dd4e9b-fbf5-4fb0-9dba-8cf436c8d965", "en-US"),
    "nvidia/parakeet-1.1b-rnnt-multilingual-asr": (
        "71203149-d3b7-4460-8231-1be2543a1fca",
        "",
    ),
    "openai/whisper-large-v3": ("b702f636-f60c-4a3d-a6f4-f3568c13bd7d", "multi"),
}

# Short model names -> full Hugging Face model IDs (for local Whisper)
_MODEL_MAP: dict[str, str] = {
    "tiny": "openai/whisper-tiny",
    "base": "openai/whisper-base",
    "small": "openai/whisper-small",
    "medium": "openai/whisper-medium",
    "large-v2": "openai/whisper-large-v2",
    "large-v3": "openai/whisper-large-v3",
    "large-v3-turbo": "openai/whisper-large-v3-turbo",
}

# Lazy-loaded pipelines: (model_id, device) -> pipeline
_pipeline_cache: dict[tuple[str, str], Any] = {}
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
def _resolve_model_id(whisper_model: str) -> str:
    """Map a short model alias (e.g. "base") to its full Hugging Face model ID.

    Names not present in ``_MODEL_MAP`` are assumed to already be full model
    IDs and are returned unchanged.
    """
    full_id = _MODEL_MAP.get(whisper_model)
    return full_id if full_id is not None else whisper_model
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
def _get_pipeline(model_id: str, device: str) -> Any:
    """Lazy-load and cache a transformers ASR pipeline for (model_id, device).

    Args:
        model_id: Full Hugging Face model ID (e.g. "openai/whisper-base").
        device: "cpu" or "cuda". "cuda" silently falls back to CPU when
            torch.cuda.is_available() is False.

    Returns:
        A transformers "automatic-speech-recognition" pipeline (cached).

    Raises:
        ValueError: If device is not "cpu" or "cuda".
        ImportError: If torch/transformers (the voice_local extra) are missing.
    """
    # No `global` needed: _pipeline_cache is only mutated, never rebound.
    if device not in ("cpu", "cuda"):
        raise ValueError(f"whisper_device must be 'cpu' or 'cuda', got {device!r}")
    cache_key = (model_id, device)
    if cache_key not in _pipeline_cache:
        try:
            import torch
            from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline

            # Exporting the token lets transformers authenticate for gated models.
            token = get_settings().hf_token
            if token:
                os.environ["HF_TOKEN"] = token

            use_cuda = device == "cuda" and torch.cuda.is_available()
            pipe_device = "cuda:0" if use_cuda else "cpu"
            # fp16 on GPU halves memory; fp32 on CPU avoids half-precision issues.
            model_dtype = torch.float16 if use_cuda else torch.float32

            model = AutoModelForSpeechSeq2Seq.from_pretrained(
                model_id,
                dtype=model_dtype,
                low_cpu_mem_usage=True,
                attn_implementation="sdpa",
            )
            model = model.to(pipe_device)
            processor = AutoProcessor.from_pretrained(model_id)

            pipe = pipeline(
                "automatic-speech-recognition",
                model=model,
                tokenizer=processor.tokenizer,
                feature_extractor=processor.feature_extractor,
                device=pipe_device,
            )
            _pipeline_cache[cache_key] = pipe
            logger.debug(
                f"Loaded Whisper pipeline: model={model_id} device={pipe_device}"
            )
        except ImportError as e:
            raise ImportError(
                "Local Whisper requires the voice_local extra. Install with: uv sync --extra voice_local"
            ) from e
    return _pipeline_cache[cache_key]
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
def transcribe_audio(
    file_path: Path,
    mime_type: str,
    *,
    whisper_model: str = "base",
    whisper_device: str = "cpu",
) -> str:
    """
    Transcribe an audio file to text.

    Supports:
    - whisper_device="cpu"/"cuda": local Whisper (requires voice_local extra)
    - whisper_device="nvidia_nim": NVIDIA NIM Whisper API (requires voice extra)

    Args:
        file_path: Path to audio file (OGG, MP3, MP4, WAV, M4A supported)
        mime_type: MIME type of the audio (e.g. "audio/ogg"); currently
            unused, kept for interface stability.
        whisper_model: Model ID or short name (local) or NVIDIA NIM model
        whisper_device: "cpu" | "cuda" | "nvidia_nim" (default "cpu")

    Returns:
        Transcribed text

    Raises:
        FileNotFoundError: If file does not exist
        ValueError: If file exceeds MAX_AUDIO_SIZE_BYTES
        ImportError: If the required optional extra is not installed
    """
    if not file_path.exists():
        raise FileNotFoundError(f"Audio file not found: {file_path}")

    size = file_path.stat().st_size
    if size > MAX_AUDIO_SIZE_BYTES:
        raise ValueError(
            f"Audio file too large ({size} bytes). Max {MAX_AUDIO_SIZE_BYTES} bytes."
        )

    # Dispatch on backend: NIM is a remote gRPC call, everything else local.
    if whisper_device == "nvidia_nim":
        return _transcribe_nim(file_path, whisper_model)
    return _transcribe_local(file_path, whisper_model, whisper_device)
|
| 142 |
+
|
| 143 |
+
|
| 144 |
+
# Whisper expects 16 kHz sample rate
_WHISPER_SAMPLE_RATE = 16000


def _load_audio(file_path: Path) -> dict[str, Any]:
    """Decode an audio file into a mono 16 kHz waveform dict for the pipeline.

    Uses librosa for decoding, so no external ffmpeg binary is required.
    """
    import librosa

    samples, sample_rate = librosa.load(
        str(file_path), sr=_WHISPER_SAMPLE_RATE, mono=True
    )
    return {"array": samples, "sampling_rate": sample_rate}
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def _transcribe_local(
    file_path: Path,
    whisper_model: str,
    whisper_device: str,
    *,
    language: str = "en",
) -> str:
    """Transcribe using the local transformers Whisper pipeline.

    Args:
        file_path: Audio file to transcribe.
        whisper_model: Short alias or full Hugging Face model ID.
        whisper_device: "cpu" or "cuda".
        language: Whisper language hint. Defaults to "en", preserving the
            previously hard-coded behavior, but is now parameterizable for
            non-English models.

    Returns:
        Stripped transcript, or "(no speech detected)" when empty.
    """
    model_id = _resolve_model_id(whisper_model)
    pipe = _get_pipeline(model_id, whisper_device)
    audio = _load_audio(file_path)
    result = pipe(audio, generate_kwargs={"language": language, "task": "transcribe"})
    text = result.get("text", "") or ""
    # Some pipeline configurations return a list of chunk strings.
    if isinstance(text, list):
        text = " ".join(text) if text else ""
    result_text = text.strip()
    logger.debug(f"Local transcription: {len(result_text)} chars")
    return result_text or "(no speech detected)"
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
def _transcribe_nim(file_path: Path, model: str) -> str:
    """Transcribe using the NVIDIA NIM Whisper/Parakeet API via the Riva gRPC client.

    Args:
        file_path: Audio file whose raw bytes are uploaded.
        model: Key into _NIM_MODEL_MAP selecting the NVCF function ID and
            language code.

    Returns:
        Transcript text, or "(no speech detected)" when empty.

    Raises:
        ImportError: If the riva client (voice extra) is not installed.
        ValueError: If the model has no entry in _NIM_MODEL_MAP.
    """
    try:
        import riva.client
    except ImportError as e:
        raise ImportError(
            "NVIDIA NIM transcription requires the voice extra. "
            "Install with: uv sync --extra voice"
        ) from e

    settings = get_settings()
    api_key = settings.nvidia_nim_api_key
    # NOTE(review): api_key is not validated here; if unset, the request is
    # sent with "Bearer None" and fails server-side — consider raising early.

    # Look up function ID and language code from model mapping
    model_config = _NIM_MODEL_MAP.get(model)
    if not model_config:
        raise ValueError(
            f"No NVIDIA NIM config found for model: {model}. "
            f"Supported models: {', '.join(_NIM_MODEL_MAP.keys())}"
        )
    function_id, language_code = model_config

    # Riva server configuration: NVIDIA Cloud Functions gRPC gateway.
    server = "grpc.nvcf.nvidia.com:443"

    # Auth with SSL; the function-id metadata routes the call to the model.
    auth = riva.client.Auth(
        use_ssl=True,
        uri=server,
        metadata_args=[
            ["function-id", function_id],
            ["authorization", f"Bearer {api_key}"],
        ],
    )

    asr_service = riva.client.ASRService(auth)

    # Configure recognition - language_code from model config
    config = riva.client.RecognitionConfig(
        language_code=language_code,
        max_alternatives=1,
        verbatim_transcripts=True,
    )

    # Read audio file as raw bytes; presumably the service detects the
    # container/encoding itself — confirm against Riva ASR docs.
    with open(file_path, "rb") as f:
        data = f.read()

    # Perform offline (batch) recognition; blocks until the full result returns.
    response = asr_service.offline_recognize(data, config)

    # Extract text from response - use getattr for safe attribute access
    transcript = ""
    results = getattr(response, "results", None)
    if results and results[0].alternatives:
        transcript = results[0].alternatives[0].transcript

    logger.debug(f"NIM transcription: {len(transcript)} chars")
    return transcript or "(no speech detected)"
|
Claude_Code/messaging/trees/__init__.py
ADDED
|
@@ -0,0 +1,11 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Message tree data structures and queue management."""

from .data import MessageNode, MessageState, MessageTree
from .queue_manager import TreeQueueManager

# Public API of the messaging.trees package.
__all__ = [
    "MessageNode",
    "MessageState",
    "MessageTree",
    "TreeQueueManager",
]
|
Claude_Code/messaging/trees/data.py
ADDED
|
@@ -0,0 +1,482 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Tree data structures for message queue.
|
| 2 |
+
|
| 3 |
+
Contains MessageState, MessageNode, and MessageTree classes.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
from collections import deque
|
| 8 |
+
from contextlib import asynccontextmanager
|
| 9 |
+
from dataclasses import dataclass, field
|
| 10 |
+
from datetime import UTC, datetime
|
| 11 |
+
from enum import Enum
|
| 12 |
+
from typing import Any
|
| 13 |
+
|
| 14 |
+
from loguru import logger
|
| 15 |
+
|
| 16 |
+
from ..models import IncomingMessage
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
class _SnapshotQueue:
    """FIFO string queue with snapshot and targeted-removal helpers.

    Backed by a deque (ordering) plus a set (O(1) membership index).
    Assumes enqueued items are unique; the set index does not count
    duplicates.
    """

    def __init__(self) -> None:
        self._deque: deque[str] = deque()
        self._set: set[str] = set()

    async def put(self, item: str) -> None:
        """Append *item* (async-compatible wrapper; never actually blocks)."""
        self.put_nowait(item)

    def put_nowait(self, item: str) -> None:
        """Append *item* to the tail and record it in the membership index."""
        self._deque.append(item)
        self._set.add(item)

    def get_nowait(self) -> str:
        """Pop and return the head item; raise asyncio.QueueEmpty when empty."""
        if not self._deque:
            raise asyncio.QueueEmpty()
        head = self._deque.popleft()
        self._set.discard(head)
        return head

    def qsize(self) -> int:
        """Number of items currently queued."""
        return len(self._deque)

    def get_snapshot(self) -> list[str]:
        """Return a read-only copy of the queue contents in FIFO order."""
        return list(self._deque)

    def remove_if_present(self, item: str) -> bool:
        """Remove *item* from the queue if present. Returns True if removed.

        Membership is checked in O(1) via the set index; the actual removal
        rebuilds the deque in O(n).
        """
        if item not in self._set:
            return False
        self._set.discard(item)
        self._deque = deque(entry for entry in self._deque if entry != item)
        return True
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
class MessageState(Enum):
    """Lifecycle state of a message node in the tree."""

    PENDING = "pending"  # Queued, waiting to be processed
    IN_PROGRESS = "in_progress"  # Currently being processed by Claude
    COMPLETED = "completed"  # Processing finished successfully
    ERROR = "error"  # Processing failed (or was cancelled)
|
| 64 |
+
|
| 65 |
+
|
| 66 |
+
@dataclass
class MessageNode:
    """
    A node in the message tree.

    Each node represents a single message and tracks:
    - Its relationship to parent/children
    - Its processing state
    - Claude session information
    """

    node_id: str  # Unique ID (typically message_id)
    incoming: IncomingMessage  # The original message
    status_message_id: str  # Bot's status message ID
    state: MessageState = MessageState.PENDING
    parent_id: str | None = None  # Parent node ID (None for root)
    session_id: str | None = None  # Claude session ID (forked from parent)
    children_ids: list[str] = field(default_factory=list)
    created_at: datetime = field(default_factory=lambda: datetime.now(UTC))
    completed_at: datetime | None = None
    error_message: str | None = None
    context: Any = None  # Additional context if needed (not serialized)

    def set_context(self, context: Any) -> None:
        """Attach arbitrary auxiliary context to this node."""
        self.context = context

    def to_dict(self) -> dict:
        """Convert to dictionary for JSON serialization.

        Note: ``context`` is deliberately omitted (it may hold non-JSON types).
        """
        return {
            "node_id": self.node_id,
            "incoming": {
                "text": self.incoming.text,
                "chat_id": self.incoming.chat_id,
                "user_id": self.incoming.user_id,
                "message_id": self.incoming.message_id,
                "platform": self.incoming.platform,
                "reply_to_message_id": self.incoming.reply_to_message_id,
                "message_thread_id": self.incoming.message_thread_id,
                "username": self.incoming.username,
            },
            "status_message_id": self.status_message_id,
            "state": self.state.value,
            "parent_id": self.parent_id,
            "session_id": self.session_id,
            "children_ids": self.children_ids,
            "created_at": self.created_at.isoformat(),
            "completed_at": self.completed_at.isoformat()
            if self.completed_at
            else None,
            "error_message": self.error_message,
        }

    @classmethod
    def from_dict(cls, data: dict) -> "MessageNode":
        """Create from dictionary (JSON deserialization).

        The return annotation is a string: without
        ``from __future__ import annotations`` an unquoted ``MessageNode``
        would be evaluated during class-body execution and raise NameError.
        """
        incoming_data = data["incoming"]
        incoming = IncomingMessage(
            text=incoming_data["text"],
            chat_id=incoming_data["chat_id"],
            user_id=incoming_data["user_id"],
            message_id=incoming_data["message_id"],
            platform=incoming_data["platform"],
            reply_to_message_id=incoming_data.get("reply_to_message_id"),
            message_thread_id=incoming_data.get("message_thread_id"),
            username=incoming_data.get("username"),
        )
        return cls(
            node_id=data["node_id"],
            incoming=incoming,
            status_message_id=data["status_message_id"],
            state=MessageState(data["state"]),
            parent_id=data.get("parent_id"),
            session_id=data.get("session_id"),
            children_ids=data.get("children_ids", []),
            created_at=datetime.fromisoformat(data["created_at"]),
            completed_at=datetime.fromisoformat(data["completed_at"])
            if data.get("completed_at")
            else None,
            error_message=data.get("error_message"),
        )
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
class MessageTree:
    """
    A tree of message nodes with queue functionality.

    Provides:
    - O(1) node lookup via hashmap
    - Per-tree message queue
    - Thread-safe operations via asyncio.Lock

    Several methods are documented as "caller must hold lock": they do not
    acquire ``self._lock`` themselves and rely on the caller using
    ``with_lock()`` for atomicity.
    """

    def __init__(self, root_node: MessageNode):
        """
        Initialize tree with a root node.

        Args:
            root_node: The root message node
        """
        self.root_id = root_node.node_id
        self._nodes: dict[str, MessageNode] = {root_node.node_id: root_node}
        # Reverse index: bot status message ID -> node ID for O(1) lookups.
        self._status_to_node: dict[str, str] = {
            root_node.status_message_id: root_node.node_id
        }
        self._queue: _SnapshotQueue = _SnapshotQueue()
        self._lock = asyncio.Lock()
        self._is_processing = False
        self._current_node_id: str | None = None
        self._current_task: asyncio.Task | None = None

        logger.debug(f"Created MessageTree with root {self.root_id}")

    def set_current_task(self, task: asyncio.Task | None) -> None:
        """Set the current processing task. Caller must hold lock."""
        self._current_task = task

    @property
    def is_processing(self) -> bool:
        """Check if tree is currently processing a message."""
        return self._is_processing

    async def add_node(
        self,
        node_id: str,
        incoming: IncomingMessage,
        status_message_id: str,
        parent_id: str,
    ) -> MessageNode:
        """
        Add a child node to the tree.

        Args:
            node_id: Unique ID for the new node
            incoming: The incoming message
            status_message_id: Bot's status message ID
            parent_id: Parent node ID

        Returns:
            The created MessageNode

        Raises:
            ValueError: If parent_id is not in the tree.
        """
        async with self._lock:
            if parent_id not in self._nodes:
                raise ValueError(f"Parent node {parent_id} not found in tree")

            node = MessageNode(
                node_id=node_id,
                incoming=incoming,
                status_message_id=status_message_id,
                parent_id=parent_id,
                state=MessageState.PENDING,
            )

            self._nodes[node_id] = node
            self._status_to_node[status_message_id] = node_id
            self._nodes[parent_id].children_ids.append(node_id)

            logger.debug(f"Added node {node_id} as child of {parent_id}")
            return node

    def get_node(self, node_id: str) -> MessageNode | None:
        """Get a node by ID (O(1) lookup)."""
        return self._nodes.get(node_id)

    def get_root(self) -> MessageNode:
        """Get the root node."""
        return self._nodes[self.root_id]

    def get_children(self, node_id: str) -> list[MessageNode]:
        """Get all child nodes of a given node (missing children skipped)."""
        node = self._nodes.get(node_id)
        if not node:
            return []
        return [self._nodes[cid] for cid in node.children_ids if cid in self._nodes]

    def get_parent(self, node_id: str) -> MessageNode | None:
        """Get the parent node (None for root or unknown IDs)."""
        node = self._nodes.get(node_id)
        if not node or not node.parent_id:
            return None
        return self._nodes.get(node.parent_id)

    def get_parent_session_id(self, node_id: str) -> str | None:
        """
        Get the parent's session ID for forking.

        Returns None for root nodes.
        """
        parent = self.get_parent(node_id)
        return parent.session_id if parent else None

    async def update_state(
        self,
        node_id: str,
        state: MessageState,
        session_id: str | None = None,
        error_message: str | None = None,
    ) -> None:
        """Update a node's state; stamps completed_at on terminal states."""
        async with self._lock:
            node = self._nodes.get(node_id)
            if not node:
                logger.warning(f"Node {node_id} not found for state update")
                return

            node.state = state
            if session_id:
                node.session_id = session_id
            if error_message:
                node.error_message = error_message
            if state in (MessageState.COMPLETED, MessageState.ERROR):
                node.completed_at = datetime.now(UTC)

            logger.debug(f"Node {node_id} state -> {state.value}")

    async def enqueue(self, node_id: str) -> int:
        """
        Add a node to the processing queue.

        Returns:
            Queue position (1-indexed)
        """
        async with self._lock:
            await self._queue.put(node_id)
            position = self._queue.qsize()
            logger.debug(f"Enqueued node {node_id}, position {position}")
            return position

    async def dequeue(self) -> str | None:
        """
        Get the next node ID from the queue.

        Returns None if queue is empty.

        NOTE: does not acquire self._lock — presumably called only from the
        single processing loop; confirm callers don't need stricter ordering.
        """
        try:
            return self._queue.get_nowait()
        except asyncio.QueueEmpty:
            return None

    async def get_queue_snapshot(self) -> list[str]:
        """
        Get a snapshot of the current queue order.

        Returns:
            List of node IDs in FIFO order.
        """
        async with self._lock:
            return self._queue.get_snapshot()

    def get_queue_size(self) -> int:
        """Get number of messages waiting in queue."""
        return self._queue.qsize()

    def remove_from_queue(self, node_id: str) -> bool:
        """
        Remove node_id from the internal queue if present.

        Caller must hold the tree lock (e.g. via with_lock).
        Returns True if node was removed, False if not in queue.
        """
        return self._queue.remove_if_present(node_id)

    @asynccontextmanager
    async def with_lock(self):
        """Async context manager for tree lock. Use when multiple operations need atomicity."""
        async with self._lock:
            yield

    def set_processing_state(self, node_id: str | None, is_processing: bool) -> None:
        """Set processing state. Caller must hold lock for consistency with queue operations."""
        self._is_processing = is_processing
        self._current_node_id = node_id if is_processing else None

    def clear_current_node(self) -> None:
        """Clear the currently processing node ID. Caller must hold lock."""
        self._current_node_id = None

    def is_current_node(self, node_id: str) -> bool:
        """Check if node_id is the currently processing node."""
        return self._current_node_id == node_id

    def put_queue_unlocked(self, node_id: str) -> None:
        """Add node to queue. Caller must hold lock (e.g. via with_lock)."""
        self._queue.put_nowait(node_id)

    def cancel_current_task(self) -> bool:
        """Cancel the currently running task. Returns True if a task was cancelled."""
        if self._current_task and not self._current_task.done():
            self._current_task.cancel()
            return True
        return False

    def set_node_error_sync(self, node: MessageNode, error_message: str) -> None:
        """Synchronously mark a node as ERROR. Caller must ensure no concurrent access."""
        node.state = MessageState.ERROR
        node.error_message = error_message
        node.completed_at = datetime.now(UTC)

    def drain_queue_and_mark_cancelled(
        self, error_message: str = "Cancelled by user"
    ) -> list[MessageNode]:
        """
        Drain the queue, mark each node as ERROR, and return affected nodes.
        Does not acquire lock; caller must ensure no concurrent queue access.
        """
        nodes: list[MessageNode] = []
        while True:
            try:
                node_id = self._queue.get_nowait()
            except asyncio.QueueEmpty:
                break
            node = self._nodes.get(node_id)
            if node:
                self.set_node_error_sync(node, error_message)
                nodes.append(node)
        return nodes

    def reset_processing_state(self) -> None:
        """Reset processing flags after cancel/cleanup."""
        self._is_processing = False
        self._current_node_id = None

    @property
    def current_node_id(self) -> str | None:
        """Get the ID of the node currently being processed."""
        return self._current_node_id

    def to_dict(self) -> dict:
        """Serialize tree to dictionary (nodes only; runtime queue/task state
        is intentionally not serialized)."""
        return {
            "root_id": self.root_id,
            "nodes": {nid: node.to_dict() for nid, node in self._nodes.items()},
        }

    def _add_node_from_dict(self, node: MessageNode) -> None:
        """Register a deserialized node into the tree's internal indices."""
        self._nodes[node.node_id] = node
        self._status_to_node[node.status_message_id] = node.node_id

    @classmethod
    def from_dict(cls, data: dict) -> "MessageTree":
        """Deserialize tree from dictionary.

        The return annotation is quoted: without
        ``from __future__ import annotations`` an unquoted ``MessageTree``
        would be evaluated during class-body execution and raise NameError.

        Note: the queue and processing state start empty/idle — only the
        node structure is restored.
        """
        root_id = data["root_id"]
        nodes_data = data["nodes"]

        # Create root node first
        root_node = MessageNode.from_dict(nodes_data[root_id])
        tree = cls(root_node)

        # Add remaining nodes and build status->node index
        for node_id, node_data in nodes_data.items():
            if node_id != root_id:
                node = MessageNode.from_dict(node_data)
                tree._add_node_from_dict(node)

        return tree

    def all_nodes(self) -> list[MessageNode]:
        """Get all nodes in the tree."""
        return list(self._nodes.values())

    def has_node(self, node_id: str) -> bool:
        """Check if a node exists in this tree."""
        return node_id in self._nodes

    def find_node_by_status_message(self, status_msg_id: str) -> MessageNode | None:
        """Find the node that has this status message ID (O(1) lookup)."""
        node_id = self._status_to_node.get(status_msg_id)
        return self._nodes.get(node_id) if node_id else None

    def get_descendants(self, node_id: str) -> list[str]:
        """
        Get node_id and all descendant IDs (subtree), via iterative DFS.

        Returns:
            List of node IDs including the given node.
        """
        if node_id not in self._nodes:
            return []
        result: list[str] = []
        stack = [node_id]
        while stack:
            nid = stack.pop()
            result.append(nid)
            node = self._nodes.get(nid)
            if node:
                stack.extend(node.children_ids)
        return result

    def remove_branch(self, branch_root_id: str) -> list[MessageNode]:
        """
        Remove a subtree (branch_root and all descendants) from the tree.

        Updates parent's children_ids. Caller must hold lock for consistency.
        Does not acquire lock internally.

        Returns:
            List of removed nodes.
        """
        if branch_root_id not in self._nodes:
            return []

        parent = self.get_parent(branch_root_id)
        removed = []
        for nid in self.get_descendants(branch_root_id):
            node = self._nodes.get(nid)
            if node:
                removed.append(node)
                del self._nodes[nid]
                del self._status_to_node[node.status_message_id]

        if parent and branch_root_id in parent.children_ids:
            parent.children_ids = [
                c for c in parent.children_ids if c != branch_root_id
            ]

        logger.debug(f"Removed branch {branch_root_id} ({len(removed)} nodes)")
        return removed
|
Claude_Code/messaging/trees/processor.py
ADDED
|
@@ -0,0 +1,165 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Async queue processor for message trees.
|
| 2 |
+
|
| 3 |
+
Handles the async processing lifecycle of tree nodes.
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import asyncio
|
| 7 |
+
from collections.abc import Awaitable, Callable
|
| 8 |
+
|
| 9 |
+
from loguru import logger
|
| 10 |
+
|
| 11 |
+
from providers.common import get_user_facing_error_message
|
| 12 |
+
|
| 13 |
+
from .data import MessageNode, MessageState, MessageTree
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
class TreeQueueProcessor:
    """
    Handles async queue processing for a single tree.

    Separates the async processing lifecycle (task creation, queue draining,
    error propagation) from the data management that lives in MessageTree.
    """

    def __init__(
        self,
        queue_update_callback: Callable[[MessageTree], Awaitable[None]] | None = None,
        node_started_callback: Callable[[MessageTree, str], Awaitable[None]]
        | None = None,
    ):
        # Invoked after queue membership changes so UIs can refresh positions.
        self._queue_update_callback = queue_update_callback
        # Invoked when a previously queued node begins processing.
        self._node_started_callback = node_started_callback

    def set_queue_update_callback(
        self,
        queue_update_callback: Callable[[MessageTree], Awaitable[None]] | None,
    ) -> None:
        """Update the callback used to refresh queue positions."""
        self._queue_update_callback = queue_update_callback

    def set_node_started_callback(
        self,
        node_started_callback: Callable[[MessageTree, str], Awaitable[None]] | None,
    ) -> None:
        """Update the callback used when a queued node starts processing."""
        self._node_started_callback = node_started_callback

    async def _notify_queue_updated(self, tree: MessageTree) -> None:
        """Invoke queue update callback if set.

        Callback failures are logged and swallowed so a broken UI hook can
        never stall or break queue processing.
        """
        if not self._queue_update_callback:
            return
        try:
            await self._queue_update_callback(tree)
        except Exception as e:
            logger.warning(f"Queue update callback failed: {e}")

    async def _notify_node_started(self, tree: MessageTree, node_id: str) -> None:
        """Invoke node started callback if set.

        Callback failures are logged and swallowed (best-effort notification).
        """
        if not self._node_started_callback:
            return
        try:
            await self._node_started_callback(tree, node_id)
        except Exception as e:
            logger.warning(f"Node started callback failed: {e}")

    async def process_node(
        self,
        tree: MessageTree,
        node: MessageNode,
        processor: Callable[[str, MessageNode], Awaitable[None]],
    ) -> None:
        """Process a single node and then check the queue.

        Args:
            tree: The message tree the node belongs to.
            node: The node to process.
            processor: Async function that performs the actual work.

        Cancellation is re-raised untouched; any other exception marks the
        node as ERROR with a user-facing message. In every non-cancelled
        outcome the queue is drained afterwards via _process_next.
        """
        # Skip if already in terminal state (e.g. from error propagation).
        if node.state == MessageState.ERROR:
            logger.info(
                f"Skipping node {node.node_id} as it is already in state {node.state}"
            )
            # Still need to check for next messages
            await self._process_next(tree, processor)
            return

        try:
            await processor(node.node_id, node)
        except asyncio.CancelledError:
            # Propagate cancellation; the finally-block below does not run
            # _process_next in a way that resumes the cancelled work.
            logger.info(f"Task for node {node.node_id} was cancelled")
            raise
        except Exception as e:
            logger.error(f"Error processing node {node.node_id}: {e}")
            await tree.update_state(
                node.node_id,
                MessageState.ERROR,
                error_message=get_user_facing_error_message(e),
            )
        finally:
            async with tree.with_lock():
                tree.clear_current_node()
            # Check if there are more messages in the queue
            await self._process_next(tree, processor)

    async def _process_next(
        self,
        tree: MessageTree,
        processor: Callable[[str, MessageNode], Awaitable[None]],
    ) -> None:
        """Process the next message in queue, if any.

        Dequeues under the tree lock; if the queue is empty the tree is
        marked free, otherwise the dequeued node is marked as processing
        and its task is started outside the lock.
        """
        next_node_id = None
        async with tree.with_lock():
            next_node_id = await tree.dequeue()

            if not next_node_id:
                tree.set_processing_state(None, False)
                logger.debug(f"Tree {tree.root_id} queue empty, marking as free")
                return

            tree.set_processing_state(next_node_id, True)
            logger.info(f"Processing next queued node {next_node_id}")

        # Process next node (outside lock so the processor can re-acquire it)
        node = tree.get_node(next_node_id)
        if node is None:
            # BUG FIX: previously a missing node left the tree marked as
            # processing with no running task, stalling the queue forever.
            # Skip it and try the next queued node (or mark the tree free).
            logger.warning(
                f"Dequeued node {next_node_id} not found in tree {tree.root_id}; skipping"
            )
            await self._process_next(tree, processor)
            return

        tree.set_current_task(
            asyncio.create_task(self.process_node(tree, node, processor))
        )

        # Notify that this node has started processing and refresh queue positions.
        await self._notify_node_started(tree, next_node_id)
        await self._notify_queue_updated(tree)

    async def enqueue_and_start(
        self,
        tree: MessageTree,
        node_id: str,
        processor: Callable[[str, MessageNode], Awaitable[None]],
    ) -> bool:
        """
        Enqueue a node or start processing immediately.

        Args:
            tree: The message tree
            node_id: Node to process
            processor: Async function to process the node

        Returns:
            True if queued, False if processing immediately
        """
        async with tree.with_lock():
            if tree.is_processing:
                tree.put_queue_unlocked(node_id)
                queue_size = tree.get_queue_size()
                logger.info(f"Queued node {node_id}, position {queue_size}")
                return True
            else:
                tree.set_processing_state(node_id, True)

        # Process outside the lock
        node = tree.get_node(node_id)
        if node is None:
            # BUG FIX: previously a missing node left is_processing stuck at
            # True with no task, so every later message queued behind a slot
            # that would never be released. Release the slot (and drain any
            # node that raced into the queue meanwhile) via _process_next.
            logger.warning(f"Node {node_id} not found in tree; nothing to process")
            await self._process_next(tree, processor)
            return False

        tree.set_current_task(
            asyncio.create_task(self.process_node(tree, node, processor))
        )
        return False

    def cancel_current(self, tree: MessageTree) -> bool:
        """Cancel the currently running task in a tree.

        Returns whatever MessageTree.cancel_current_task reports
        (presumably True when a task was actually cancelled — confirm
        against the MessageTree implementation).
        """
        return tree.cancel_current_task()
|