Spaces:
Sleeping
Sleeping
first-features
#1
by macayaven - opened
- .gitignore +32 -0
- .python-version +1 -0
- README.md +112 -4
- agents/__init__.py +7 -0
- agents/cbt_agent.py +242 -0
- app.py +966 -4
- cbt_knowledge/__init__.py +17 -0
- cbt_knowledge/distortions.py +328 -0
- cbt_knowledge/reframing_tools.py +201 -0
- locales/en.json +39 -0
- locales/es.json +39 -0
- mypy.ini +20 -0
- requirements-dev.txt +4 -0
- requirements.txt +21 -0
- ruff.toml +12 -0
- setup.cfg +16 -0
- setup.py +4 -0
- tests/test_agent.py +28 -0
- tests/test_distortions.py +25 -0
- tests/test_reframing_tools.py +8 -0
- ui_components/__init__.py +8 -0
- ui_components/landing.py +42 -0
- ui_components/learn.py +62 -0
.gitignore
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Byte-compiled / cache
|
| 2 |
+
__pycache__/
|
| 3 |
+
*.py[cod]
|
| 4 |
+
*.pyo
|
| 5 |
+
*.pyd
|
| 6 |
+
|
| 7 |
+
# Virtual environments
|
| 8 |
+
.venv/
|
| 9 |
+
venv/
|
| 10 |
+
ENV/
|
| 11 |
+
|
| 12 |
+
# Packaging / build
|
| 13 |
+
*.egg-info/
|
| 14 |
+
dist/
|
| 15 |
+
build/
|
| 16 |
+
|
| 17 |
+
# OS/editor
|
| 18 |
+
.DS_Store
|
| 19 |
+
.idea/
|
| 20 |
+
.vscode/
|
| 21 |
+
|
| 22 |
+
# Local env/config
|
| 23 |
+
.env
|
| 24 |
+
.env.*
|
| 25 |
+
|
| 26 |
+
.claude/
|
| 27 |
+
.codex/
|
| 28 |
+
|
| 29 |
+
CLAUDE.md
|
| 30 |
+
AGENTS.md
|
| 31 |
+
|
| 32 |
+
.*_cache/
|
.python-version
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
3.10.11
|
README.md
CHANGED
|
@@ -1,12 +1,120 @@
|
|
| 1 |
---
|
| 2 |
-
title: Reframe
|
| 3 |
-
emoji:
|
| 4 |
-
colorFrom:
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.47.2
|
|
|
|
| 8 |
app_file: app.py
|
| 9 |
pinned: false
|
|
|
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
---
|
| 2 |
+
title: Reframe Cognitive Reframing Assistant
|
| 3 |
+
emoji: 🧠
|
| 4 |
+
colorFrom: blue
|
| 5 |
colorTo: purple
|
| 6 |
sdk: gradio
|
| 7 |
sdk_version: 5.47.2
|
| 8 |
+
python_version: 3.10
|
| 9 |
app_file: app.py
|
| 10 |
pinned: false
|
| 11 |
+
license: mit
|
| 12 |
+
hf_oauth: true
|
| 13 |
---
|
| 14 |
|
| 15 |
+
# 🧠 reframe: Cognitive Reframing Assistant
|
| 16 |
+
|
| 17 |
+
A tool inspired by CBT (Cognitive Behavioral Therapy) designed to help identify and reframe cognitive distortions.
|
| 18 |
+
|
| 19 |
+
## 🎯 Features
|
| 20 |
+
|
| 21 |
+
- **Educational Landing Page**: Learn about CBT and cognitive reframing
|
| 22 |
+
- **Interactive Chat**: Share thoughts and receive balanced perspectives
|
| 23 |
+
- **Pattern Recognition**: Identifies cognitive distortions in thinking patterns
|
| 24 |
+
- **Similar Situations**: Shows relatable examples with reframed perspectives
|
| 25 |
+
- **Learning Center**: Explore 13+ cognitive distortions with examples
|
| 26 |
+
- **Multilingual**: Support for English and Spanish
|
| 27 |
+
- **Privacy-First**: No message content stored; only pseudonymous usage counters
|
| 28 |
+
- **Agentic LLM**: Uses Hugging Face Inference API (required for chat)
|
| 29 |
+
- **Streaming Chat**: Incremental assistant messages for a smoother UX
|
| 30 |
+
- **Lightweight Memory**: Assistant considers the last N turns (default 6)
|
| 31 |
+
|
| 32 |
+
## 📁 Project Structure
|
| 33 |
+
|
| 34 |
+
```
|
| 35 |
+
re-frame-gradio/
|
| 36 |
+
├── app.py # Main Gradio application
|
| 37 |
+
├── requirements.txt # Python dependencies
|
| 38 |
+
├── README.md # This file
|
| 39 |
+
├── cbt_knowledge/ # CBT knowledge base
|
| 40 |
+
│ ├── __init__.py
|
| 41 |
+
│ ├── distortions.py # Cognitive distortions database
|
| 42 |
+
│ └── reframing_tools.py # Reframing strategies
|
| 43 |
+
├── ui_components/ # UI components
|
| 44 |
+
│ ├── __init__.py
|
| 45 |
+
│ ├── landing.py # Landing page
|
| 46 |
+
│ └── learn.py # Learning center
|
| 47 |
+
└── locales/ # Translations (optional)
|
| 48 |
+
├── en.json # English
|
| 49 |
+
└── es.json # Spanish
|
| 50 |
+
```
|
| 51 |
+
|
| 52 |
+
## 🧩 Cognitive Distortions Covered
|
| 53 |
+
|
| 54 |
+
1. **All-or-Nothing Thinking** - Black and white thinking
|
| 55 |
+
2. **Fortune Telling** - Negative predictions
|
| 56 |
+
3. **Catastrophizing** - Expecting the worst
|
| 57 |
+
4. **Mind Reading** - Assuming others' thoughts
|
| 58 |
+
5. **Mental Filter** - Focusing on negatives
|
| 59 |
+
6. **Should Statements** - Rigid rules
|
| 60 |
+
7. **Labeling** - Global negative labels
|
| 61 |
+
8. **Personalization** - Self-blame
|
| 62 |
+
9. **Emotional Reasoning** - Feelings as facts
|
| 63 |
+
10. **Discounting Positives** - Dismissing good things
|
| 64 |
+
11. **Jumping to Conclusions** - Assumptions without evidence
|
| 65 |
+
12. **Magnification/Minimization** - Distorting importance
|
| 66 |
+
13. **Overgeneralization** - Broad conclusions from single events
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
## 🤝 How It Works
|
| 70 |
+
|
| 71 |
+
1. **Share Your Thoughts**: Type what's on your mind in the chat
|
| 72 |
+
2. **Pattern Detection**: The system identifies thinking patterns
|
| 73 |
+
3. **Balanced Perspectives**: Get alternative ways to view the situation
|
| 74 |
+
4. **Similar Examples**: See how others reframed similar thoughts
|
| 75 |
+
5. **Learn More**: Explore specific distortions in the Learn tab
|
| 76 |
+
|
| 77 |
+
## ⚠️ Important Disclaimers
|
| 78 |
+
|
| 79 |
+
- This is intended to be a **helpful tool**, **NEVER** a replacement for professional therapy
|
| 80 |
+
- **NOT** intended for crisis situations or medical advice
|
| 81 |
+
- If you're in crisis, please contact emergency services or a crisis helpline
|
| 82 |
+
- No message content is stored. For quotas and observability, the app records per-user interaction counters using salted hashes and aggregated metrics (no raw usernames, emails, or IPs).
|
| 83 |
+
|
| 84 |
+
## 🔐 Login, Quotas, and Owner Controls
|
| 85 |
+
|
| 86 |
+
This Space supports "Login with Hugging Face" for stable and secure per-user quotas.
|
| 87 |
+
|
| 88 |
+
- The UI includes a Login button. Once logged in, the app receives an OAuth profile and derives a pseudonymous, salted user id.
|
| 89 |
+
- Per-user daily interactions are capped via `HF_AGENT_MAX_INTERACTIONS_PER_USER` (default: 12).
|
| 90 |
+
- An Owner tab is visible only to the Space owner.
|
| 91 |
+
- Environment variables:
|
| 92 |
+
- `HUGGINGFACEHUB_API_TOKEN` — required: Inference API token (set as a Secret). `HF_TOKEN` is also accepted, but prefer this name.
|
| 93 |
+
- `HF_AGENT_MAX_INTERACTIONS_PER_USER` — per-user daily cap (default: 12)
|
| 94 |
+
- `USAGE_SALT` — required: random salt for hashing user identifiers
|
| 95 |
+
- `AGENT_CALL_LOG_PATH` — path to persist counters (e.g., `/data/agent_calls.json`)
|
| 96 |
+
- `APP_METRICS_PATH` — path to privacy-preserving metrics (e.g., `/data/app_metrics.json`)
|
| 97 |
+
- `HF_HOME` — recommended: set to `/data/.huggingface` to persist cache across restarts
|
| 98 |
+
|
| 99 |
+
Notes:
|
| 100 |
+
- The app never stores raw usernames, emails, IPs, or message content.
|
| 101 |
+
- Configure persistent storage for those JSON files if you want counters to survive Space restarts. If using persistent storage, set `HF_HOME=/data/.huggingface` so model/cache downloads persist too.
|
| 102 |
+
|
| 103 |
+
### Owner-only Admin
|
| 104 |
+
|
| 105 |
+
Set the environment variable `OWNER_USER` to your Hugging Face username. The Owner tab will only be shown when the logged-in user matches this username.
|
| 106 |
+
|
| 107 |
+
- `OWNER_USER` — required to enable Owner tab (e.g., `carlos`)
|
| 108 |
+
|
| 109 |
+
|
| 110 |
+
## 🙏 Acknowledgments
|
| 111 |
+
|
| 112 |
+
- Inspired by CBT principles
|
| 113 |
+
- Original prototype built with Google ADK
|
| 114 |
+
- Built with Gradio and Hugging Face
|
| 115 |
+
|
| 116 |
+
## 📄 License
|
| 117 |
+
|
| 118 |
+
MIT License - See LICENSE file for details
|
| 119 |
+
|
| 120 |
+
---
|
agents/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CBT Agent module backed by Hugging Face Inference API
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from .cbt_agent import CBTAgent
|
| 6 |
+
|
| 7 |
+
__all__ = ['CBTAgent']
|
agents/cbt_agent.py
ADDED
|
@@ -0,0 +1,242 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CBT Agent implementation using Hugging Face Inference API directly.
|
| 3 |
+
|
| 4 |
+
This avoids smolagents version drift and works cleanly on Spaces.
|
| 5 |
+
Adds optional provider, billing, and timeout support plus true
|
| 6 |
+
token streaming for lower-latency responses in Spaces.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import os
|
| 10 |
+
from collections.abc import Iterable
|
| 11 |
+
|
| 12 |
+
from huggingface_hub import InferenceClient
|
| 13 |
+
|
| 14 |
+
try: # Newer hub versions expose a specific timeout error
|
| 15 |
+
from huggingface_hub import InferenceTimeoutError
|
| 16 |
+
except Exception: # pragma: no cover
|
| 17 |
+
|
| 18 |
+
class InferenceTimeoutError(Exception): # type: ignore
|
| 19 |
+
pass
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class CBTAgent:
|
| 23 |
+
"""Agent that composes warm CBT responses using HF Inference API.
|
| 24 |
+
|
| 25 |
+
Args:
|
| 26 |
+
model_name: HF model id or endpoint URL to use.
|
| 27 |
+
provider: Optional provider routing (e.g., "together", "groq", "hf-inference").
|
| 28 |
+
bill_to: Optional org to bill (Enterprise Hub).
|
| 29 |
+
timeout: Request timeout in seconds.
|
| 30 |
+
use_local: Ignored; kept for backward compatibility.
|
| 31 |
+
"""
|
| 32 |
+
|
| 33 |
+
def __init__(
|
| 34 |
+
self,
|
| 35 |
+
model_name: str | None = None,
|
| 36 |
+
*,
|
| 37 |
+
provider: str | None = None,
|
| 38 |
+
bill_to: str | None = None,
|
| 39 |
+
timeout: float | None = None,
|
| 40 |
+
use_local: bool = False, # use_local ignored
|
| 41 |
+
):
|
| 42 |
+
self.model_name = model_name or os.getenv("MODEL_NAME", "meta-llama/Llama-3.1-8B-Instruct")
|
| 43 |
+
|
| 44 |
+
# Optional routing / billing / timeout via env overrides
|
| 45 |
+
self.provider = provider or os.getenv("HF_PROVIDER")
|
| 46 |
+
self.bill_to = bill_to or os.getenv("HF_BILL_TO")
|
| 47 |
+
# Parse env timeout if not provided
|
| 48 |
+
if timeout is None:
|
| 49 |
+
try:
|
| 50 |
+
timeout_env = os.getenv("HF_TIMEOUT_SECONDS")
|
| 51 |
+
timeout = float(timeout_env) if timeout_env else None
|
| 52 |
+
except Exception:
|
| 53 |
+
timeout = None
|
| 54 |
+
self.timeout = timeout
|
| 55 |
+
|
| 56 |
+
token = os.getenv("HUGGINGFACEHUB_API_TOKEN") or os.getenv("HF_TOKEN")
|
| 57 |
+
if not token:
|
| 58 |
+
raise RuntimeError("HF token missing: set HF_TOKEN or HUGGINGFACEHUB_API_TOKEN")
|
| 59 |
+
|
| 60 |
+
# Initialize client with explicit named params to satisfy typing
|
| 61 |
+
self.client = InferenceClient(
|
| 62 |
+
model=self.model_name,
|
| 63 |
+
provider=(self.provider if self.provider is not None else None), # type: ignore[arg-type]
|
| 64 |
+
token=token,
|
| 65 |
+
timeout=self.timeout,
|
| 66 |
+
bill_to=self.bill_to,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
def _get_system_prompt(self) -> str:
|
| 70 |
+
return (
|
| 71 |
+
"You are a compassionate CBT assistant. Identify likely cognitive distortions, "
|
| 72 |
+
"validate feelings, and suggest balanced, realistic alternatives. Keep responses "
|
| 73 |
+
"concise, supportive, and practical. This is self-help guidance, not therapy."
|
| 74 |
+
)
|
| 75 |
+
|
| 76 |
+
def _generate(self, prompt: str, messages: list[dict] | None = None) -> str:
|
| 77 |
+
# Prefer chat-completions if available
|
| 78 |
+
try:
|
| 79 |
+
msgs = messages or [
|
| 80 |
+
{"role": "system", "content": self._get_system_prompt()},
|
| 81 |
+
{"role": "user", "content": prompt},
|
| 82 |
+
]
|
| 83 |
+
if hasattr(self.client, "chat") and hasattr(self.client.chat, "completions"):
|
| 84 |
+
resp = self.client.chat.completions.create(
|
| 85 |
+
model=self.model_name,
|
| 86 |
+
messages=msgs,
|
| 87 |
+
max_tokens=400,
|
| 88 |
+
temperature=0.6,
|
| 89 |
+
)
|
| 90 |
+
msg = getattr(resp.choices[0], "message", None)
|
| 91 |
+
return getattr(msg, "content", "") or ""
|
| 92 |
+
if hasattr(self.client, "chat_completion"):
|
| 93 |
+
resp = self.client.chat_completion(
|
| 94 |
+
messages=msgs,
|
| 95 |
+
max_tokens=400,
|
| 96 |
+
temperature=0.6,
|
| 97 |
+
)
|
| 98 |
+
choices = resp.get("choices") or []
|
| 99 |
+
if choices:
|
| 100 |
+
return choices[0].get("message", {}).get("content", "")
|
| 101 |
+
except Exception:
|
| 102 |
+
pass
|
| 103 |
+
|
| 104 |
+
# Fallback to plain text generation
|
| 105 |
+
return self.client.text_generation(
|
| 106 |
+
prompt if prompt else (messages[-1]["content"] if messages else ""),
|
| 107 |
+
max_new_tokens=400,
|
| 108 |
+
temperature=0.6,
|
| 109 |
+
return_full_text=False,
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
def _build_messages(self, message: str, context: list[dict] | None = None) -> list[dict]:
|
| 113 |
+
messages: list[dict] = [
|
| 114 |
+
{"role": "system", "content": self._get_system_prompt()},
|
| 115 |
+
]
|
| 116 |
+
if context:
|
| 117 |
+
turns = context[-3:]
|
| 118 |
+
for turn in turns:
|
| 119 |
+
u = turn.get("user", "")
|
| 120 |
+
a = turn.get("assistant", "")
|
| 121 |
+
if u:
|
| 122 |
+
messages.append({"role": "user", "content": u})
|
| 123 |
+
if a:
|
| 124 |
+
messages.append({"role": "assistant", "content": a})
|
| 125 |
+
messages.append({"role": "user", "content": message})
|
| 126 |
+
return messages
|
| 127 |
+
|
| 128 |
+
def analyze_thought(self, thought: str) -> dict:
|
| 129 |
+
from cbt_knowledge import detect_distortions, find_similar_situations
|
| 130 |
+
|
| 131 |
+
distortions = detect_distortions(thought)
|
| 132 |
+
codes = [c for c, _ in distortions]
|
| 133 |
+
prompt = (
|
| 134 |
+
f"Thought: {thought}\n"
|
| 135 |
+
f"Detected codes: {', '.join(codes) if codes else 'none'}\n"
|
| 136 |
+
"Write a gentle, balanced reframe (2-4 sentences). Validate feelings first."
|
| 137 |
+
)
|
| 138 |
+
reframe = self._generate(prompt)
|
| 139 |
+
|
| 140 |
+
primary = codes[0] if codes else None
|
| 141 |
+
similar = find_similar_situations(primary) if primary else []
|
| 142 |
+
return {
|
| 143 |
+
"thought": thought,
|
| 144 |
+
"distortions": distortions,
|
| 145 |
+
"reframe": reframe,
|
| 146 |
+
"similar_situations": similar,
|
| 147 |
+
}
|
| 148 |
+
|
| 149 |
+
def generate_response(self, message: str, context: list[dict] | None = None) -> str:
|
| 150 |
+
"""Return a full assistant response (non-streaming)."""
|
| 151 |
+
messages = self._build_messages(message, context)
|
| 152 |
+
return self._generate(prompt="", messages=messages)
|
| 153 |
+
|
| 154 |
+
def stream_generate_response(
|
| 155 |
+
self, message: str, context: list[dict] | None = None
|
| 156 |
+
) -> Iterable[str]:
|
| 157 |
+
"""Yield assistant tokens via provider streaming if supported.
|
| 158 |
+
|
| 159 |
+
Falls back to a single full message chunk when streaming
|
| 160 |
+
is not supported by the provider/client.
|
| 161 |
+
"""
|
| 162 |
+
messages = self._build_messages(message, context)
|
| 163 |
+
|
| 164 |
+
# Try OpenAI-compatible chat streaming first
|
| 165 |
+
try:
|
| 166 |
+
if hasattr(self.client, "chat") and hasattr(self.client.chat, "completions"):
|
| 167 |
+
stream = self.client.chat.completions.create(
|
| 168 |
+
model=self.model_name,
|
| 169 |
+
messages=messages,
|
| 170 |
+
temperature=0.6,
|
| 171 |
+
max_tokens=400,
|
| 172 |
+
stream=True,
|
| 173 |
+
)
|
| 174 |
+
for chunk in stream: # ChatCompletionStreamOutput-like
|
| 175 |
+
# Handle both object-like and dict-like chunks
|
| 176 |
+
delta = (
|
| 177 |
+
getattr(chunk.choices[0], "delta", None)
|
| 178 |
+
if hasattr(chunk, "choices")
|
| 179 |
+
else None
|
| 180 |
+
)
|
| 181 |
+
content = None
|
| 182 |
+
if delta is not None:
|
| 183 |
+
content = getattr(delta, "content", None)
|
| 184 |
+
elif isinstance(chunk, dict):
|
| 185 |
+
content = chunk.get("choices", [{}])[0].get("delta", {}).get("content")
|
| 186 |
+
if content:
|
| 187 |
+
yield content
|
| 188 |
+
return
|
| 189 |
+
if hasattr(self.client, "chat_completion"):
|
| 190 |
+
stream = self.client.chat_completion(
|
| 191 |
+
messages=messages,
|
| 192 |
+
temperature=0.6,
|
| 193 |
+
max_tokens=400,
|
| 194 |
+
stream=True,
|
| 195 |
+
)
|
| 196 |
+
for chunk in stream:
|
| 197 |
+
# older dict-shaped stream
|
| 198 |
+
content = (chunk.get("choices") or [{}])[0].get("delta", {}).get("content")
|
| 199 |
+
if content:
|
| 200 |
+
yield content
|
| 201 |
+
return
|
| 202 |
+
except InferenceTimeoutError as e: # pragma: no cover
|
| 203 |
+
# Surface a short message; UI will present it
|
| 204 |
+
yield f"[timeout] {e}"
|
| 205 |
+
return
|
| 206 |
+
except Exception:
|
| 207 |
+
# Fall through to plain generation
|
| 208 |
+
pass
|
| 209 |
+
|
| 210 |
+
# Fallback: text generation streaming if available
|
| 211 |
+
try:
|
| 212 |
+
gen = self.client.text_generation(
|
| 213 |
+
messages[-1]["content"],
|
| 214 |
+
max_new_tokens=400,
|
| 215 |
+
temperature=0.6,
|
| 216 |
+
stream=True,
|
| 217 |
+
return_full_text=False,
|
| 218 |
+
)
|
| 219 |
+
for token in gen:
|
| 220 |
+
if token:
|
| 221 |
+
yield str(token)
|
| 222 |
+
return
|
| 223 |
+
except Exception:
|
| 224 |
+
pass
|
| 225 |
+
|
| 226 |
+
# Final fallback: non-streaming
|
| 227 |
+
yield self.generate_response(message, context)
|
| 228 |
+
|
| 229 |
+
def get_crisis_response(self) -> str:
|
| 230 |
+
return (
|
| 231 |
+
"I'm concerned about what you're sharing and want you to know that support is "
|
| 232 |
+
"available.\n\n"
|
| 233 |
+
"Immediate help: see international resources at "
|
| 234 |
+
"https://blog.opencounseling.com/suicide-hotlines/\n\n"
|
| 235 |
+
"You don't have to go through this alone. Please reach out to a professional who "
|
| 236 |
+
"can provide the support you deserve."
|
| 237 |
+
)
|
| 238 |
+
|
| 239 |
+
|
| 240 |
+
def create_simple_cbt_assistant():
    """Backwards-compatible factory alias for :class:`CBTAgent`."""
    agent = CBTAgent()
    return agent
|
app.py
CHANGED
|
@@ -1,7 +1,969 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
|
| 3 |
-
|
| 4 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 5 |
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Re-frame: Cognitive Reframing Assistant
|
| 3 |
+
A Gradio-based CBT tool for identifying and reframing cognitive distortions
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
import hashlib
|
| 7 |
+
import json
|
| 8 |
+
import os
|
| 9 |
+
from datetime import datetime
|
| 10 |
+
from typing import Optional
|
| 11 |
+
|
| 12 |
import gradio as gr
|
| 13 |
|
| 14 |
+
# Import our CBT knowledge base
|
| 15 |
+
from cbt_knowledge import (
|
| 16 |
+
COGNITIVE_DISTORTIONS,
|
| 17 |
+
find_similar_situations,
|
| 18 |
+
)
|
| 19 |
+
|
| 20 |
+
# Import UI components
|
| 21 |
+
from ui_components.landing import create_landing_tab
|
| 22 |
+
from ui_components.learn import create_learn_tab
|
| 23 |
+
|
| 24 |
+
# Agentic LLM support (Hugging Face Inference API)
|
| 25 |
+
try:
|
| 26 |
+
from agents import CBTAgent
|
| 27 |
+
|
| 28 |
+
AGENT_AVAILABLE = True
|
| 29 |
+
except Exception:
|
| 30 |
+
CBTAgent = None # type: ignore
|
| 31 |
+
AGENT_AVAILABLE = False
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
# Load translations
|
| 35 |
+
def load_translations():
    """Load translation files for internationalization.

    Reads ``locales/<lang>.json`` for each supported language; when a file
    is missing, an embedded fallback dictionary is used instead.
    """
    translations = {}
    for code in ['en', 'es']:
        try:
            with open(f'locales/{code}.json', encoding='utf-8') as fh:
                translations[code] = json.load(fh)
        except FileNotFoundError:
            # Missing locale file: the embedded fallback below covers it.
            pass

    english_fallback = {
        "app_title": "🧠 re-frame: Cognitive Reframing Assistant",
        "app_description": "Using CBT principles to help you find balanced perspectives",
        "welcome": {
            "title": "Welcome to re-frame",
            "subtitle": "Find a kinder perspective",
            "description": (
                "Using ideas from Cognitive Behavioral Therapy (CBT), we help you notice "
                "thinking patterns and explore gentler, more balanced perspectives."
            ),
            "how_it_works": "How it works",
            "step1": "Share your thoughts",
            "step1_desc": "Tell us what's on your mind",
            "step2": "Identify patterns",
            "step2_desc": "We'll help spot thinking traps",
            "step3": "Find balance",
            "step3_desc": "Explore alternative perspectives",
            "start_chat": "Start Chat",
            "disclaimer": "Important: This is a self-help tool, not therapy or medical advice.",
            "privacy": "Privacy: No data is stored beyond your session.",
        },
        "chat": {
            "title": "Chat",
            "placeholder": "Share what's on your mind...",
            "send": "Send",
            "clear": "New Session",
            "thinking": "Thinking...",
            "distortions_found": "Thinking patterns identified:",
            "reframe_suggestion": "Alternative perspective:",
            "similar_situations": "Similar situations:",
            "try_this": "You might try:",
        },
        "learn": {
            "title": "Learn",
            "select_distortion": "Select a thinking pattern to explore",
            "definition": "Definition",
            "examples": "Common Examples",
            "strategies": "Reframing Strategies",
            "actions": "Small Steps to Try",
        },
    }

    spanish_fallback = {
        "app_title": "🧠 re-frame: Asistente de Reencuadre Cognitivo",
        "app_description": (
            "Usando principios de TCC para ayudarte a encontrar perspectivas equilibradas"
        ),
        "welcome": {
            "title": "Bienvenido a re-frame",
            "subtitle": "Encuentra una perspectiva más amable",
            "description": (
                "Usando ideas de la Terapia Cognitivo-Conductual (TCC), te ayudamos a notar "
                "patrones de pensamiento y explorar perspectivas más gentiles y equilibradas."
            ),
            "how_it_works": "Cómo funciona",
            "step1": "Comparte tus pensamientos",
            "step1_desc": "Cuéntanos qué piensas",
            "step2": "Identifica patrones",
            "step2_desc": "Te ayudamos a detectar trampas mentales",
            "step3": "Encuentra balance",
            "step3_desc": "Explora perspectivas alternativas",
            "start_chat": "Iniciar Chat",
            "disclaimer": (
                "Importante: Esta es una herramienta de autoayuda, "
                "no terapia ni consejo médico."
            ),
            "privacy": "Privacidad: No se almacenan datos más allá de tu sesión.",
        },
        "chat": {
            "title": "Chat",
            "placeholder": "Comparte lo que piensas...",
            "send": "Enviar",
            "clear": "Nueva Sesión",
            "thinking": "Pensando...",
            "distortions_found": "Patrones de pensamiento identificados:",
            "reframe_suggestion": "Perspectiva alternativa:",
            "similar_situations": "Situaciones similares:",
            "try_this": "Podrías intentar:",
        },
        "learn": {
            "title": "Aprender",
            "select_distortion": "Selecciona un patrón de pensamiento para explorar",
            "definition": "Definición",
            "examples": "Ejemplos Comunes",
            "strategies": "Estrategias de Reencuadre",
            "actions": "Pequeños Pasos a Intentar",
        },
    }

    # Install fallbacks only for languages whose file was not loaded.
    translations.setdefault('en', english_fallback)
    translations.setdefault('es', spanish_fallback)

    return translations
|
| 139 |
+
|
| 140 |
+
|
| 141 |
+
class CBTChatbot:
|
| 142 |
+
"""Main chatbot class for handling CBT conversations"""
|
| 143 |
+
|
| 144 |
+
def __init__(self, language='en', memory_size: int = 6):
|
| 145 |
+
self.language = language
|
| 146 |
+
self.translations = load_translations()
|
| 147 |
+
self.t = self.translations.get(language, self.translations['en'])
|
| 148 |
+
self.conversation_history: list[list[str]] = []
|
| 149 |
+
self.identified_distortions: list[tuple[str, float]] = []
|
| 150 |
+
self.memory_size = max(2, int(memory_size))
|
| 151 |
+
|
| 152 |
+
def _history_to_context(self, history: list[list[str]]) -> list[dict]:
|
| 153 |
+
"""Convert Chatbot history [[user, assistant], ...] to agent context[{user,assistant}]"""
|
| 154 |
+
ctx: list[dict] = []
|
| 155 |
+
for turn in history or []:
|
| 156 |
+
if isinstance(turn, list | tuple) and len(turn) == 2:
|
| 157 |
+
ctx.append({"user": turn[0] or "", "assistant": turn[1] or ""})
|
| 158 |
+
return ctx[-self.memory_size :]
|
| 159 |
+
|
| 160 |
+
def process_message(
|
| 161 |
+
self,
|
| 162 |
+
message: str,
|
| 163 |
+
history: list[list[str]],
|
| 164 |
+
use_agent: bool = False,
|
| 165 |
+
agent: Optional["CBTAgent"] = None,
|
| 166 |
+
) -> tuple[list[list[str]], str, str, str]:
|
| 167 |
+
"""
|
| 168 |
+
Process user message and generate response with CBT analysis
|
| 169 |
+
|
| 170 |
+
Returns:
|
| 171 |
+
- Updated chat history
|
| 172 |
+
- Identified distortions display
|
| 173 |
+
- Reframe suggestion
|
| 174 |
+
- Similar situations display
|
| 175 |
+
"""
|
| 176 |
+
if not message or message.strip() == "":
|
| 177 |
+
return history or [], "", "", ""
|
| 178 |
+
|
| 179 |
+
# Add user message to history
|
| 180 |
+
history = history or []
|
| 181 |
+
|
| 182 |
+
# Agentic path only: remove non-LLM fallback
|
| 183 |
+
if use_agent and agent is not None:
|
| 184 |
+
try:
|
| 185 |
+
analysis = agent.analyze_thought(message)
|
| 186 |
+
response = agent.generate_response(
|
| 187 |
+
message, context=self._history_to_context(history)
|
| 188 |
+
)
|
| 189 |
+
|
| 190 |
+
distortions_display = self._format_distortions(analysis.get("distortions", []))
|
| 191 |
+
reframe_display = analysis.get("reframe", "")
|
| 192 |
+
primary = analysis.get("distortions", [])
|
| 193 |
+
primary_code = primary[0][0] if primary else None
|
| 194 |
+
situations_display = (
|
| 195 |
+
self._format_similar_situations(primary_code) if primary_code else ""
|
| 196 |
+
)
|
| 197 |
+
except Exception as e:
|
| 198 |
+
# Do not fallback to local heuristics
|
| 199 |
+
history.append([message, f"Agent error: {e}"])
|
| 200 |
+
return history, "", "", ""
|
| 201 |
+
else:
|
| 202 |
+
# Non-agent mode disabled
|
| 203 |
+
history.append(
|
| 204 |
+
[message, "Agent-only mode: please enable the agent to generate responses."]
|
| 205 |
+
)
|
| 206 |
+
return history, "", "", ""
|
| 207 |
+
|
| 208 |
+
# Update history with memory cap
|
| 209 |
+
history.append([message, response])
|
| 210 |
+
if len(history) > self.memory_size:
|
| 211 |
+
history = history[-self.memory_size :]
|
| 212 |
+
|
| 213 |
+
return history, distortions_display, reframe_display, situations_display
|
| 214 |
+
|
| 215 |
+
def _format_distortions(self, distortions: list[tuple[str, float]]) -> str:
|
| 216 |
+
"""Format detected distortions for display"""
|
| 217 |
+
if not distortions:
|
| 218 |
+
return ""
|
| 219 |
+
|
| 220 |
+
lines = [f"### {self.t['chat']['distortions_found']}\n"]
|
| 221 |
+
for code, confidence in distortions[:3]: # Show top 3
|
| 222 |
+
for _key, info in COGNITIVE_DISTORTIONS.items():
|
| 223 |
+
if info['code'] == code:
|
| 224 |
+
lines.append(f"**{info['name']}** ({confidence * 100:.0f}% match)")
|
| 225 |
+
lines.append(f"*{info['definition']}*\n")
|
| 226 |
+
break
|
| 227 |
+
|
| 228 |
+
return "\n".join(lines)
|
| 229 |
+
|
| 230 |
+
def _format_similar_situations(self, distortion_code: str) -> str:
|
| 231 |
+
"""Format similar situations for display"""
|
| 232 |
+
situations = find_similar_situations(distortion_code, num_situations=2)
|
| 233 |
+
if not situations:
|
| 234 |
+
return ""
|
| 235 |
+
|
| 236 |
+
lines = [f"### {self.t['chat']['similar_situations']}\n"]
|
| 237 |
+
for i, situation in enumerate(situations, 1):
|
| 238 |
+
lines.append(f"**Example {i}:** {situation['situation']}")
|
| 239 |
+
lines.append(f"*Distorted:* \"{situation['distorted']}\"")
|
| 240 |
+
lines.append(f"*Reframed:* \"{situation['reframed']}\"\n")
|
| 241 |
+
|
| 242 |
+
return "\n".join(lines)
|
| 243 |
+
|
| 244 |
+
def clear_session(self):
|
| 245 |
+
"""Clear the conversation session"""
|
| 246 |
+
self.conversation_history = []
|
| 247 |
+
self.identified_distortions = []
|
| 248 |
+
return [], "", "", ""
|
| 249 |
+
|
| 250 |
+
|
| 251 |
+
def create_app(language='en'):
    """Create and configure the Gradio application.

    Builds the Welcome / Chat / Learn / Owner tabs, wires the streaming
    agent handler, and returns the queued Blocks app ready to launch.

    Fixes vs. previous revision:
    - The daily call log stored a bare int under the date key while the
      per-user interaction tracker stored a dict under the *same* key, so
      `_calls_today()` / `_inc_calls_today()` raised TypeError (int(dict))
      and the call counter was silently discarded after the first recorded
      interaction. The log now uses one dict schema per day
      ({"calls": int, "interactions": {uid: int}}) with legacy-int migration.
    - The empty-message early yield in `respond_stream` now guards `history`
      with `or []` like every other exit path.
    """

    # Initialize chatbot
    chatbot = CBTChatbot(language)
    t = chatbot.t

    # Custom CSS for better styling
    custom_css = """
    .gradio-container {
        font-family: 'Inter', -apple-system, BlinkMacSystemFont, sans-serif;
    }
    .gr-button-primary {
        background-color: #2563eb !important;
        border-color: #2563eb !important;
    }
    .gr-button-primary:hover {
        background-color: #1e40af !important;
    }
    .info-box {
        background-color: #f0f9ff;
        border: 1px solid #3b82f6;
        border-radius: 8px;
        padding: 12px;
        margin: 8px 0;
    }
    """

    with gr.Blocks(title=t['app_title'], theme=gr.themes.Soft(), css=custom_css) as app:
        gr.Markdown(f"# {t['app_title']}")
        gr.Markdown(f"*{t['app_description']}*")

        with gr.Tabs():
            # Welcome Tab
            with gr.Tab(t['welcome']['title']):
                create_landing_tab(t['welcome'])

            # Chat Tab
            with gr.Tab(t['chat']['title']):
                # Settings row (agentic only)
                with gr.Row():
                    model_name = gr.Textbox(
                        label="Model (HF)",
                        value=os.getenv("MODEL_NAME", "meta-llama/Llama-3.1-8B-Instruct"),
                        info=(
                            "Requires HF Inference API token (HF_TOKEN or HUGGINGFACEHUB_API_TOKEN)"
                        ),
                    )
                    gr.Markdown("Agentic mode only. No local fallback. Set `HF_TOKEN` in Secrets.")
                    gr.LoginButton()
                    billing_notice = gr.Markdown("")

                with gr.Row():
                    with gr.Column(scale=2):
                        chatbot_ui = gr.Chatbot(height=400, label="Conversation")

                        with gr.Row():
                            msg_input = gr.Textbox(
                                label="", placeholder=t['chat']['placeholder'], scale=4
                            )
                            send_btn = gr.Button(t['chat']['send'], variant="primary", scale=1)

                        clear_btn = gr.Button(t['chat']['clear'], variant="secondary")

                    with gr.Column(scale=1):
                        gr.Markdown("### Analysis")
                        distortions_output = gr.Markdown(label="Patterns Detected")
                        reframe_output = gr.Markdown(label="Reframe Suggestion")
                        situations_output = gr.Markdown(label="Similar Situations")

                # Internal state for agent instance and agentic enable flag
                agent_state = gr.State(value=None)
                agentic_enabled_state = gr.State(value=True)
                # Admin runtime settings (e.g., per-user limit override)
                admin_state = gr.State(value={"per_user_limit_override": None})

                # Connect chat interface (streaming)
                def _ensure_hf_token_env():
                    # Honor either HF_TOKEN or HUGGINGFACEHUB_API_TOKEN
                    token = os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")
                    if token and not os.getenv("HUGGINGFACEHUB_API_TOKEN"):
                        os.environ["HUGGINGFACEHUB_API_TOKEN"] = token

                # NOTE(review): currently unused; kept for a future word-chunked
                # streaming fallback.
                def _stream_chunks(text: str, chunk_words: int = 12):
                    words = (text or "").split()
                    buf = []
                    for i, w in enumerate(words, 1):
                        buf.append(w)
                        if i % chunk_words == 0:
                            yield " ".join(buf)
                            buf = []
                    if buf:
                        yield " ".join(buf)

                # Budget guard helpers
                def _get_call_log_path():
                    return os.getenv("AGENT_CALL_LOG_PATH", "/tmp/agent_calls.json")

                # Simple privacy-preserving metrics (no raw PII/content)
                def _get_metrics_path():
                    return os.getenv("APP_METRICS_PATH", "/tmp/app_metrics.json")

                def _load_call_log():
                    try:
                        with open(_get_call_log_path(), encoding="utf-8") as f:
                            return json.load(f)
                    except Exception:
                        return {}

                def _load_metrics():
                    try:
                        with open(_get_metrics_path(), encoding="utf-8") as f:
                            return json.load(f)
                    except Exception:
                        return {}

                def _save_call_log(data):
                    try:
                        with open(_get_call_log_path(), "w", encoding="utf-8") as f:
                            json.dump(data, f)
                    except Exception:
                        pass

                def _save_metrics(data):
                    try:
                        with open(_get_metrics_path(), "w", encoding="utf-8") as f:
                            json.dump(data, f)
                    except Exception:
                        pass

                def _today_key():
                    # NOTE(review): utcnow() is naive/deprecated in newer
                    # Pythons; fine for a date bucket, but consider
                    # datetime.now(timezone.utc) — TODO confirm import style.
                    return datetime.utcnow().strftime("%Y-%m-%d")

                # Metrics helpers
                def _metrics_today():
                    m = _load_metrics()
                    return m.get(_today_key(), {})

                def _write_metrics_today(d):
                    m = _load_metrics()
                    m[_today_key()] = d
                    _save_metrics(m)

                def _inc_metric(key: str, inc: int = 1):
                    d = _metrics_today()
                    d[key] = int(d.get(key, 0)) + inc
                    _write_metrics_today(d)

                def _record_distortion_counts(codes: list[str]):
                    if not codes:
                        return
                    d = _metrics_today()
                    dist = d.get("distortion_counts", {})
                    if not isinstance(dist, dict):
                        dist = {}
                    for c in codes:
                        dist[c] = int(dist.get(c, 0)) + 1
                    d["distortion_counts"] = dist
                    _write_metrics_today(d)

                def _record_response_chars(n: int):
                    d = _metrics_today()
                    d["response_chars_total"] = int(d.get("response_chars_total", 0)) + max(
                        0, int(n)
                    )
                    d["response_count"] = int(d.get("response_count", 0)) + 1
                    _write_metrics_today(d)

                # Call-log schema: each date key maps to a dict
                # {"calls": int, "interactions": {uid: int}}. Older logs may
                # hold a bare int (the call count only); migrate on read.
                def _coerce_day_blob(value):
                    if isinstance(value, dict):
                        return value
                    try:
                        return {"calls": int(value)}
                    except Exception:
                        return {}

                def _calls_today():
                    blob = _coerce_day_blob(_load_call_log().get(_today_key(), {}))
                    try:
                        return int(blob.get("calls", 0))
                    except Exception:
                        return 0

                def _inc_calls_today():
                    data = _load_call_log()
                    key = _today_key()
                    blob = _coerce_day_blob(data.get(key, {}))
                    try:
                        blob["calls"] = int(blob.get("calls", 0)) + 1
                    except Exception:
                        blob["calls"] = 1
                    data[key] = blob
                    _save_call_log(data)

                def _agentic_budget_allows():
                    hard = os.getenv("HF_AGENT_HARD_DISABLE", "").lower() in ("1", "true", "yes")
                    if hard:
                        return False
                    limit = os.getenv("HF_AGENT_MAX_CALLS_PER_DAY")
                    if not limit:
                        return True
                    try:
                        limit_i = int(limit)
                    except Exception:
                        return True
                    return _calls_today() < max(0, limit_i)

                def respond_stream(
                    message,
                    history,
                    model_value,
                    agent_obj,
                    agentic_ok,
                    admin_settings,
                    request: "gr.Request",
                    profile: "gr.OAuthProfile | None" = None,
                ):
                    """Generator handler for one chat turn; yields
                    (history, distortions, reframe, situations, agent,
                    notice, agentic_ok) frames while the reply streams."""
                    if not message:
                        yield history or [], "", "", "", agent_obj, "", agentic_ok
                        return

                    budget_ok = _agentic_budget_allows()
                    notice = ""

                    # Compute user id (salted hash) for per-user quotas
                    def _user_id(req: "gr.Request", prof: "gr.OAuthProfile | None") -> str:
                        try:
                            salt = os.getenv("USAGE_SALT", "reframe_salt")
                            # Prefer OAuth profile when available
                            if prof is not None:
                                # Try common fields in OAuth profile
                                username = None
                                for key in (
                                    "preferred_username",
                                    "username",
                                    "login",
                                    "name",
                                    "sub",
                                    "id",
                                ):
                                    try:
                                        if hasattr(prof, key):
                                            username = getattr(prof, key)
                                        elif isinstance(prof, dict) and key in prof:
                                            username = prof[key]
                                        if username:
                                            break
                                    except Exception:
                                        pass
                                raw = f"oauth:{username or 'unknown'}"
                            # req is expected to be provided by Gradio
                            elif getattr(req, "username", None):
                                raw = f"user:{req.username}"
                            else:
                                ip = getattr(getattr(req, "client", None), "host", "?")
                                ua = (
                                    dict(req.headers).get("user-agent", "?")
                                    if getattr(req, "headers", None)
                                    else "?"
                                )
                                sess = getattr(req, "session_hash", None) or "?"
                                raw = f"ipua:{ip}|{ua}|{sess}"
                            return hashlib.sha256(f"{salt}|{raw}".encode()).hexdigest()
                        except Exception:
                            return "anon"

                    user_id = _user_id(request, profile)

                    # Per-user interaction quota (counts 1 per message)
                    def _interactions_today(uid: str) -> int:
                        day_blob = _coerce_day_blob(_load_call_log().get(_today_key(), {}))
                        inter = day_blob.get("interactions", {})
                        if not isinstance(inter, dict):
                            inter = {}
                        return int(inter.get(uid, 0))

                    def _inc_interactions_today(uid: str):
                        data = _load_call_log()
                        day = _today_key()
                        # Preserve any existing call count when migrating a
                        # legacy int entry (previously it was clobbered).
                        day_blob = _coerce_day_blob(data.get(day, {}))
                        inter = day_blob.get("interactions", {})
                        if not isinstance(inter, dict):
                            inter = {}
                        inter[uid] = int(inter.get(uid, 0)) + 1
                        day_blob["interactions"] = inter
                        data[day] = day_blob
                        _save_call_log(data)

                    max_interactions_env = os.getenv("HF_AGENT_MAX_INTERACTIONS_PER_USER")
                    try:
                        # Default to a generous 12 if not configured
                        per_user_limit_env = (
                            int(max_interactions_env) if max_interactions_env else 12
                        )
                    except Exception:
                        per_user_limit_env = 12
                    per_user_limit = per_user_limit_env
                    # Admin override (runtime)
                    try:
                        override = None
                        if isinstance(admin_settings, dict):
                            override = admin_settings.get("per_user_limit_override")
                        if isinstance(override, int | float) and int(override) > 0:
                            per_user_limit = int(override)
                    except Exception:
                        pass
                    if per_user_limit is not None and _interactions_today(user_id) >= max(
                        0, per_user_limit
                    ):
                        _inc_metric("blocked_interactions")
                        yield (
                            history or [],
                            "",
                            "",
                            "",
                            agent_obj,
                            f"Per-user limit reached ({per_user_limit} interactions).",
                            agentic_ok,
                        )
                        return
                    if not AGENT_AVAILABLE:
                        yield (
                            history or [],
                            "",
                            "",
                            "",
                            agent_obj,
                            "Agent not available. Check HF token and model name.",
                            agentic_ok,
                        )
                        return
                    if not agentic_ok:
                        yield (
                            history or [],
                            "",
                            "",
                            "",
                            agent_obj,
                            "Agentic mode disabled due to a prior quota/billing error.",
                            agentic_ok,
                        )
                        return
                    if not budget_ok:
                        yield (
                            history or [],
                            "",
                            "",
                            "",
                            agent_obj,
                            "Daily budget reached. Set HF_AGENT_MAX_CALLS_PER_DAY or try tomorrow.",
                            agentic_ok,
                        )
                        return
                    # Count one interaction for this user upfront
                    _inc_interactions_today(user_id)

                    # Lazily initialize agent if requested
                    _ensure_hf_token_env()
                    if agent_obj is None:
                        try:
                            agent_obj = CBTAgent(model_name=model_value)
                        except Exception as e:
                            err = str(e)
                            yield (
                                history or [],
                                "",
                                "",
                                "",
                                agent_obj,
                                f"Agent failed to initialize: {err}",
                                agentic_ok,
                            )
                            return

                    # Prepare side panels first for a snappy UI
                    try:
                        analysis = agent_obj.analyze_thought(message)
                        distortions_display = chatbot._format_distortions(
                            analysis.get("distortions", [])
                        )
                        reframe_display = analysis.get("reframe", "")
                        primary = analysis.get("distortions", [])
                        primary_code = primary[0][0] if primary else None
                        situations_display = (
                            chatbot._format_similar_situations(primary_code) if primary_code else ""
                        )
                        # Metrics: record this interaction. One inference call
                        # (analysis) is counted here; the generation call is
                        # counted separately below.
                        _inc_metric("total_interactions")
                        _record_distortion_counts([c for c, _ in analysis.get("distortions", [])])
                        _inc_calls_today()
                    except Exception as e:
                        distortions_display = reframe_display = situations_display = ""
                        # Detect quota/billing signals and permanently disable agent for this run
                        msg = str(e).lower()
                        if any(
                            k in msg
                            for k in [
                                "quota",
                                "limit",
                                "billing",
                                "payment",
                                "insufficient",
                                "402",
                                "429",
                            ]
                        ):
                            agentic_ok = False
                            notice = "Agentic mode disabled due to quota/billing error."
                        else:
                            notice = f"Agent analysis failed: {e}"
                        _inc_metric("agent_errors")
                        yield (
                            history or [],
                            distortions_display,
                            reframe_display,
                            situations_display,
                            agent_obj,
                            notice,
                            agentic_ok,
                        )
                        return

                    # Start streaming the assistant reply
                    history = history or []
                    history.append([message, ""])  # placeholder for assistant
                    # Enforce memory cap while streaming
                    if len(history) > chatbot.memory_size:
                        history = history[-chatbot.memory_size:]

                    # Choose response source: true token streaming via HF Inference
                    try:
                        _inc_calls_today()
                        stream = getattr(agent_obj, "stream_generate_response", None)
                        if callable(stream):
                            token_iter = stream(
                                message, context=chatbot._history_to_context(history[:-1])
                            )
                        else:
                            # Fallback to non-streaming
                            def _one_shot():
                                yield agent_obj.generate_response(
                                    message, context=chatbot._history_to_context(history[:-1])
                                )

                            token_iter = _one_shot()
                    except Exception as e:
                        _inc_metric("agent_errors")
                        yield (
                            history,
                            distortions_display,
                            reframe_display,
                            situations_display,
                            agent_obj,
                            f"Agent response failed: {e}",
                            agentic_ok,
                        )
                        return

                    acc = ""
                    for chunk in token_iter:
                        if not chunk:
                            continue
                        acc += str(chunk)
                        history[-1][1] = acc
                        # yield streaming frame
                        yield (
                            history,
                            distortions_display,
                            reframe_display,
                            situations_display,
                            agent_obj,
                            notice,
                            agentic_ok,
                        )

                    # Final yield ensures the last state is consistent
                    _record_response_chars(len(acc))
                    # Show remaining interactions
                    try:
                        remaining = (
                            None
                            if per_user_limit is None
                            else max(0, per_user_limit - _interactions_today(user_id))
                        )
                        if remaining is not None:
                            notice = (
                                notice + f"\nRemaining interactions today: {remaining}"
                            ).strip()
                    except Exception:
                        pass
                    yield (
                        history,
                        distortions_display,
                        reframe_display,
                        situations_display,
                        agent_obj,
                        notice,
                        agentic_ok,
                    )

                def clear_input():
                    return ""

                msg_input.submit(
                    respond_stream,
                    inputs=[
                        msg_input,
                        chatbot_ui,
                        model_name,
                        agent_state,
                        agentic_enabled_state,
                        admin_state,
                    ],
                    outputs=[
                        chatbot_ui,
                        distortions_output,
                        reframe_output,
                        situations_output,
                        agent_state,
                        billing_notice,
                        agentic_enabled_state,
                    ],
                ).then(clear_input, outputs=[msg_input])

                send_btn.click(
                    respond_stream,
                    inputs=[
                        msg_input,
                        chatbot_ui,
                        model_name,
                        agent_state,
                        agentic_enabled_state,
                        admin_state,
                    ],
                    outputs=[
                        chatbot_ui,
                        distortions_output,
                        reframe_output,
                        situations_output,
                        agent_state,
                        billing_notice,
                        agentic_enabled_state,
                    ],
                ).then(clear_input, outputs=[msg_input])

                def _clear_session_and_notice():
                    h, d, r, s = chatbot.clear_session()
                    return h, d, r, s, ""

                clear_btn.click(
                    _clear_session_and_notice,
                    outputs=[
                        chatbot_ui,
                        distortions_output,
                        reframe_output,
                        situations_output,
                        billing_notice,
                    ],
                )

            # Learn Tab
            with gr.Tab(t['learn']['title']):
                create_learn_tab(t['learn'], COGNITIVE_DISTORTIONS)

            # Owner Tab (visible content only for Space owner)
            with gr.Tab("Owner"):
                # Locked panel shown to non-admins
                locked_panel = gr.Column(visible=True)
                with locked_panel:
                    gr.Markdown("### Owner only\nPlease log in with your Hugging Face account.")

                # Admin panel
                admin_panel = gr.Column(visible=False)
                with admin_panel:
                    gr.Markdown("## Admin Dashboard")
                    admin_summary = gr.Markdown("")
                    admin_limit_info = gr.Markdown("")
                    with gr.Row():
                        override_tb = gr.Textbox(
                            label="Per-user interaction limit override (blank to clear)"
                        )
                        set_override_btn = gr.Button("Set Limit Override", variant="secondary")
                        refresh_btn = gr.Button("Refresh Metrics", variant="secondary")

                def _owner_is(profile: "gr.OAuthProfile | None") -> bool:
                    """True when the logged-in OAuth user matches OWNER_USER."""
                    try:
                        owner = (os.getenv("OWNER_USER") or "").strip().lower()
                        if not owner:
                            return False
                        # Try common profile fields
                        username = None
                        for key in ("preferred_username", "username", "login", "name", "sub", "id"):
                            try:
                                if hasattr(profile, key):
                                    username = getattr(profile, key)
                                elif isinstance(profile, dict) and key in profile:
                                    username = profile[key]
                                if username:
                                    break
                            except Exception:
                                pass
                        if not username:
                            return False
                        return str(username).lower() == owner
                    except Exception:
                        return False

                def _metrics_paths():
                    return (
                        os.getenv("APP_METRICS_PATH", "/tmp/app_metrics.json"),
                        os.getenv("AGENT_CALL_LOG_PATH", "/tmp/agent_calls.json"),
                    )

                def _read_json(path: str) -> dict:
                    try:
                        with open(path, encoding="utf-8") as f:
                            return json.load(f)
                    except Exception:
                        return {}

                def _summarize_metrics_md() -> str:
                    """Aggregate the last 7 days of metrics into markdown."""
                    mpath, _ = _metrics_paths()
                    data = _read_json(mpath)
                    if not data:
                        return "No metrics recorded yet."
                    # Summarize last 7 days
                    days = sorted(data.keys())[-7:]
                    total = blocked = errors = resp_chars = resp_count = 0
                    dist_counts: dict[str, int] = {}
                    for d in days:
                        day = data.get(d, {}) or {}
                        total += int(day.get("total_interactions", 0))
                        blocked += int(day.get("blocked_interactions", 0))
                        errors += int(day.get("agent_errors", 0))
                        resp_chars += int(day.get("response_chars_total", 0))
                        resp_count += int(day.get("response_count", 0))
                        dist = day.get("distortion_counts", {})
                        if isinstance(dist, dict):
                            for k, v in dist.items():
                                dist_counts[k] = int(dist_counts.get(k, 0)) + int(v)
                    avg_len = (resp_chars / resp_count) if resp_count else 0
                    top = sorted(dist_counts.items(), key=lambda x: x[1], reverse=True)[:5]
                    lines = [
                        "### Usage (last 7 days)",
                        f"- Total interactions: {total}",
                        f"- Blocked interactions: {blocked}",
                        f"- Agent errors: {errors}",
                        f"- Avg response length: {avg_len:.0f} chars",
                        "",
                        "### Top cognitive patterns",
                    ]
                    if top:
                        for k, v in top:
                            lines.append(f"- {k}: {v}")
                    else:
                        lines.append("- None recorded")
                    return "\n".join(lines)

                def _limit_info_md(settings: dict | None) -> str:
                    env_val = os.getenv("HF_AGENT_MAX_INTERACTIONS_PER_USER")
                    try:
                        env_limit = int(env_val) if env_val else 12
                    except Exception:
                        env_limit = 12
                    override = None
                    if isinstance(settings, dict):
                        override = settings.get("per_user_limit_override")
                    effective = (
                        int(override)
                        if isinstance(override, int | float) and int(override) > 0
                        else env_limit
                    )
                    return (
                        f"Per-user daily limit: {effective} (env: {env_limit}, override: "
                        f"{override if override else 'None'})"
                    )

                def show_admin(profile: "gr.OAuthProfile | None"):
                    visible = _owner_is(profile)
                    return (
                        gr.update(visible=visible),
                        gr.update(visible=not visible),
                        _summarize_metrics_md() if visible else "",
                        _limit_info_md(admin_state.value if hasattr(admin_state, "value") else None)
                        if visible
                        else "",
                    )

                def admin_set_limit(override_text: str, settings: dict | None):
                    # Only update runtime state; does not change env var
                    try:
                        if settings is None or not isinstance(settings, dict):
                            settings = {"per_user_limit_override": None}
                        override = None
                        if override_text and override_text.strip():
                            override = int(override_text.strip())
                            if override <= 0:
                                override = None
                        settings["per_user_limit_override"] = override
                    except Exception:
                        settings = {"per_user_limit_override": None}
                    return settings, _limit_info_md(settings)

                def admin_refresh():
                    return _summarize_metrics_md()

                # Wire admin interactions
                set_override_btn.click(
                    admin_set_limit,
                    inputs=[override_tb, admin_state],
                    outputs=[admin_state, admin_limit_info],
                )
                refresh_btn.click(admin_refresh, outputs=[admin_summary])

                # Gate admin panel visibility on load (OAuth)
                try:
                    app.load(show_admin, outputs=[admin_panel, locked_panel, admin_summary, admin_limit_info])
                except Exception:
                    # If OAuth not available, keep admin hidden
                    pass

    # Enable queue for Spaces / ZeroGPU compatibility
    return app.queue()
|
| 964 |
+
|
| 965 |
|
| 966 |
+
# Script entry point: build the English UI and serve it locally.
if __name__ == "__main__":
    demo = create_app(language='en')
    demo.launch(share=False, show_error=True, show_api=False)
|
cbt_knowledge/__init__.py
ADDED
|
@@ -0,0 +1,17 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
CBT Knowledge Base Module
|
| 3 |
+
|
| 4 |
+
Contains cognitive distortions, reframing strategies, and therapeutic principles.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from .distortions import COGNITIVE_DISTORTIONS, detect_distortions, get_distortion_by_code
|
| 8 |
+
from .reframing_tools import (
|
| 9 |
+
find_similar_situations,
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
__all__ = [
|
| 13 |
+
'COGNITIVE_DISTORTIONS',
|
| 14 |
+
'get_distortion_by_code',
|
| 15 |
+
'detect_distortions',
|
| 16 |
+
'find_similar_situations',
|
| 17 |
+
]
|
cbt_knowledge/distortions.py
ADDED
|
@@ -0,0 +1,328 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Cognitive Distortions Database
|
| 3 |
+
|
| 4 |
+
Complete taxonomy of cognitive distortions with definitions, examples, and reframing strategies.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
from typing import Any
|
| 8 |
+
|
| 9 |
+
COGNITIVE_DISTORTIONS = {
|
| 10 |
+
"mind_reading": {
|
| 11 |
+
"code": "MW",
|
| 12 |
+
"name": "Mind Reading",
|
| 13 |
+
"definition": "Assuming you know what others are thinking without evidence",
|
| 14 |
+
"examples": [
|
| 15 |
+
"They think I'm incompetent",
|
| 16 |
+
"Everyone can see how anxious I am",
|
| 17 |
+
"She must think I'm boring",
|
| 18 |
+
],
|
| 19 |
+
"reframing_strategies": [
|
| 20 |
+
"What evidence do I have for what they're thinking?",
|
| 21 |
+
"What else might they be thinking?",
|
| 22 |
+
"How could I find out what they actually think?",
|
| 23 |
+
],
|
| 24 |
+
"micro_actions": [
|
| 25 |
+
"Ask one person directly about their thoughts",
|
| 26 |
+
"Notice when predictions about others' thoughts were wrong",
|
| 27 |
+
],
|
| 28 |
+
},
|
| 29 |
+
"fortune_telling": {
|
| 30 |
+
"code": "FT",
|
| 31 |
+
"name": "Fortune Telling",
|
| 32 |
+
"definition": "Predicting the future negatively as if it's certain",
|
| 33 |
+
"examples": [
|
| 34 |
+
"I'll definitely fail the interview",
|
| 35 |
+
"Things will never get better",
|
| 36 |
+
"This relationship is doomed to fail",
|
| 37 |
+
],
|
| 38 |
+
"reframing_strategies": [
|
| 39 |
+
"What are other possible outcomes?",
|
| 40 |
+
"What's the most likely outcome based on past experience?",
|
| 41 |
+
"How certain am I really about this prediction?",
|
| 42 |
+
],
|
| 43 |
+
"micro_actions": [
|
| 44 |
+
"Write down 3 alternative outcomes",
|
| 45 |
+
"Track prediction accuracy for one week",
|
| 46 |
+
],
|
| 47 |
+
},
|
| 48 |
+
"catastrophizing": {
|
| 49 |
+
"code": "CT",
|
| 50 |
+
"name": "Catastrophizing",
|
| 51 |
+
"definition": "Blowing things out of proportion or imagining worst-case scenarios",
|
| 52 |
+
"examples": [
|
| 53 |
+
"This mistake will ruin my entire career",
|
| 54 |
+
"If I panic, I'll completely lose control",
|
| 55 |
+
"One bad grade means I'll never succeed",
|
| 56 |
+
],
|
| 57 |
+
"reframing_strategies": [
|
| 58 |
+
"What's the worst, best, and most likely outcome?",
|
| 59 |
+
"How have I coped with difficulties before?",
|
| 60 |
+
"Will this matter in 5 years?",
|
| 61 |
+
],
|
| 62 |
+
"micro_actions": [
|
| 63 |
+
"List past situations you successfully coped with",
|
| 64 |
+
"Rate actual vs predicted severity of one worry",
|
| 65 |
+
],
|
| 66 |
+
},
|
| 67 |
+
"all_or_nothing": {
|
| 68 |
+
"code": "AO",
|
| 69 |
+
"name": "All-or-Nothing Thinking",
|
| 70 |
+
"definition": "Seeing things in black-and-white categories with no middle ground",
|
| 71 |
+
"examples": [
|
| 72 |
+
"If I'm not perfect, I'm a failure",
|
| 73 |
+
"Either they love me or they hate me",
|
| 74 |
+
"If I can't do it all, I shouldn't do any of it",
|
| 75 |
+
],
|
| 76 |
+
"reframing_strategies": [
|
| 77 |
+
"What would the middle ground look like?",
|
| 78 |
+
"Can I rate this on a scale of 0-100?",
|
| 79 |
+
"What are the shades of gray here?",
|
| 80 |
+
],
|
| 81 |
+
"micro_actions": [
|
| 82 |
+
"Rate one achievement on a 0-100 scale",
|
| 83 |
+
"Find 3 partial successes in your day",
|
| 84 |
+
],
|
| 85 |
+
},
|
| 86 |
+
"mental_filter": {
|
| 87 |
+
"code": "MF",
|
| 88 |
+
"name": "Mental Filter",
|
| 89 |
+
"definition": "Focusing exclusively on negatives while filtering out positives",
|
| 90 |
+
"examples": [
|
| 91 |
+
"The whole presentation was terrible (despite mostly positive feedback)",
|
| 92 |
+
"My day was ruined (one bad thing among many good)",
|
| 93 |
+
"All I can think about is that one criticism",
|
| 94 |
+
],
|
| 95 |
+
"reframing_strategies": [
|
| 96 |
+
"What positive aspects am I overlooking?",
|
| 97 |
+
"What would a balanced view include?",
|
| 98 |
+
"What went well, even if small?",
|
| 99 |
+
],
|
| 100 |
+
"micro_actions": [
|
| 101 |
+
"Write 3 good things that happened today",
|
| 102 |
+
"Ask someone else what went well",
|
| 103 |
+
],
|
| 104 |
+
},
|
| 105 |
+
"personalization": {
|
| 106 |
+
"code": "PR",
|
| 107 |
+
"name": "Personalization",
|
| 108 |
+
"definition": "Blaming yourself for things outside your control",
|
| 109 |
+
"examples": [
|
| 110 |
+
"It's my fault they're in a bad mood",
|
| 111 |
+
"The team failed because of me",
|
| 112 |
+
"If I had done better, this wouldn't have happened",
|
| 113 |
+
],
|
| 114 |
+
"reframing_strategies": [
|
| 115 |
+
"What other factors contributed?",
|
| 116 |
+
"What was actually within my control?",
|
| 117 |
+
"Would I blame a friend this much?",
|
| 118 |
+
],
|
| 119 |
+
"micro_actions": [
|
| 120 |
+
"Create a responsibility pie chart",
|
| 121 |
+
"List factors outside your control",
|
| 122 |
+
],
|
| 123 |
+
},
|
| 124 |
+
"labeling": {
|
| 125 |
+
"code": "LB",
|
| 126 |
+
"name": "Labeling",
|
| 127 |
+
"definition": "Attaching global negative labels based on single instances",
|
| 128 |
+
"examples": ["I'm a loser", "They're completely selfish", "I'm such an idiot"],
|
| 129 |
+
"reframing_strategies": [
|
| 130 |
+
"What specific behavior am I reacting to?",
|
| 131 |
+
"Does one action define a whole person?",
|
| 132 |
+
"What evidence contradicts this label?",
|
| 133 |
+
],
|
| 134 |
+
"micro_actions": [
|
| 135 |
+
"Replace one label with specific behavior description",
|
| 136 |
+
"List 3 qualities that contradict the label",
|
| 137 |
+
],
|
| 138 |
+
},
|
| 139 |
+
"should_statements": {
|
| 140 |
+
"code": "SH",
|
| 141 |
+
"name": "Should Statements",
|
| 142 |
+
"definition": "Rigid rules about how things must be, leading to guilt or frustration",
|
| 143 |
+
"examples": [
|
| 144 |
+
"I should always be productive",
|
| 145 |
+
"They should understand without me explaining",
|
| 146 |
+
"I must never make mistakes",
|
| 147 |
+
],
|
| 148 |
+
"reframing_strategies": [
|
| 149 |
+
"What would I prefer instead of 'should'?",
|
| 150 |
+
"Where did this rule come from?",
|
| 151 |
+
"What happens if this 'should' isn't met?",
|
| 152 |
+
],
|
| 153 |
+
"micro_actions": [
|
| 154 |
+
"Replace 'should' with 'would like to'",
|
| 155 |
+
"Question one 'should' rule's origin",
|
| 156 |
+
],
|
| 157 |
+
},
|
| 158 |
+
"emotional_reasoning": {
|
| 159 |
+
"code": "ER",
|
| 160 |
+
"name": "Emotional Reasoning",
|
| 161 |
+
"definition": "Believing something is true because it feels true",
|
| 162 |
+
"examples": [
|
| 163 |
+
"I feel worthless, so I must be worthless",
|
| 164 |
+
"I feel anxious, so there must be danger",
|
| 165 |
+
"I feel guilty, so I must have done something wrong",
|
| 166 |
+
],
|
| 167 |
+
"reframing_strategies": [
|
| 168 |
+
"What are the facts separate from feelings?",
|
| 169 |
+
"Have my feelings been wrong before?",
|
| 170 |
+
"What would I tell a friend feeling this way?",
|
| 171 |
+
],
|
| 172 |
+
"micro_actions": [
|
| 173 |
+
"List facts vs feelings about one situation",
|
| 174 |
+
"Notice when feelings didn't match reality",
|
| 175 |
+
],
|
| 176 |
+
},
|
| 177 |
+
"discounting_positives": {
|
| 178 |
+
"code": "DP",
|
| 179 |
+
"name": "Discounting Positives",
|
| 180 |
+
"definition": "Dismissing positive experiences or achievements as not counting",
|
| 181 |
+
"examples": [
|
| 182 |
+
"They only complimented me to be nice",
|
| 183 |
+
"I only succeeded because it was easy",
|
| 184 |
+
"Anyone could have done what I did",
|
| 185 |
+
],
|
| 186 |
+
"reframing_strategies": [
|
| 187 |
+
"What would it mean to fully accept this positive?",
|
| 188 |
+
"How do I explain others' successes?",
|
| 189 |
+
"What effort did I actually put in?",
|
| 190 |
+
],
|
| 191 |
+
"micro_actions": [
|
| 192 |
+
"Accept one compliment at face value",
|
| 193 |
+
"Write down your role in one success",
|
| 194 |
+
],
|
| 195 |
+
},
|
| 196 |
+
"jumping_to_conclusions": {
|
| 197 |
+
"code": "JC",
|
| 198 |
+
"name": "Jumping to Conclusions",
|
| 199 |
+
"definition": "Making negative assumptions without sufficient evidence",
|
| 200 |
+
"examples": [
|
| 201 |
+
"They didn't text back, they must hate me",
|
| 202 |
+
"I made one mistake, I'll be fired",
|
| 203 |
+
"They looked away, they must be bored",
|
| 204 |
+
],
|
| 205 |
+
"reframing_strategies": [
|
| 206 |
+
"What evidence do I have for this conclusion?",
|
| 207 |
+
"What are other possible explanations?",
|
| 208 |
+
"Am I confusing possibility with probability?",
|
| 209 |
+
],
|
| 210 |
+
"micro_actions": [
|
| 211 |
+
"List 3 alternative explanations",
|
| 212 |
+
"Test one assumption by asking directly",
|
| 213 |
+
],
|
| 214 |
+
},
|
| 215 |
+
"magnification_minimization": {
|
| 216 |
+
"code": "MM",
|
| 217 |
+
"name": "Magnification/Minimization",
|
| 218 |
+
"definition": "Exaggerating negatives or minimizing positives disproportionately",
|
| 219 |
+
"examples": [
|
| 220 |
+
"This tiny mistake ruins everything",
|
| 221 |
+
"My achievements don't really count",
|
| 222 |
+
"Their small flaw makes them terrible",
|
| 223 |
+
],
|
| 224 |
+
"reframing_strategies": [
|
| 225 |
+
"How would an outside observer rate this?",
|
| 226 |
+
"Am I using a double standard?",
|
| 227 |
+
"What's the actual size/importance of this?",
|
| 228 |
+
],
|
| 229 |
+
"micro_actions": [
|
| 230 |
+
"Rate importance on 1-10 scale",
|
| 231 |
+
"Ask someone else for perspective",
|
| 232 |
+
],
|
| 233 |
+
},
|
| 234 |
+
"overgeneralization": {
|
| 235 |
+
"code": "OG",
|
| 236 |
+
"name": "Overgeneralization",
|
| 237 |
+
"definition": "Making broad conclusions from single events",
|
| 238 |
+
"examples": [
|
| 239 |
+
"I failed once, I always fail",
|
| 240 |
+
"One person rejected me, nobody likes me",
|
| 241 |
+
"This always happens to me",
|
| 242 |
+
],
|
| 243 |
+
"reframing_strategies": [
|
| 244 |
+
"Is this always true, or just sometimes?",
|
| 245 |
+
"What are the exceptions to this pattern?",
|
| 246 |
+
"Am I using words like 'always' or 'never' accurately?",
|
| 247 |
+
],
|
| 248 |
+
"micro_actions": [
|
| 249 |
+
"Find 3 exceptions to the pattern",
|
| 250 |
+
"Replace 'always/never' with 'sometimes'",
|
| 251 |
+
],
|
| 252 |
+
},
|
| 253 |
+
}
|
| 254 |
+
|
| 255 |
+
|
| 256 |
+
def get_distortion_by_code(code: str) -> dict[str, Any] | None:
|
| 257 |
+
"""Get distortion details by its code."""
|
| 258 |
+
for distortion in COGNITIVE_DISTORTIONS.values():
|
| 259 |
+
if distortion["code"] == code:
|
| 260 |
+
return distortion
|
| 261 |
+
return None
|
| 262 |
+
|
| 263 |
+
|
| 264 |
+
def detect_distortions(thought_text: str) -> list:
|
| 265 |
+
"""
|
| 266 |
+
Analyze text for potential cognitive distortions.
|
| 267 |
+
Returns list of likely distortion codes with confidence scores.
|
| 268 |
+
"""
|
| 269 |
+
thought_lower = thought_text.lower()
|
| 270 |
+
detected = []
|
| 271 |
+
|
| 272 |
+
# All-or-nothing thinking
|
| 273 |
+
if any(
|
| 274 |
+
word in thought_lower
|
| 275 |
+
for word in [
|
| 276 |
+
"always",
|
| 277 |
+
"never",
|
| 278 |
+
"everyone",
|
| 279 |
+
"everything",
|
| 280 |
+
"none",
|
| 281 |
+
"nothing",
|
| 282 |
+
"completely",
|
| 283 |
+
"totally",
|
| 284 |
+
]
|
| 285 |
+
):
|
| 286 |
+
detected.append(("AO", 0.8))
|
| 287 |
+
|
| 288 |
+
# Fortune telling
|
| 289 |
+
future_words = ["will", "going to", "definitely", "won't", "can't", "surely"]
|
| 290 |
+
negative_outcomes = ["fail", "disaster", "terrible", "awful", "ruin", "mess up"]
|
| 291 |
+
if any(word in thought_lower for word in future_words) and any(
|
| 292 |
+
neg in thought_lower for neg in negative_outcomes
|
| 293 |
+
):
|
| 294 |
+
detected.append(("FT", 0.7))
|
| 295 |
+
|
| 296 |
+
# Should statements
|
| 297 |
+
if any(word in thought_lower for word in ["should", "must", "have to", "ought", "need to"]):
|
| 298 |
+
detected.append(("SH", 0.9))
|
| 299 |
+
|
| 300 |
+
# Labeling
|
| 301 |
+
if "i am" in thought_lower or "i'm" in thought_lower or "im " in thought_lower:
|
| 302 |
+
labels = ["stupid", "loser", "failure", "worthless", "idiot", "incompetent", "pathetic"]
|
| 303 |
+
if any(label in thought_lower for label in labels):
|
| 304 |
+
detected.append(("LB", 0.9))
|
| 305 |
+
|
| 306 |
+
# Mind reading
|
| 307 |
+
if any(
|
| 308 |
+
phrase in thought_lower
|
| 309 |
+
for phrase in ["they think", "he thinks", "she thinks", "everyone thinks", "people think"]
|
| 310 |
+
):
|
| 311 |
+
detected.append(("MW", 0.7))
|
| 312 |
+
|
| 313 |
+
# Catastrophizing
|
| 314 |
+
if any(
|
| 315 |
+
word in thought_lower
|
| 316 |
+
for word in ["disaster", "catastrophe", "ruin", "destroy", "horrible", "terrible", "worst"]
|
| 317 |
+
):
|
| 318 |
+
detected.append(("CT", 0.8))
|
| 319 |
+
|
| 320 |
+
# Emotional reasoning
|
| 321 |
+
if "i feel" in thought_lower and any(
|
| 322 |
+
word in thought_lower for word in ["so i must", "therefore", "that means"]
|
| 323 |
+
):
|
| 324 |
+
detected.append(("ER", 0.6))
|
| 325 |
+
|
| 326 |
+
# Sort by confidence and return
|
| 327 |
+
detected.sort(key=lambda x: x[1], reverse=True)
|
| 328 |
+
return detected
|
cbt_knowledge/reframing_tools.py
ADDED
|
@@ -0,0 +1,201 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Reframing Tools and Strategies
|
| 3 |
+
|
| 4 |
+
Functions for generating reframes, finding similar situations, and suggesting micro-actions.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import random
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def find_similar_situations(distortion_code: str, num_situations: int = 3) -> list[dict]:
|
| 11 |
+
"""
|
| 12 |
+
Find similar situations that involve the same cognitive distortion.
|
| 13 |
+
|
| 14 |
+
Args:
|
| 15 |
+
distortion_code: The cognitive distortion code
|
| 16 |
+
num_situations: Number of situations to return
|
| 17 |
+
|
| 18 |
+
Returns:
|
| 19 |
+
List of similar situation examples with reframes
|
| 20 |
+
"""
|
| 21 |
+
# Database of example situations for each distortion
|
| 22 |
+
situation_database = {
|
| 23 |
+
"AO": [
|
| 24 |
+
{
|
| 25 |
+
"situation": "Missing one deadline at work",
|
| 26 |
+
"distorted": "I'm a complete failure at my job",
|
| 27 |
+
"reframed": (
|
| 28 |
+
"I missed one deadline, but I've met many others. I can learn from this and "
|
| 29 |
+
"improve my time management."
|
| 30 |
+
),
|
| 31 |
+
},
|
| 32 |
+
{
|
| 33 |
+
"situation": "Friend cancels plans",
|
| 34 |
+
"distorted": "They hate me and never want to see me",
|
| 35 |
+
"reframed": (
|
| 36 |
+
"They cancelled once, which could be for many reasons. Our friendship has many "
|
| 37 |
+
"positive moments."
|
| 38 |
+
),
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"situation": "Making a mistake in presentation",
|
| 42 |
+
"distorted": "The entire presentation was a disaster",
|
| 43 |
+
"reframed": (
|
| 44 |
+
"I made one error, but conveyed most information well. The audience was still "
|
| 45 |
+
"engaged."
|
| 46 |
+
),
|
| 47 |
+
},
|
| 48 |
+
],
|
| 49 |
+
"FT": [
|
| 50 |
+
{
|
| 51 |
+
"situation": "Job interview tomorrow",
|
| 52 |
+
"distorted": "I'll definitely mess up and embarrass myself",
|
| 53 |
+
"reframed": (
|
| 54 |
+
"Interviews can go many ways. I've prepared and have relevant experience to "
|
| 55 |
+
"share."
|
| 56 |
+
),
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"situation": "Starting a new project",
|
| 60 |
+
"distorted": "This will never work out",
|
| 61 |
+
"reframed": (
|
| 62 |
+
"New projects have challenges and possibilities. I can adapt as I learn more."
|
| 63 |
+
),
|
| 64 |
+
},
|
| 65 |
+
{
|
| 66 |
+
"situation": "Asking someone out",
|
| 67 |
+
"distorted": "They'll definitely reject me",
|
| 68 |
+
"reframed": (
|
| 69 |
+
"They might say yes or no. Either way, I'm brave for trying and will "
|
| 70 |
+
"learn from it."
|
| 71 |
+
),
|
| 72 |
+
},
|
| 73 |
+
],
|
| 74 |
+
"CT": [
|
| 75 |
+
{
|
| 76 |
+
"situation": "Making a typo in an email",
|
| 77 |
+
"distorted": "This will ruin my professional reputation forever",
|
| 78 |
+
"reframed": (
|
| 79 |
+
"It's a small error that most people understand. My work quality speaks louder "
|
| 80 |
+
"than one typo."
|
| 81 |
+
),
|
| 82 |
+
},
|
| 83 |
+
{
|
| 84 |
+
"situation": "Feeling anxious at a party",
|
| 85 |
+
"distorted": "I'll have a panic attack and everyone will think I'm crazy",
|
| 86 |
+
"reframed": (
|
| 87 |
+
"I feel anxious, which is uncomfortable but manageable. I can take breaks if "
|
| 88 |
+
"needed."
|
| 89 |
+
),
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"situation": "Child struggles in one subject",
|
| 93 |
+
"distorted": "They'll never succeed in life",
|
| 94 |
+
"reframed": (
|
| 95 |
+
"They're having difficulty in one area while doing well in others. We can get "
|
| 96 |
+
"help for this specific challenge."
|
| 97 |
+
),
|
| 98 |
+
},
|
| 99 |
+
],
|
| 100 |
+
"SH": [
|
| 101 |
+
{
|
| 102 |
+
"situation": "Taking a rest day",
|
| 103 |
+
"distorted": "I should always be productive",
|
| 104 |
+
"reframed": (
|
| 105 |
+
"I'd like to be productive, and rest is part of sustainable productivity."
|
| 106 |
+
),
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"situation": "Asking for help",
|
| 110 |
+
"distorted": "I should handle everything myself",
|
| 111 |
+
"reframed": "I prefer independence, and asking for help when needed is a strength.",
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"situation": "Making a parenting mistake",
|
| 115 |
+
"distorted": "I should be a perfect parent",
|
| 116 |
+
"reframed": (
|
| 117 |
+
"I aim to be a good parent, and making mistakes is part of learning and "
|
| 118 |
+
"growing."
|
| 119 |
+
),
|
| 120 |
+
},
|
| 121 |
+
],
|
| 122 |
+
"MW": [
|
| 123 |
+
{
|
| 124 |
+
"situation": "Boss seems quiet",
|
| 125 |
+
"distorted": "They think I'm doing a bad job",
|
| 126 |
+
"reframed": (
|
| 127 |
+
"They seem quiet, which could mean many things - "
|
| 128 |
+
"busy, tired, or thinking about something else."
|
| 129 |
+
),
|
| 130 |
+
},
|
| 131 |
+
{
|
| 132 |
+
"situation": "People laughing nearby",
|
| 133 |
+
"distorted": "They're laughing at me",
|
| 134 |
+
"reframed": (
|
| 135 |
+
"People are laughing, likely about their own conversation. I don't have "
|
| 136 |
+
"evidence it's about me."
|
| 137 |
+
),
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"situation": "Partner is distant",
|
| 141 |
+
"distorted": "They're losing interest in me",
|
| 142 |
+
"reframed": (
|
| 143 |
+
"They seem distant today. They might be stressed about something. I could ask "
|
| 144 |
+
"how they're doing."
|
| 145 |
+
),
|
| 146 |
+
},
|
| 147 |
+
],
|
| 148 |
+
"LB": [
|
| 149 |
+
{
|
| 150 |
+
"situation": "Failed at a task",
|
| 151 |
+
"distorted": "I'm such a loser",
|
| 152 |
+
"reframed": (
|
| 153 |
+
"I struggled with this specific task. It doesn't define my worth as a person."
|
| 154 |
+
),
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"situation": "Someone was rude",
|
| 158 |
+
"distorted": "They're a horrible person",
|
| 159 |
+
"reframed": (
|
| 160 |
+
"They acted rudely in this moment. People are complex and have good and bad "
|
| 161 |
+
"moments."
|
| 162 |
+
),
|
| 163 |
+
},
|
| 164 |
+
{
|
| 165 |
+
"situation": "Made a social mistake",
|
| 166 |
+
"distorted": "I'm so awkward and weird",
|
| 167 |
+
"reframed": (
|
| 168 |
+
"I had an awkward moment, which happens to everyone. It's one small part of "
|
| 169 |
+
"who I am."
|
| 170 |
+
),
|
| 171 |
+
},
|
| 172 |
+
],
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
# Get situations for this distortion, or return generic ones
|
| 176 |
+
situations = situation_database.get(distortion_code, [])
|
| 177 |
+
|
| 178 |
+
if not situations:
|
| 179 |
+
# Return generic situations if specific ones not found
|
| 180 |
+
situations = [
|
| 181 |
+
{
|
| 182 |
+
"situation": "Challenging work situation",
|
| 183 |
+
"distorted": "Everything is going wrong",
|
| 184 |
+
"reframed": "This situation has both challenges and manageable aspects.",
|
| 185 |
+
},
|
| 186 |
+
{
|
| 187 |
+
"situation": "Social interaction difficulty",
|
| 188 |
+
"distorted": "I always mess up socially",
|
| 189 |
+
"reframed": "I had one difficult interaction among many neutral or positive ones.",
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"situation": "Personal setback",
|
| 193 |
+
"distorted": "I'll never recover from this",
|
| 194 |
+
"reframed": "This is a setback I can learn from and gradually move past.",
|
| 195 |
+
},
|
| 196 |
+
]
|
| 197 |
+
|
| 198 |
+
# Return requested number of situations (random selection if more available)
|
| 199 |
+
if len(situations) > num_situations:
|
| 200 |
+
return random.sample(situations, num_situations)
|
| 201 |
+
return situations
|
locales/en.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"app_title": "🧠 re-frame: Cognitive Reframing Assistant",
|
| 3 |
+
"app_description": "Using CBT principles to help you find balanced perspectives",
|
| 4 |
+
"welcome": {
|
| 5 |
+
"title": "Welcome",
|
| 6 |
+
"subtitle": "Find a kinder perspective",
|
| 7 |
+
"description": "Using ideas from Cognitive Behavioral Therapy (CBT), we help you notice thinking patterns and explore gentler, more balanced perspectives. You choose what fits.",
|
| 8 |
+
"how_it_works": "How it works",
|
| 9 |
+
"step1": "Share your thoughts",
|
| 10 |
+
"step1_desc": "Tell us what's on your mind - short is fine",
|
| 11 |
+
"step2": "Notice patterns",
|
| 12 |
+
"step2_desc": "We'll help identify thinking traps",
|
| 13 |
+
"step3": "Find balance",
|
| 14 |
+
"step3_desc": "Explore alternative perspectives that feel true",
|
| 15 |
+
"start_chat": "Start Chat",
|
| 16 |
+
"disclaimer": "Important: This is a self-help tool using CBT ideas. It is not psychotherapy, medical advice, or a crisis service.",
|
| 17 |
+
"privacy": "Privacy: We don't store any data beyond your current session."
|
| 18 |
+
},
|
| 19 |
+
"chat": {
|
| 20 |
+
"title": "Chat",
|
| 21 |
+
"placeholder": "Share what's on your mind...",
|
| 22 |
+
"send": "Send",
|
| 23 |
+
"clear": "New Session",
|
| 24 |
+
"thinking": "Thinking...",
|
| 25 |
+
"distortions_found": "Thinking patterns identified:",
|
| 26 |
+
"reframe_suggestion": "Alternative perspective:",
|
| 27 |
+
"similar_situations": "Similar situations others have reframed:",
|
| 28 |
+
"try_this": "You might try:",
|
| 29 |
+
"no_distortions": "Let me understand better. Can you tell me more about the specific thoughts going through your mind?"
|
| 30 |
+
},
|
| 31 |
+
"learn": {
|
| 32 |
+
"title": "Learn",
|
| 33 |
+
"select_distortion": "Select a thinking pattern to explore",
|
| 34 |
+
"definition": "What it is",
|
| 35 |
+
"examples": "Common Examples",
|
| 36 |
+
"strategies": "Questions to Consider",
|
| 37 |
+
"actions": "Small Steps to Try"
|
| 38 |
+
}
|
| 39 |
+
}
|
locales/es.json
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"app_title": "🧠 re-frame: Asistente de Reencuadre Cognitivo",
|
| 3 |
+
"app_description": "Usando principios de TCC para ayudarte a encontrar perspectivas equilibradas",
|
| 4 |
+
"welcome": {
|
| 5 |
+
"title": "Bienvenido",
|
| 6 |
+
"subtitle": "Encuentra una perspectiva más amable",
|
| 7 |
+
"description": "Usando ideas de la Terapia Cognitivo-Conductual (TCC), te ayudamos a notar patrones de pensamiento y explorar perspectivas más gentiles y equilibradas. Tú eliges lo que te sirva.",
|
| 8 |
+
"how_it_works": "Cómo funciona",
|
| 9 |
+
"step1": "Comparte tus pensamientos",
|
| 10 |
+
"step1_desc": "Cuéntanos qué piensas - breve también está bien",
|
| 11 |
+
"step2": "Detecta patrones",
|
| 12 |
+
"step2_desc": "Te ayudamos a identificar trampas mentales",
|
| 13 |
+
"step3": "Encuentra balance",
|
| 14 |
+
"step3_desc": "Explora perspectivas alternativas que se sientan verdaderas",
|
| 15 |
+
"start_chat": "Iniciar Chat",
|
| 16 |
+
"disclaimer": "Importante: Esta es una herramienta de autoayuda que usa ideas de TCC. No es psicoterapia, consejo médico, ni un servicio de crisis.",
|
| 17 |
+
"privacy": "Privacidad: No almacenamos datos más allá de tu sesión actual."
|
| 18 |
+
},
|
| 19 |
+
"chat": {
|
| 20 |
+
"title": "Chat",
|
| 21 |
+
"placeholder": "Comparte lo que piensas...",
|
| 22 |
+
"send": "Enviar",
|
| 23 |
+
"clear": "Nueva Sesión",
|
| 24 |
+
"thinking": "Pensando...",
|
| 25 |
+
"distortions_found": "Patrones de pensamiento identificados:",
|
| 26 |
+
"reframe_suggestion": "Perspectiva alternativa:",
|
| 27 |
+
"similar_situations": "Situaciones similares que otros han reencuadrado:",
|
| 28 |
+
"try_this": "Podrías intentar:",
|
| 29 |
+
"no_distortions": "Déjame entender mejor. ¿Puedes contarme más sobre los pensamientos específicos que pasan por tu mente?"
|
| 30 |
+
},
|
| 31 |
+
"learn": {
|
| 32 |
+
"title": "Aprender",
|
| 33 |
+
"select_distortion": "Selecciona un patrón de pensamiento para explorar",
|
| 34 |
+
"definition": "Qué es",
|
| 35 |
+
"examples": "Ejemplos Comunes",
|
| 36 |
+
"strategies": "Preguntas a Considerar",
|
| 37 |
+
"actions": "Pequeños Pasos a Intentar"
|
| 38 |
+
}
|
| 39 |
+
}
|
mypy.ini
ADDED
|
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[mypy]
|
| 2 |
+
python_version = 3.10
|
| 3 |
+
ignore_missing_imports = True
|
| 4 |
+
warn_unused_ignores = True
|
| 5 |
+
warn_redundant_casts = True
|
| 6 |
+
warn_unreachable = True
|
| 7 |
+
strict_optional = True
|
| 8 |
+
check_untyped_defs = True
|
| 9 |
+
exclude = ^reframe\.egg-info/|^build/|^dist/|^\.venv/
|
| 10 |
+
mypy_path = .
|
| 11 |
+
namespace_packages = True
|
| 12 |
+
explicit_package_bases = True
|
| 13 |
+
|
| 14 |
+
# Keep typing strictness reasonable for this project
|
| 15 |
+
disallow_untyped_defs = False
|
| 16 |
+
disallow_any_generics = False
|
| 17 |
+
|
| 18 |
+
# Don’t type-check tests strictly
|
| 19 |
+
[mypy-tests.*]
|
| 20 |
+
ignore_errors = True
|
requirements-dev.txt
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pytest>=7.0
|
| 2 |
+
ruff>=0.5.0
|
| 3 |
+
mypy>=1.8.0
|
| 4 |
+
-e .
|
requirements.txt
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Core dependencies for re-frame Gradio app (pinned for Spaces reproducibility)
|
| 2 |
+
gradio==5.47.2
|
| 3 |
+
|
| 4 |
+
# Use a recent hub client for streaming + MCP client support
|
| 5 |
+
huggingface-hub==0.35.1
|
| 6 |
+
|
| 7 |
+
# smolagents removed: not used in this Space
|
| 8 |
+
transformers==4.56.2
|
| 9 |
+
|
| 10 |
+
# For using HuggingFace inference API
|
| 11 |
+
accelerate==1.10.1
|
| 12 |
+
sentencepiece==0.2.1
|
| 13 |
+
protobuf==6.32.1
|
| 14 |
+
|
| 15 |
+
# Optional: For local model support
|
| 16 |
+
# If you plan to run local models inside the Space, uncomment torch.
|
| 17 |
+
# torch==2.8.0
|
| 18 |
+
|
| 19 |
+
# Utility libraries
|
| 20 |
+
python-dotenv==1.1.1
|
| 21 |
+
typing-extensions==4.15.0
|
ruff.toml
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
line-length = 100
|
| 2 |
+
target-version = "py310"
|
| 3 |
+
|
| 4 |
+
[lint]
|
| 5 |
+
select = ["E", "F", "I", "B", "UP"]
|
| 6 |
+
ignore = []
|
| 7 |
+
|
| 8 |
+
[format]
|
| 9 |
+
quote-style = "preserve"
|
| 10 |
+
indent-style = "space"
|
| 11 |
+
line-ending = "lf"
|
| 12 |
+
|
setup.cfg
ADDED
|
@@ -0,0 +1,16 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[metadata]
|
| 2 |
+
name = reframe
|
| 3 |
+
version = 0.0.0
|
| 4 |
+
description = Cognitive Reframing Tool
|
| 5 |
+
long_description = file: README.md
|
| 6 |
+
long_description_content_type = text/markdown
|
| 7 |
+
license = MIT
|
| 8 |
+
|
| 9 |
+
[options]
|
| 10 |
+
packages = find:
|
| 11 |
+
include_package_data = True
|
| 12 |
+
python_requires = >=3.10
|
| 13 |
+
|
| 14 |
+
[options.packages.find]
|
| 15 |
+
where = .
|
| 16 |
+
|
setup.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from setuptools import setup
|
| 2 |
+
|
| 3 |
+
if __name__ == "__main__":
|
| 4 |
+
setup()
|
tests/test_agent.py
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
import pytest
|
| 4 |
+
|
| 5 |
+
from agents import CBTAgent
|
| 6 |
+
|
| 7 |
+
requires_hf_token = pytest.mark.skipif(
|
| 8 |
+
not (os.getenv("HF_TOKEN") or os.getenv("HUGGINGFACEHUB_API_TOKEN")),
|
| 9 |
+
reason="HF Inference token not configured",
|
| 10 |
+
)
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
@requires_hf_token
|
| 14 |
+
def test_agent_analyze_thought_with_hf():
|
| 15 |
+
agent = CBTAgent()
|
| 16 |
+
analysis = agent.analyze_thought("I will fail the interview")
|
| 17 |
+
assert isinstance(analysis, dict)
|
| 18 |
+
codes = [c for c, _ in analysis.get("distortions", [])]
|
| 19 |
+
assert "FT" in codes
|
| 20 |
+
assert isinstance(analysis.get("reframe", ""), str) and analysis["reframe"]
|
| 21 |
+
assert isinstance(analysis.get("similar_situations", []), list)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
@requires_hf_token
|
| 25 |
+
def test_agent_generate_response_hf():
|
| 26 |
+
agent = CBTAgent()
|
| 27 |
+
resp = agent.generate_response("I will definitely mess up the interview")
|
| 28 |
+
assert isinstance(resp, str) and len(resp) > 0
|
tests/test_distortions.py
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from cbt_knowledge import detect_distortions
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def codes(text: str):
|
| 5 |
+
return {c for c, _ in detect_distortions(text)}
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def test_detect_all_or_nothing():
|
| 9 |
+
assert "AO" in codes("I always fail; nothing works")
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
def test_detect_fortune_telling():
|
| 13 |
+
assert "FT" in codes("I will fail the interview")
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def test_detect_should_statements():
|
| 17 |
+
assert "SH" in codes("I should always be productive")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
def test_detect_labeling():
|
| 21 |
+
assert "LB" in codes("I'm such an idiot")
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def test_no_false_positive_on_neutral_text():
|
| 25 |
+
assert detect_distortions("I went to the park and had a nice day.") == []
|
tests/test_reframing_tools.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from cbt_knowledge import find_similar_situations
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def test_find_similar_situations_returns_shape():
    """find_similar_situations honours num_situations and returns complete records.

    Each returned item must carry at least the 'situation', 'distorted'
    and 'reframed' keys.
    """
    items = find_similar_situations("AO", num_situations=2)
    # Split the compound assert so a failure pinpoints the broken property.
    assert isinstance(items, list)
    assert len(items) == 2
    # Set literal (not set([...])) avoids a throwaway list; `<=` on dict
    # keys is the idiomatic subset check.
    required_keys = {"situation", "distorted", "reframed"}
    for item in items:
        assert required_keys <= item.keys()
|
ui_components/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
UI Components for re-frame Gradio app
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from .landing import create_landing_tab
|
| 6 |
+
from .learn import create_learn_tab
|
| 7 |
+
|
| 8 |
+
__all__ = ['create_landing_tab', 'create_learn_tab']
|
ui_components/landing.py
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Landing page component for re-frame
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import gradio as gr
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def create_landing_tab(translations):
    """Render the landing/welcome tab content.

    Args:
        translations: mapping of locale keys (subtitle, description,
            how_it_works, step1..step3 and *_desc, disclaimer, privacy)
            to display strings.
    """
    with gr.Column():
        # Hero section
        gr.Markdown(f"## {translations['subtitle']}")
        gr.Markdown(translations['description'])

        # How it works: three numbered steps rendered side by side
        gr.Markdown(f"### {translations['how_it_works']}")
        with gr.Row():
            for step in (1, 2, 3):
                with gr.Column():
                    gr.Markdown(f"**{step}. {translations[f'step{step}']}**")
                    gr.Markdown(translations[f'step{step}_desc'])

        # Safety and privacy notices
        with gr.Row():
            gr.Markdown(f"ℹ️ **{translations['disclaimer']}**")
        with gr.Row():
            gr.Markdown(f"🔒 **{translations['privacy']}**")

        # Call to action
        gr.Markdown("---")
        gr.Markdown("### Ready to start?")
        gr.Markdown("Navigate to the **Chat** tab above to begin your session.")
|
ui_components/learn.py
ADDED
|
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Learn tab component for exploring cognitive distortions
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import gradio as gr
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
def create_learn_tab(translations, distortions_db):
    """Create the learn/education tab.

    Args:
        translations: mapping of locale keys (select_distortion, definition,
            examples, strategies, actions) to display strings.
        distortions_db: mapping of distortion codes to info dicts expected to
            contain 'name', 'definition', 'examples', 'reframing_strategies'
            and 'micro_actions' entries.
    """
    with gr.Column():
        gr.Markdown("### Explore Cognitive Distortions")
        gr.Markdown("Learn about common thinking patterns and how to work with them.")

        distortion_names = [info['name'] for info in distortions_db.values()]

        def show_distortion_info(distortion_name):
            """Return (definition, examples, strategies, actions) markdown for a name."""
            for info in distortions_db.values():
                if info['name'] != distortion_name:
                    continue
                definition = f"### {translations['definition']}\n{info['definition']}"
                examples = f"### {translations['examples']}\n" + "".join(
                    f"- \"{example}\"\n" for example in info['examples']
                )
                strategies = f"### {translations['strategies']}\n" + "".join(
                    f"- {strategy}\n" for strategy in info['reframing_strategies']
                )
                actions = f"### {translations['actions']}\n" + "".join(
                    f"- {action}\n" for action in info['micro_actions']
                )
                return definition, examples, strategies, actions
            # Unknown name: clear all four panels.
            return "", "", "", ""

        # Dropdown to select a distortion; defaults to the first entry.
        selected = gr.Dropdown(
            choices=distortion_names,
            label=translations['select_distortion'],
            value=distortion_names[0] if distortion_names else None,
        )

        # Pre-render the first distortion so the panels are not blank on load.
        # (Assigning `selected.value` after construction — as the previous
        # version did — has no effect on an already-built Gradio component.)
        initial = (
            show_distortion_info(distortion_names[0])
            if distortion_names
            else ("", "", "", "")
        )

        # Display area for distortion information, seeded with the initial text.
        with gr.Column():
            definition_display = gr.Markdown(initial[0])
            examples_display = gr.Markdown(initial[1])
            strategies_display = gr.Markdown(initial[2])
            actions_display = gr.Markdown(initial[3])

        # Refresh all four panels whenever the user picks another distortion.
        selected.change(
            show_distortion_info,
            inputs=[selected],
            outputs=[definition_display, examples_display, strategies_display, actions_display],
        )
|