Lyonel Tanganco commited on
Commit ·
d6c18ca
1
Parent(s): ab14d10
cleanup
Browse files- app.py +145 -34
- data/system_prompt.md +122 -17
- requirements.txt +10 -7
- src/chat.py +218 -95
- src/config.py +1 -5
- src/utils/profile.py +4 -1
- src/utils/resources.py +22 -19
app.py
CHANGED
|
@@ -1,5 +1,16 @@
|
|
| 1 |
"""
|
| 2 |
Gradio Web Interface for Harbor Treatment Navigation Chatbot
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
"""
|
| 4 |
|
| 5 |
import os
|
|
@@ -40,6 +51,20 @@ CSS = """
|
|
| 40 |
font-style: italic;
|
| 41 |
}
|
| 42 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 43 |
/* ── Cards ── */
|
| 44 |
.harbor-card {
|
| 45 |
background: #ffffff;
|
|
@@ -171,6 +196,23 @@ CSS = """
|
|
| 171 |
padding: 0.4rem 0.9rem !important;
|
| 172 |
}
|
| 173 |
.chat-back-btn button:hover { background: #f0fafa !important; }
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
"""
|
| 175 |
|
| 176 |
# ── Theme ─────────────────────────────────────────────────────────────────────
|
|
@@ -229,29 +271,40 @@ ZIPCODE_RE = re.compile(r"^\d{5}$")
|
|
| 229 |
|
| 230 |
|
| 231 |
def is_valid_zip(zipcode: str) -> bool:
|
|
|
|
| 232 |
return bool(ZIPCODE_RE.match(zipcode.strip()))
|
| 233 |
|
| 234 |
|
| 235 |
def _load_resources_once():
|
|
|
|
| 236 |
if not hasattr(_load_resources_once, "_cache"):
|
| 237 |
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 238 |
paths = [
|
| 239 |
os.path.join(current_dir, "references", "knowledge", "ma_resources.csv"),
|
| 240 |
-
os.path.join(current_dir, "references", "knowledge", "boston_resources.csv"),
|
| 241 |
]
|
| 242 |
_load_resources_once._cache = load_resources(paths)
|
| 243 |
return _load_resources_once._cache
|
| 244 |
|
| 245 |
|
| 246 |
def get_recommendations(zipcode: str) -> list[dict]:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 247 |
profile = create_empty_profile()
|
| 248 |
profile["logistics"]["zipcode"] = zipcode.strip()
|
|
|
|
| 249 |
resources = _load_resources_once()
|
| 250 |
filtered = filter_resources(resources, profile)
|
| 251 |
-
|
|
|
|
| 252 |
|
| 253 |
|
| 254 |
def format_recommendations(zipcode: str, results: list[dict]) -> str:
|
|
|
|
| 255 |
if not results:
|
| 256 |
return (
|
| 257 |
f"<div class='harbor-results'>"
|
|
@@ -265,10 +318,12 @@ def format_recommendations(zipcode: str, results: list[dict]) -> str:
|
|
| 265 |
items_html = ""
|
| 266 |
for r in results:
|
| 267 |
name = r.get("name", "Unknown Facility")
|
|
|
|
| 268 |
addr_parts = [r.get("address", ""), r.get("city", ""),
|
| 269 |
r.get("state", ""), r.get("zip", "")]
|
| 270 |
address = ", ".join(p.strip() for p in addr_parts if p.strip())
|
| 271 |
phone = r.get("phone", "").strip()
|
|
|
|
| 272 |
focus = r.get("primary_focus", "").strip()
|
| 273 |
type_label = ", ".join(
|
| 274 |
v.strip().replace("_", " ").title() for v in focus.split("|")
|
|
@@ -300,13 +355,30 @@ def format_recommendations(zipcode: str, results: list[dict]) -> str:
|
|
| 300 |
# ── App ───────────────────────────────────────────────────────────────────────
|
| 301 |
|
| 302 |
def create_chatbot():
|
| 303 |
-
|
| 304 |
-
|
| 305 |
-
|
| 306 |
-
def chat(message, history):
|
| 307 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 308 |
|
| 309 |
def handle_zip_submit(zipcode: str):
|
|
|
|
| 310 |
zipcode = zipcode.strip()
|
| 311 |
if not is_valid_zip(zipcode):
|
| 312 |
return gr.update(
|
|
@@ -314,31 +386,37 @@ def create_chatbot():
|
|
| 314 |
visible=True,
|
| 315 |
)
|
| 316 |
results = get_recommendations(zipcode)
|
|
|
|
|
|
|
| 317 |
if results:
|
| 318 |
print(f"[Harbor] Zip lookup ({zipcode}) — {len(results)} recommendation(s):")
|
| 319 |
for i, r in enumerate(results, 1):
|
| 320 |
print(f" {i}. {r.get('name', 'Unknown')} — {r.get('city', '')}, {r.get('state', '')} {r.get('zip', '')}")
|
| 321 |
else:
|
| 322 |
print(f"[Harbor] Zip lookup ({zipcode}) — no results found.")
|
| 323 |
-
return gr.update(value=format_recommendations(zipcode, results), visible=True)
|
| 324 |
|
| 325 |
-
|
| 326 |
-
return gr.update(visible=False), gr.update(visible=True)
|
| 327 |
|
| 328 |
def show_landing():
|
| 329 |
return gr.update(visible=True), gr.update(visible=False)
|
| 330 |
|
| 331 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 332 |
|
|
|
|
| 333 |
with gr.Column(visible=True) as landing_page:
|
| 334 |
with gr.Column(elem_classes="harbor-wrap"):
|
| 335 |
gr.HTML(HEADER_MD)
|
|
|
|
| 336 |
|
|
|
|
| 337 |
with gr.Group(elem_classes="harbor-card harbor-card-featured"):
|
| 338 |
-
gr.HTML("<div class='harbor-card-title'>
|
| 339 |
gr.HTML(
|
| 340 |
-
"<p>
|
| 341 |
-
"programs right away — no account needed.</p>"
|
| 342 |
)
|
| 343 |
with gr.Row():
|
| 344 |
zip_input = gr.Textbox(
|
|
@@ -354,10 +432,14 @@ def create_chatbot():
|
|
| 354 |
scale=1,
|
| 355 |
elem_classes="harbor-zip-btn",
|
| 356 |
)
|
|
|
|
|
|
|
| 357 |
results_html = gr.HTML(visible=False, elem_id="zip-results")
|
| 358 |
|
|
|
|
| 359 |
gr.HTML(CRISIS_CALLOUT_HTML)
|
| 360 |
|
|
|
|
| 361 |
with gr.Group(elem_classes="harbor-card"):
|
| 362 |
gr.HTML(CHATBOT_CARD_MD)
|
| 363 |
start_chat_btn = gr.Button(
|
|
@@ -369,7 +451,13 @@ def create_chatbot():
|
|
| 369 |
|
| 370 |
gr.HTML(FOOTER_MD)
|
| 371 |
|
|
|
|
| 372 |
with gr.Column(visible=False) as chat_page:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 373 |
with gr.Column(elem_classes="chat-header"):
|
| 374 |
back_btn = gr.Button(
|
| 375 |
"← Back to Home",
|
|
@@ -377,33 +465,56 @@ def create_chatbot():
|
|
| 377 |
variant="secondary",
|
| 378 |
elem_classes="chat-back-btn",
|
| 379 |
)
|
| 380 |
-
gr.
|
| 381 |
-
|
| 382 |
-
|
| 383 |
-
description=(
|
| 384 |
-
"Tell me a little about your situation and I'll help you find "
|
| 385 |
-
"treatment options that match your needs. Everything is confidential."
|
| 386 |
-
),
|
| 387 |
-
examples=[
|
| 388 |
-
"What treatment options are available near me?",
|
| 389 |
-
"I'm looking for outpatient help with alcohol use.",
|
| 390 |
-
"I need support but I don't have insurance.",
|
| 391 |
-
"How do I know which type of program is right for me?",
|
| 392 |
-
],
|
| 393 |
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 394 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 395 |
zip_btn.click(handle_zip_submit, inputs=zip_input, outputs=results_html)
|
| 396 |
zip_input.submit(handle_zip_submit, inputs=zip_input, outputs=results_html)
|
| 397 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 398 |
back_btn.click(show_landing, outputs=[landing_page, chat_page])
|
| 399 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 400 |
return demo
|
| 401 |
|
| 402 |
|
| 403 |
if __name__ == "__main__":
|
| 404 |
-
|
| 405 |
-
|
| 406 |
-
demo.launch()
|
| 407 |
-
except Exception as e:
|
| 408 |
-
import traceback
|
| 409 |
-
traceback.print_exc()
|
|
|
|
| 1 |
"""
|
| 2 |
Gradio Web Interface for Harbor Treatment Navigation Chatbot
|
| 3 |
+
|
| 4 |
+
Landing page offers three paths:
|
| 5 |
+
1. Quick Recommendations — enter a zip code, get nearby options inline
|
| 6 |
+
2. Talk to a Human — compact crisis callout with phone number
|
| 7 |
+
3. Get Personalized Advice — leads to the AI chatbot
|
| 8 |
+
|
| 9 |
+
Run locally:
|
| 10 |
+
python app.py
|
| 11 |
+
|
| 12 |
+
Access in browser:
|
| 13 |
+
http://localhost:7860
|
| 14 |
"""
|
| 15 |
|
| 16 |
import os
|
|
|
|
| 51 |
font-style: italic;
|
| 52 |
}
|
| 53 |
|
| 54 |
+
/* ── Location Banner ── */
|
| 55 |
+
.harbor-banner {
|
| 56 |
+
text-align: center;
|
| 57 |
+
font-size: 0.92rem;
|
| 58 |
+
font-weight: 600;
|
| 59 |
+
color: #0d6e6e;
|
| 60 |
+
background: #e6f7f7;
|
| 61 |
+
border: 1px solid #c8e6e6;
|
| 62 |
+
border-radius: 10px;
|
| 63 |
+
padding: 0.55rem 1rem;
|
| 64 |
+
margin-bottom: 1.5rem;
|
| 65 |
+
letter-spacing: 0.1px;
|
| 66 |
+
}
|
| 67 |
+
|
| 68 |
/* ── Cards ── */
|
| 69 |
.harbor-card {
|
| 70 |
background: #ffffff;
|
|
|
|
| 196 |
padding: 0.4rem 0.9rem !important;
|
| 197 |
}
|
| 198 |
.chat-back-btn button:hover { background: #f0fafa !important; }
|
| 199 |
+
|
| 200 |
+
/* ── Chat input area ── */
|
| 201 |
+
.gradio-chatinterface > div:last-child,
|
| 202 |
+
footer,
|
| 203 |
+
.chatbot-input-row,
|
| 204 |
+
[data-testid="chatbot"] ~ div {
|
| 205 |
+
padding: 0 1.25rem 1.25rem !important;
|
| 206 |
+
}
|
| 207 |
+
.gradio-chatinterface .input-row,
|
| 208 |
+
.gradio-chatinterface form {
|
| 209 |
+
margin: 0.75rem 2rem 1.5rem !important;
|
| 210 |
+
border: 1.5px solid #c8e6e6 !important;
|
| 211 |
+
border-radius: 14px !important;
|
| 212 |
+
padding: 0.5rem !important;
|
| 213 |
+
box-shadow: 0 2px 10px rgba(13, 110, 110, 0.07) !important;
|
| 214 |
+
background: #ffffff !important;
|
| 215 |
+
}
|
| 216 |
"""
|
| 217 |
|
| 218 |
# ── Theme ─────────────────────────────────────────────────────────────────────
|
|
|
|
| 271 |
|
| 272 |
|
| 273 |
def is_valid_zip(zipcode: str) -> bool:
|
| 274 |
+
"""Return True if zipcode is exactly 5 digits."""
|
| 275 |
return bool(ZIPCODE_RE.match(zipcode.strip()))
|
| 276 |
|
| 277 |
|
| 278 |
def _load_resources_once():
|
| 279 |
+
"""Load resource CSVs once and cache."""
|
| 280 |
if not hasattr(_load_resources_once, "_cache"):
|
| 281 |
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 282 |
paths = [
|
| 283 |
os.path.join(current_dir, "references", "knowledge", "ma_resources.csv"),
|
| 284 |
+
os.path.join(current_dir, "references", "knowledge", "resources", "boston_resources.csv"),
|
| 285 |
]
|
| 286 |
_load_resources_once._cache = load_resources(paths)
|
| 287 |
return _load_resources_once._cache
|
| 288 |
|
| 289 |
|
| 290 |
def get_recommendations(zipcode: str) -> list[dict]:
|
| 291 |
+
"""
|
| 292 |
+
Return a list of treatment recommendations for the given zip code.
|
| 293 |
+
|
| 294 |
+
Uses the same filter/score logic as the chatbot, but with a minimal
|
| 295 |
+
profile containing only the zipcode.
|
| 296 |
+
"""
|
| 297 |
profile = create_empty_profile()
|
| 298 |
profile["logistics"]["zipcode"] = zipcode.strip()
|
| 299 |
+
|
| 300 |
resources = _load_resources_once()
|
| 301 |
filtered = filter_resources(resources, profile)
|
| 302 |
+
top = score_resources(filtered, profile)
|
| 303 |
+
return top
|
| 304 |
|
| 305 |
|
| 306 |
def format_recommendations(zipcode: str, results: list[dict]) -> str:
|
| 307 |
+
"""Render recommendations as an HTML snippet for display."""
|
| 308 |
if not results:
|
| 309 |
return (
|
| 310 |
f"<div class='harbor-results'>"
|
|
|
|
| 318 |
items_html = ""
|
| 319 |
for r in results:
|
| 320 |
name = r.get("name", "Unknown Facility")
|
| 321 |
+
# Build address from parts
|
| 322 |
addr_parts = [r.get("address", ""), r.get("city", ""),
|
| 323 |
r.get("state", ""), r.get("zip", "")]
|
| 324 |
address = ", ".join(p.strip() for p in addr_parts if p.strip())
|
| 325 |
phone = r.get("phone", "").strip()
|
| 326 |
+
# Type from primary_focus
|
| 327 |
focus = r.get("primary_focus", "").strip()
|
| 328 |
type_label = ", ".join(
|
| 329 |
v.strip().replace("_", " ").title() for v in focus.split("|")
|
|
|
|
| 355 |
# ── App ───────────────────────────────────────────────────────────────────────
|
| 356 |
|
| 357 |
def create_chatbot():
|
| 358 |
+
"""Creates the Harbor interface with a landing page and chatbot."""
|
| 359 |
+
_load_resources_once() # pre-load CSVs so first zip lookup is fast
|
| 360 |
+
|
| 361 |
+
def chat(message, history, bot):
|
| 362 |
+
"""
|
| 363 |
+
Generate a response for the current message using a per-session Chatbot.
|
| 364 |
+
|
| 365 |
+
Args:
|
| 366 |
+
message (str): The current message from the user
|
| 367 |
+
history (list): List of previous message dicts for this session
|
| 368 |
+
bot (Chatbot): The per-session Chatbot instance (held in gr.State)
|
| 369 |
+
|
| 370 |
+
Returns:
|
| 371 |
+
tuple: (updated history, cleared input, bot)
|
| 372 |
+
"""
|
| 373 |
+
response = bot.get_response(message, history)
|
| 374 |
+
history = history + [
|
| 375 |
+
{"role": "user", "content": message},
|
| 376 |
+
{"role": "assistant", "content": response},
|
| 377 |
+
]
|
| 378 |
+
return history, gr.update(value=""), bot
|
| 379 |
|
| 380 |
def handle_zip_submit(zipcode: str):
|
| 381 |
+
"""Validate zip and return inline results HTML."""
|
| 382 |
zipcode = zipcode.strip()
|
| 383 |
if not is_valid_zip(zipcode):
|
| 384 |
return gr.update(
|
|
|
|
| 386 |
visible=True,
|
| 387 |
)
|
| 388 |
results = get_recommendations(zipcode)
|
| 389 |
+
|
| 390 |
+
# Log recommendations to console
|
| 391 |
if results:
|
| 392 |
print(f"[Harbor] Zip lookup ({zipcode}) — {len(results)} recommendation(s):")
|
| 393 |
for i, r in enumerate(results, 1):
|
| 394 |
print(f" {i}. {r.get('name', 'Unknown')} — {r.get('city', '')}, {r.get('state', '')} {r.get('zip', '')}")
|
| 395 |
else:
|
| 396 |
print(f"[Harbor] Zip lookup ({zipcode}) — no results found.")
|
|
|
|
| 397 |
|
| 398 |
+
return gr.update(value=format_recommendations(zipcode, results), visible=True)
|
|
|
|
| 399 |
|
| 400 |
def show_landing():
|
| 401 |
return gr.update(visible=True), gr.update(visible=False)
|
| 402 |
|
| 403 |
+
OPENING_MESSAGE = (
|
| 404 |
+
"How can I support you today? You can share anything about what you're dealing with—mental health concerns, alcohol or drug use, support for a loved one, or help finding treatment resources."
|
| 405 |
+
)
|
| 406 |
+
|
| 407 |
+
with gr.Blocks(title="Harbor") as demo:
|
| 408 |
|
| 409 |
+
# ── Landing Page ──────────────────────────────────────────────
|
| 410 |
with gr.Column(visible=True) as landing_page:
|
| 411 |
with gr.Column(elem_classes="harbor-wrap"):
|
| 412 |
gr.HTML(HEADER_MD)
|
| 413 |
+
gr.HTML("<div class='harbor-banner'>📍 Find options near you in the Greater Boston, Massachusetts area.</div>")
|
| 414 |
|
| 415 |
+
# Card 1 — Quick Recommendations (featured)
|
| 416 |
with gr.Group(elem_classes="harbor-card harbor-card-featured"):
|
| 417 |
+
gr.HTML("<div class='harbor-card-title'>🏠 Enter Your Zip Code</div>")
|
| 418 |
gr.HTML(
|
| 419 |
+
"<p>We'll show you nearby treatment programs right away, or talk to our chatbot below for better recommendations.</p>"
|
|
|
|
| 420 |
)
|
| 421 |
with gr.Row():
|
| 422 |
zip_input = gr.Textbox(
|
|
|
|
| 432 |
scale=1,
|
| 433 |
elem_classes="harbor-zip-btn",
|
| 434 |
)
|
| 435 |
+
# Results rendered outside the card so the loading spinner
|
| 436 |
+
# does not overlay the input card above.
|
| 437 |
results_html = gr.HTML(visible=False, elem_id="zip-results")
|
| 438 |
|
| 439 |
+
# Card 2 — Crisis callout (compact)
|
| 440 |
gr.HTML(CRISIS_CALLOUT_HTML)
|
| 441 |
|
| 442 |
+
# Card 3 — Chatbot
|
| 443 |
with gr.Group(elem_classes="harbor-card"):
|
| 444 |
gr.HTML(CHATBOT_CARD_MD)
|
| 445 |
start_chat_btn = gr.Button(
|
|
|
|
| 451 |
|
| 452 |
gr.HTML(FOOTER_MD)
|
| 453 |
|
| 454 |
+
# ── Chat Page ─────────────────────────────────────────────────
|
| 455 |
with gr.Column(visible=False) as chat_page:
|
| 456 |
+
# Per-session state: a fresh Chatbot() is created for each browser session.
|
| 457 |
+
# Clicking "Start a Conversation" also resets it, so no data carries over
|
| 458 |
+
# between conversations on the same tab.
|
| 459 |
+
chatbot_state = gr.State(Chatbot)
|
| 460 |
+
|
| 461 |
with gr.Column(elem_classes="chat-header"):
|
| 462 |
back_btn = gr.Button(
|
| 463 |
"← Back to Home",
|
|
|
|
| 465 |
variant="secondary",
|
| 466 |
elem_classes="chat-back-btn",
|
| 467 |
)
|
| 468 |
+
chatbot_display = gr.Chatbot(
|
| 469 |
+
value=[{"role": "assistant", "content": OPENING_MESSAGE}],
|
| 470 |
+
label="⚓ Harbor",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 471 |
)
|
| 472 |
+
with gr.Row():
|
| 473 |
+
msg_input = gr.Textbox(
|
| 474 |
+
placeholder="Type your message here…",
|
| 475 |
+
show_label=False,
|
| 476 |
+
scale=8,
|
| 477 |
+
container=False,
|
| 478 |
+
)
|
| 479 |
+
send_btn = gr.Button("Send →", variant="primary", scale=1)
|
| 480 |
|
| 481 |
+
def reset_bot_history(bot):
|
| 482 |
+
"""Reset bot state and chatbot history while chat page is still hidden."""
|
| 483 |
+
bot.reset()
|
| 484 |
+
return bot, [{"role": "assistant", "content": OPENING_MESSAGE}]
|
| 485 |
+
|
| 486 |
+
def show_chat_page():
|
| 487 |
+
"""Reveal chat page after bot has been reset — chatbot not in outputs, so no thinking indicator."""
|
| 488 |
+
return gr.update(visible=False), gr.update(visible=True)
|
| 489 |
+
|
| 490 |
+
# ── Events ────────────────────────────────────────────────────
|
| 491 |
zip_btn.click(handle_zip_submit, inputs=zip_input, outputs=results_html)
|
| 492 |
zip_input.submit(handle_zip_submit, inputs=zip_input, outputs=results_html)
|
| 493 |
+
|
| 494 |
+
start_chat_btn.click(
|
| 495 |
+
reset_bot_history,
|
| 496 |
+
inputs=[chatbot_state],
|
| 497 |
+
outputs=[chatbot_state, chatbot_display],
|
| 498 |
+
).then(
|
| 499 |
+
show_chat_page,
|
| 500 |
+
outputs=[landing_page, chat_page],
|
| 501 |
+
)
|
| 502 |
back_btn.click(show_landing, outputs=[landing_page, chat_page])
|
| 503 |
|
| 504 |
+
send_btn.click(
|
| 505 |
+
chat,
|
| 506 |
+
inputs=[msg_input, chatbot_display, chatbot_state],
|
| 507 |
+
outputs=[chatbot_display, msg_input, chatbot_state],
|
| 508 |
+
)
|
| 509 |
+
msg_input.submit(
|
| 510 |
+
chat,
|
| 511 |
+
inputs=[msg_input, chatbot_display, chatbot_state],
|
| 512 |
+
outputs=[chatbot_display, msg_input, chatbot_state],
|
| 513 |
+
)
|
| 514 |
+
|
| 515 |
return demo
|
| 516 |
|
| 517 |
|
| 518 |
if __name__ == "__main__":
|
| 519 |
+
demo = create_chatbot()
|
| 520 |
+
demo.launch(share=True, theme=THEME, css=CSS)
|
|
|
|
|
|
|
|
|
|
|
|
data/system_prompt.md
CHANGED
|
@@ -10,6 +10,12 @@ Scope / Boundaries
|
|
| 10 |
|
| 11 |
Only assist with discovering and accessing mental health or substance use treatment services. If users ask unrelated questions, politely redirect the conversation back to treatment support.
|
| 12 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 13 |
If a user indicates they may be in immediate crisis or danger, pause the normal conversation and encourage them to contact the Behavioral Health Help Line (BHHL).
|
| 14 |
|
| 15 |
Crisis support information:
|
|
@@ -26,18 +32,48 @@ Tone / Style
|
|
| 26 |
|
| 27 |
Be warm, patient, supportive, and non-judgmental. Use plain language and avoid clinical jargon.
|
| 28 |
|
| 29 |
-
Use motivational interviewing techniques:
|
|
|
|
|
|
|
| 30 |
|
| 31 |
-
\- Ask
|
| 32 |
|
| 33 |
-
\-
|
| 34 |
|
| 35 |
-
\-
|
| 36 |
|
| 37 |
-
\-
|
|
|
|
|
|
|
| 38 |
|
| 39 |
Respect the user’s autonomy and pace.
|
| 40 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
Key Facts
|
| 42 |
|
| 43 |
Users often hesitate to seek treatment due to stigma, fear, cost concerns, or uncertainty about what treatment involves.
|
|
@@ -68,44 +104,115 @@ Behavior Rules
|
|
| 68 |
|
| 69 |
1\. Engage
|
| 70 |
|
| 71 |
-
Build rapport
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
2\. Educate
|
| 74 |
|
| 75 |
-
If the user is uncertain about treatment, normalize help-seeking and explain what treatment typically involves.
|
| 76 |
|
| 77 |
3\. Assess
|
| 78 |
|
| 79 |
-
|
| 80 |
|
| 81 |
-
|
| 82 |
|
| 83 |
-
\-
|
| 84 |
|
| 85 |
-
\-
|
| 86 |
|
| 87 |
-
\-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 88 |
|
| 89 |
-
\-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 90 |
|
| 91 |
4\. Match
|
| 92 |
|
| 93 |
Present 3–5 treatment facilities that best match the user’s needs. Explain why each facility fits.
|
| 94 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 95 |
5\. Empower
|
| 96 |
|
| 97 |
Explain what happens when contacting a facility and what the intake process usually looks like.
|
| 98 |
|
| 99 |
6\. Plan
|
| 100 |
|
| 101 |
-
Encourage a concrete next step
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 102 |
|
| 103 |
7\. Follow-through
|
| 104 |
|
| 105 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 106 |
|
| 107 |
Guardrails
|
| 108 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
Source of Truth
|
| 110 |
|
| 111 |
Only recommend facilities that come from an approved, up-to-date treatment directory or database provided to the assistant. Do not invent, infer, or guess facility names, addresses, phone numbers, hours, or services. If verified facility data is unavailable, say so clearly and offer general guidance instead.
|
|
@@ -172,8 +279,6 @@ Call or text: 833-773-2445
|
|
| 172 |
|
| 173 |
Trained counselors can talk with you and help figure out the next step.
|
| 174 |
|
| 175 |
-
If you'd like, you can also tell me a little about what's happening and I can help you think through what to do next.
|
| 176 |
-
|
| 177 |
Output Format
|
| 178 |
|
| 179 |
When recommending facilities, present them clearly:
|
|
|
|
| 10 |
|
| 11 |
Only assist with discovering and accessing mental health or substance use treatment services. If users ask unrelated questions, politely redirect the conversation back to treatment support.
|
| 12 |
|
| 13 |
+
**Geographic Scope — Massachusetts only.** This service covers treatment facilities in Massachusetts, USA. If a user asks for help finding facilities outside Massachusetts, let them know clearly that this tool only covers Massachusetts. Do not recommend, name, or describe facilities in any other state or country. If the user's location turns out to be outside Massachusetts, acknowledge this limitation and offer to help if they are open to Massachusetts options.
|
| 14 |
+
|
| 15 |
+
**Do not provide advice on medications, supplements, or eating/dietary behaviors.** If a user raises these topics, acknowledge what they've shared, affirm that these are important concerns, and explain that a treatment provider is the right person to help with those specifics — then redirect toward finding that provider.
|
| 16 |
+
|
| 17 |
+
Your role is to build enough rapport to understand the user's needs and then match them to appropriate treatment. Do not attempt to process trauma or substitute for clinical care. Keep conversations moving toward matching — warmly but consistently.
|
| 18 |
+
|
| 19 |
If a user indicates they may be in immediate crisis or danger, pause the normal conversation and encourage them to contact the Behavioral Health Help Line (BHHL).
|
| 20 |
|
| 21 |
Crisis support information:
|
|
|
|
| 32 |
|
| 33 |
Be warm, patient, supportive, and non-judgmental. Use plain language and avoid clinical jargon.
|
| 34 |
|
| 35 |
+
Use motivational interviewing techniques throughout every exchange:
|
| 36 |
+
|
| 37 |
+
\- **Reflect before you ask.** After each user message, acknowledge what they shared before moving to the next question. Mirror their language. Example: "It sounds like things have been really hard lately — thank you for trusting me with that."
|
| 38 |
|
| 39 |
+
\- **Elicit change talk early.** Ask what brought the user here today and what they’re hoping for, before asking about logistics. Example: "What made you decide to reach out today?" or "What would feel different in your life if you got support?"
|
| 40 |
|
| 41 |
+
\- **Affirm proactively and specifically.** Don’t wait for a milestone. Recognize effort as it happens. Example: "Reaching out takes real courage." "It sounds like you’ve been carrying a lot — you don’t have to figure this out alone." "That’s an important thing to know about yourself."
|
| 42 |
|
| 43 |
+
\- **Normalize ambivalence.** If the user seems uncertain or hesitant, don’t push. Explore with curiosity: "It makes complete sense to have mixed feelings about this. What feels hardest about taking that step?"
|
| 44 |
|
| 45 |
+
\- **Roll with resistance.** If the user pushes back or isn’t ready, reflect their perspective and affirm their autonomy. Never argue or lecture.
|
| 46 |
+
|
| 47 |
+
\- **Avoid lecturing, pushing, or shaming.**
|
| 48 |
|
| 49 |
Respect the user’s autonomy and pace.
|
| 50 |
|
| 51 |
+
Conversation Pacing
|
| 52 |
+
|
| 53 |
+
Ask only ONE question at a time. Never stack multiple questions in a single response. Wait for the user to answer before asking the next question.
|
| 54 |
+
|
| 55 |
+
**Matching Pacing Rule**: Do not spend more than 2–3 exchanges solely on rapport-building. By your 3rd or 4th response, you must begin collecting information needed for treatment matching (type of help, preferred setting, insurance/payment, location). You can do this warmly — weave the question into the conversation naturally — but do not delay beyond the 4th exchange.
|
| 56 |
+
|
| 57 |
+
**NEVER use multiple-choice options (e.g., A/B/C lists, numbered menus, or bulleted option lists) in ANY follow-up response.** The opening message is the only place a structured menu appears. Every question you ask after that must be open-ended and conversational. This rule has no exceptions — not for treatment setting, insurance type, or any other field.
|
| 58 |
+
|
| 59 |
+
Do Not Assume User Details
|
| 60 |
+
|
| 61 |
+
Never assume, infer, or invent details about the user that they have not explicitly stated. This includes — but is not limited to — veteran status, insurance type, language preference, identity factors, location, substances used, or treatment history. If you need a piece of information to make a good recommendation, ask for it directly. Only reference information the user has already told you in this conversation.
|
| 62 |
+
|
| 63 |
+
Handle Impossible or Contradictory User Details
|
| 64 |
+
|
| 65 |
+
If a user provides information that is impossible, highly unlikely, internally inconsistent, clearly joking, or not usable for treatment matching (for example, "I am 3 years old," "I am 200 years old," or "find treatment on Mars"), do not proceed as if the information were correct. Do not shame, mock, or accuse the user of lying. Instead, briefly acknowledge the confusion, state that you may have misunderstood, and ask one calm open-ended question to clarify the relevant fact before continuing. Only continue with treatment matching once the information is realistic enough to support a safe and useful recommendation.
|
| 66 |
+
|
| 67 |
+
Examples of when to clarify before continuing: impossible ages, contradictory statements, fake locations, non-human identities, clearly unserious treatment requests, or combinations of details that make matching unreliable.
|
| 68 |
+
|
| 69 |
+
Preferred response style:
|
| 70 |
+
|
| 71 |
+
\- "I may have misunderstood that. Could you tell me the age of the person who needs help?"
|
| 72 |
+
|
| 73 |
+
\- "I want to make sure I understood correctly. What city are you looking for treatment in?"
|
| 74 |
+
|
| 75 |
+
\- "Some of the details seem inconsistent, so I want to check before I suggest anything."
|
| 76 |
+
|
| 77 |
Key Facts
|
| 78 |
|
| 79 |
Users often hesitate to seek treatment due to stigma, fear, cost concerns, or uncertainty about what treatment involves.
|
|
|
|
| 104 |
|
| 105 |
1\. Engage
|
| 106 |
|
| 107 |
+
Build genuine rapport before collecting any logistics. Spend at least 1–2 exchanges understanding what brought the user here and what they’re hoping for.
|
| 108 |
+
|
| 109 |
+
Begin by acknowledging the courage it takes to reach out. Ask open-ended questions about their situation and goals — not about insurance or zip code. Examples:
|
| 110 |
+
|
| 111 |
+
\- "What’s been going on for you lately?"
|
| 112 |
+
|
| 113 |
+
\- "What made today the day you decided to look into this?"
|
| 114 |
+
|
| 115 |
+
\- "Who are you hoping to get support for — yourself, or someone you care about?"
|
| 116 |
+
|
| 117 |
+
Only move to assessment after the user feels heard.
|
| 118 |
|
| 119 |
2\. Educate
|
| 120 |
|
| 121 |
+
If the user is uncertain about treatment, normalize help-seeking and explain what specific treatment options are available and what treatment typically involves.
|
| 122 |
|
| 123 |
3\. Assess
|
| 124 |
|
| 125 |
+
If a USER PROFILE section appears above, treat those fields as already confirmed — do NOT ask the user again for any information already listed there. Only ask for fields that are still missing.
|
| 126 |
|
| 127 |
+
Before presenting any facility recommendations, make sure you have gathered all four of the following pieces of information. Collect them one at a time — do not ask more than one question per message. Ask each as a plain open-ended question — do NOT list options like A/B/C or bullet choices:
|
| 128 |
|
| 129 |
+
\- **Type of help needed** (substance use, mental health, or both)
|
| 130 |
|
| 131 |
+
\- **Preferred treatment setting** (outpatient, intensive outpatient, residential, or telehealth)
|
| 132 |
|
| 133 |
+
\- **Payment method or insurance** (private insurance, Medicaid/Medicare, VA/TRICARE, or uninsured/self-pay)
|
| 134 |
+
|
| 135 |
+
\- **Location** (zip code or city/region)
|
| 136 |
+
|
| 137 |
+
After collecting those four, also ask about any special preferences that may affect matching:
|
| 138 |
+
|
| 139 |
+
\- Language preference
|
| 140 |
|
| 141 |
+
\- LGBTQ+ affirming care
|
| 142 |
+
|
| 143 |
+
\- Veterans services
|
| 144 |
+
|
| 145 |
+
\- Adolescent or youth services
|
| 146 |
+
|
| 147 |
+
\- Pregnancy-related or postpartum care
|
| 148 |
+
|
| 149 |
+
Frame each assessment question as caring about their experience, not collecting data. Do not ask bare factual questions. Instead:
|
| 150 |
+
|
| 151 |
+
\- Rather than "What is your insurance?" → "I want to make sure we find options that actually work for your situation — do you have insurance, or would you be looking at other ways to cover the cost?"
|
| 152 |
+
|
| 153 |
+
\- Rather than "What treatment setting do you prefer?" → "Some people find it works better to go to a program during the day and come home at night — others need more intensive support. What feels right for you, or what has worked before?"
|
| 154 |
+
|
| 155 |
+
After collecting each piece of information, briefly acknowledge it before asking the next question.
|
| 156 |
+
|
| 157 |
+
Do NOT present facility recommendations until all four required fields above have been collected — unless the user explicitly asks to see options early (e.g., "just show me what’s available" or "can you give me recommendations now?"). In that case, present what you have and note that results may improve with more information.
|
| 158 |
|
| 159 |
4\. Match
|
| 160 |
|
| 161 |
Present 3–5 treatment facilities that best match the user’s needs. Explain why each facility fits.
|
| 162 |
|
| 163 |
+
Immediately after listing the facilities, ask the user which one they’d like to reach out to (open-ended — do not list options). Then, in that same response, include both of the following:
|
| 164 |
+
|
| 165 |
+
**Call script** — a short, plain-language script the user can read aloud when they call, for example:
|
| 166 |
+
|
| 167 |
+
> "Hi, my name is [Name]. I’m looking for [type of support] and I was wondering if you’re currently accepting new clients. I have [insurance / am self-pay]. Can you tell me about your intake process?"
|
| 168 |
+
|
| 169 |
+
Tailor the script to reflect what the user has shared (their insurance, type of support needed, etc.).
|
| 170 |
+
|
| 171 |
+
**What to expect on the call** — briefly explain what the facility will likely ask, such as:
|
| 172 |
+
|
| 173 |
+
\- Their name and contact information
|
| 174 |
+
\- The type of help they’re seeking (substance use, mental health, or both)
|
| 175 |
+
\- Their insurance provider or ability to pay
|
| 176 |
+
\- Their location or transportation situation
|
| 177 |
+
\- Whether they’ve been in treatment before
|
| 178 |
+
\- Any urgent safety concerns or current substance use
|
| 179 |
+
|
| 180 |
+
Reassure the user that these questions are routine, not judgmental, and that they don’t have to have all the answers ready.
|
| 181 |
+
|
| 182 |
5\. Empower
|
| 183 |
|
| 184 |
Explain what happens when contacting a facility and what the intake process usually looks like.
|
| 185 |
|
| 186 |
6\. Plan
|
| 187 |
|
| 188 |
+
Encourage a concrete next step and help the user form a specific plan — not just an intention. People who decide *when*, *where*, and *how* they will act are significantly more likely to follow through.
|
| 189 |
+
|
| 190 |
+
Ask:
|
| 191 |
+
|
| 192 |
+
\- "When do you think you could make that call — tomorrow morning, or later this week?"
|
| 193 |
+
|
| 194 |
+
\- "Where will you be when you call? Is there a place where you'd feel comfortable making it?"
|
| 195 |
+
|
| 196 |
+
\- "Is there anything that might get in the way of making that call?" Then help them think through how to handle that barrier.
|
| 197 |
+
|
| 198 |
+
Provide a simple call script tailored to what the user has shared.
|
| 199 |
|
| 200 |
7\. Follow-through
|
| 201 |
|
| 202 |
+
Close with genuine encouragement that names what the user has done in this conversation. Reference something specific they said. Example: "You came here today knowing something needed to change — that matters."
|
| 203 |
+
|
| 204 |
+
Ask one barrier-coping question: "What might make it hard to follow through, and what could you do if that happens?" Help them think through a backup plan.
|
| 205 |
+
|
| 206 |
+
Remind them they can come back if they need more help or if circumstances change.
|
| 207 |
|
| 208 |
Guardrails
|
| 209 |
|
| 210 |
+
Scope of Role
|
| 211 |
+
|
| 212 |
+
Remember at all times: your primary purpose is to match users to treatment options, not to provide therapy, counseling, or clinical guidance. Rapport-building is a means to that end — use it to earn trust so users will engage with the matching process. If a conversation drifts into extended emotional processing, gently acknowledge the user's feelings and redirect toward identifying their treatment needs. You are a navigator, not a clinician.
|
| 213 |
+
|
| 214 |
+
Never recommend, advise on, or comment on specific medications (prescription or over-the-counter), dosages, or dietary/eating behaviors. If a user asks about these topics or describes struggles with them (e.g., disordered eating, self-medicating), validate their concern and let them know a treatment provider is the right person to address it — then continue guiding them toward finding that provider.
|
| 215 |
+
|
| 216 |
Source of Truth
|
| 217 |
|
| 218 |
Only recommend facilities that come from an approved, up-to-date treatment directory or database provided to the assistant. Do not invent, infer, or guess facility names, addresses, phone numbers, hours, or services. If verified facility data is unavailable, say so clearly and offer general guidance instead.
|
|
|
|
| 279 |
|
| 280 |
Trained counselors can talk with you and help figure out the next step.
|
| 281 |
|
|
|
|
|
|
|
| 282 |
Output Format
|
| 283 |
|
| 284 |
When recommending facilities, present them clearly:
|
requirements.txt
CHANGED
|
@@ -1,7 +1,10 @@
|
|
| 1 |
-
|
| 2 |
-
transformers>=
|
| 3 |
-
|
| 4 |
-
accelerate>=0.
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
torch>=2.1.0
|
| 2 |
+
transformers>=4.34.0
|
| 3 |
+
datasets>=2.14.0
|
| 4 |
+
accelerate>=0.24.0
|
| 5 |
+
sentencepiece>=0.1.99
|
| 6 |
+
gradio>=3.50.0
|
| 7 |
+
huggingface-hub>=0.19.0
|
| 8 |
+
numpy<2.0.0
|
| 9 |
+
ipywidgets>=8.0.0
|
| 10 |
+
python-dotenv>=1.1.0
|
src/chat.py
CHANGED
|
@@ -1,124 +1,247 @@
|
|
|
|
|
|
|
|
| 1 |
import os
|
| 2 |
-
import
|
| 3 |
-
from
|
| 4 |
-
from src.
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
)
|
| 9 |
-
|
| 10 |
-
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
print(f"[Harbor] Loading model: {model_id}")
|
| 15 |
-
tokenizer = AutoTokenizer.from_pretrained(model_id, token=HF_TOKEN)
|
| 16 |
-
|
| 17 |
-
if torch.cuda.is_available():
|
| 18 |
-
dtype = torch.bfloat16
|
| 19 |
-
device_map = "auto"
|
| 20 |
-
device = None
|
| 21 |
-
device_label = "CUDA"
|
| 22 |
-
elif torch.backends.mps.is_available():
|
| 23 |
-
# bitsandbytes does not support MPS; float16 on 18 GB can OOM.
|
| 24 |
-
# Fall back to CPU with float32.
|
| 25 |
-
dtype = torch.float32
|
| 26 |
-
device_map = None
|
| 27 |
-
device = -1
|
| 28 |
-
device_label = "CPU"
|
| 29 |
-
else:
|
| 30 |
-
dtype = torch.float32
|
| 31 |
-
device_map = None
|
| 32 |
-
device = -1
|
| 33 |
-
device_label = "CPU"
|
| 34 |
-
|
| 35 |
-
model = AutoModelForCausalLM.from_pretrained(
|
| 36 |
-
model_id,
|
| 37 |
-
dtype=dtype,
|
| 38 |
-
device_map=device_map,
|
| 39 |
-
token=HF_TOKEN,
|
| 40 |
-
)
|
| 41 |
-
|
| 42 |
-
pipe = pipeline(
|
| 43 |
-
"text-generation",
|
| 44 |
-
model=model,
|
| 45 |
-
tokenizer=tokenizer,
|
| 46 |
-
device=device,
|
| 47 |
-
)
|
| 48 |
-
print(f"[Harbor] Model ready on {device_label}: {model_id}")
|
| 49 |
-
return pipe, tokenizer
|
| 50 |
|
| 51 |
|
| 52 |
class Chatbot:
|
| 53 |
|
| 54 |
def __init__(self):
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
current_dir = os.path.dirname(os.path.abspath(__file__))
|
| 61 |
data_dir = os.path.join(current_dir, '..', 'data')
|
| 62 |
self.profile_schema = load_schema(os.path.join(data_dir, 'user_profile_schema.json'))
|
| 63 |
self.user_profile = create_empty_profile()
|
| 64 |
-
|
| 65 |
knowledge_dir = os.path.join(data_dir, '..', 'references', 'knowledge')
|
| 66 |
-
|
| 67 |
os.path.join(knowledge_dir, 'ma_resources.csv'),
|
| 68 |
-
os.path.join(knowledge_dir, 'boston_resources.csv'),
|
| 69 |
-
]
|
|
|
|
| 70 |
|
| 71 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
updates = extract_profile_updates(self.profile_schema, user_input)
|
| 73 |
merge_profile(self.user_profile, updates)
|
| 74 |
|
| 75 |
-
def format_prompt(self, user_input
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
current_dir = os.path.dirname(os.path.abspath(__file__))
|
|
|
|
|
|
|
| 77 |
system_prompt_path = os.path.join(current_dir, '../data/system_prompt.md')
|
| 78 |
with open(system_prompt_path, 'r', encoding='utf-8') as f:
|
| 79 |
system_prompt = f.read().strip()
|
| 80 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
self.update_profile(user_input)
|
|
|
|
|
|
|
| 82 |
profile_summary = profile_to_summary(self.user_profile)
|
| 83 |
|
|
|
|
| 84 |
system_content = system_prompt
|
| 85 |
if profile_summary:
|
| 86 |
-
system_content
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
temperature=0.7,
|
| 105 |
-
do_sample=True,
|
| 106 |
-
return_full_text=False,
|
| 107 |
)
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
filtered = filter_resources(self.resources, self.user_profile)
|
| 111 |
-
top_resources = score_resources(filtered, self.user_profile)
|
| 112 |
-
recommendations = format_recommendations(top_resources)
|
| 113 |
-
|
| 114 |
-
if top_resources:
|
| 115 |
-
print(f"[Harbor] {len(top_resources)} recommendation(s) for current profile:")
|
| 116 |
-
for i, r in enumerate(top_resources, 1):
|
| 117 |
-
print(f" {i}. {r.get('name', 'Unknown')} — {r.get('city', '')}, {r.get('state', '')} {r.get('zip', '')}")
|
| 118 |
-
else:
|
| 119 |
-
print("[Harbor] No recommendations matched current profile.")
|
| 120 |
-
|
| 121 |
-
if recommendations:
|
| 122 |
-
response += "\n\n" + recommendations
|
| 123 |
-
|
| 124 |
-
return response
|
|
|
|
| 1 |
+
from huggingface_hub import InferenceClient
|
| 2 |
+
from src.config import BASE_MODEL, MY_MODEL, HF_TOKEN
|
| 3 |
import os
|
| 4 |
+
from src.utils.tags import tag_user_input
|
| 5 |
+
from src.utils.profile import load_schema, create_empty_profile, extract_profile_updates, merge_profile, profile_to_summary
|
| 6 |
+
from src.utils.resources import load_resources, filter_resources, score_resources, format_resources_for_context
|
| 7 |
+
|
| 8 |
+
RECOMMENDATION_KEYWORDS = [
|
| 9 |
+
"show me", "show options", "show recommendations", "give me options",
|
| 10 |
+
"what options", "what facilities", "find me", "recommend", "recommendations",
|
| 11 |
+
"options near", "what's available", "what is available", "what are my options",
|
| 12 |
+
"just show", "see options", "see recommendations", "list options",
|
| 13 |
+
"i want to see", "can you show", "what do you have",
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
REQUIRED_PROFILE_FIELDS = [
|
| 17 |
+
("clinical", "primary_focus"), # type of help
|
| 18 |
+
("preferences", "setting"), # treatment setting
|
| 19 |
+
("logistics", "insurance"), # payment / insurance
|
| 20 |
+
]
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def _has_enough_for_recommendation(profile: dict) -> bool:
|
| 24 |
+
"""Return True when the four key fields are collected."""
|
| 25 |
+
for category, field in REQUIRED_PROFILE_FIELDS:
|
| 26 |
+
value = profile.get(category, {}).get(field)
|
| 27 |
+
if not value:
|
| 28 |
+
return False
|
| 29 |
+
# location: either zipcode or region
|
| 30 |
+
zipcode = profile.get("logistics", {}).get("zipcode")
|
| 31 |
+
region = profile.get("logistics", {}).get("region")
|
| 32 |
+
if not zipcode and not region:
|
| 33 |
+
return False
|
| 34 |
+
return True
|
| 35 |
+
|
| 36 |
+
|
| 37 |
+
def _user_wants_recommendations(user_input: str) -> bool:
|
| 38 |
+
"""Return True if the user is explicitly asking for recommendations."""
|
| 39 |
+
lower = user_input.lower()
|
| 40 |
+
return any(kw in lower for kw in RECOMMENDATION_KEYWORDS)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
CRISIS_KEYWORDS = [
|
| 44 |
+
"suicid", "kill myself", "want to die", "end my life", "take my life",
|
| 45 |
+
"don't want to live", "dont want to live", "no reason to live",
|
| 46 |
+
"better off dead", "self-harm", "self harm", "hurt myself", "cutting myself",
|
| 47 |
+
"overdose", "overdosing",
|
| 48 |
+
]
|
| 49 |
+
|
| 50 |
+
CRISIS_RESPONSE = (
|
| 51 |
+
"I'm really sorry you're going through something this difficult. "
|
| 52 |
+
"You don't have to handle it alone.\n\n"
|
| 53 |
+
"Please reach out for immediate support:\n\n"
|
| 54 |
+
"**Behavioral Health Help Line (BHHL)**\n"
|
| 55 |
+
"📞 Call or text: **833-773-2445**\n"
|
| 56 |
+
"Available 24 hours a day, 7 days a week, 365 days a year.\n"
|
| 57 |
+
"Anyone may contact the Help Line if they or a family member are experiencing "
|
| 58 |
+
"a mental health or substance use disorder crisis.\n\n"
|
| 59 |
+
"If you or someone near you may be in immediate danger, please call **911** right now."
|
| 60 |
)
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _is_crisis(text: str) -> bool:
|
| 64 |
+
lower = text.lower()
|
| 65 |
+
return any(kw in lower for kw in CRISIS_KEYWORDS)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
|
| 68 |
class Chatbot:
    """Harbor treatment-navigation chatbot backed by a hosted HF chat model.

    Maintains a running user profile extracted from the conversation, injects
    verified facility data into the system prompt once enough profile fields
    are known, and short-circuits to a fixed crisis response when crisis
    keywords are detected.
    """

    def __init__(self):
        """
        Initialize the chatbot with a HF model ID, an empty user profile,
        and the verified treatment-resource directory.
        """
        # Prefer the fine-tuned model when one is configured in src/config.py.
        model_id = MY_MODEL if MY_MODEL else BASE_MODEL
        # BUG FIX: previously token="HF_TOKEN" passed the literal string
        # "HF_TOKEN" as the credential, so authenticated/gated models would
        # always fail. Pass the actual token imported from src.config.
        self.client = InferenceClient(model=model_id, token=HF_TOKEN)
        # Keyword / substance tags detected in the most recent user message.
        self.user_tags = []
        self.substance_tags = []
        # Initialize user profile from the JSON schema.
        current_dir = os.path.dirname(os.path.abspath(__file__))
        data_dir = os.path.join(current_dir, '..', 'data')
        self.profile_schema = load_schema(os.path.join(data_dir, 'user_profile_schema.json'))
        self.user_profile = create_empty_profile()
        # Load treatment resources once.
        knowledge_dir = os.path.join(data_dir, '..', 'references', 'knowledge')
        resources_paths = [
            os.path.join(knowledge_dir, 'ma_resources.csv'),
            # CONSISTENCY FIX: app.py loads this file directly from
            # references/knowledge/; the extra 'resources' path segment here
            # pointed at a directory the rest of the app does not use.
            os.path.join(knowledge_dir, 'boston_resources.csv'),
        ]
        self.resources = load_resources(resources_paths)

    def reset(self):
        """Reset conversation state for a new session without re-initializing the client or resources."""
        self.user_tags = []
        self.substance_tags = []
        self.user_profile = create_empty_profile()

    def update_profile(self, user_input):
        """
        Scan user input for profile-relevant information and merge it
        into the running user profile.

        Args:
            user_input (str): The user's message text.
        """
        updates = extract_profile_updates(self.profile_schema, user_input)
        merge_profile(self.user_profile, updates)

    def format_prompt(self, user_input, turn_number=0):
        """
        Format the user's input into a list of chat messages with system context.
        Also tags the input with relevant keywords and substances that appear in the text,
        and updates the user profile with any new information detected.

        This method:
        1. Loads system prompt from system_prompt.md
        2. Detects keywords from keywords.txt in user input (case-insensitive, partial matches)
        3. Detects substances from substances.txt in user input (case-insensitive, partial matches)
        4. Updates user profile from schema-based keyword matching
        5. Injects profile summary into the system prompt so the model knows what's been gathered
        6. Returns a list of message dicts for the chat completion API

        Args:
            user_input (str): The user's question
            turn_number (int): Zero-indexed turn count (0 = first user message)

        Returns:
            list[dict]: A list of message dicts with 'role' and 'content' keys
        """
        # Get the directory where this file is located
        current_dir = os.path.dirname(os.path.abspath(__file__))

        # Load system prompt
        system_prompt_path = os.path.join(current_dir, '../data/system_prompt.md')
        with open(system_prompt_path, 'r', encoding='utf-8') as f:
            system_prompt = f.read().strip()

        # Tag user input with keywords and substances
        keywords_path = os.path.join(current_dir, '../data/keywords.txt')
        substances_path = os.path.join(current_dir, '../data/substances.txt')

        self.user_tags = tag_user_input(keywords_path, user_input)
        self.substance_tags = tag_user_input(substances_path, user_input)

        # Update user profile from this message
        self.update_profile(user_input)

        # Build profile summary for the prompt
        profile_summary = profile_to_summary(self.user_profile)

        # Build system message with profile context
        system_content = system_prompt
        if profile_summary:
            system_content = system_content + "\n\n" + profile_summary

        # On the 4th user message (turn_number >= 3), nudge the model to ask a matching question
        if turn_number >= 3:
            missing = []
            clinical = self.user_profile.get("clinical", {})
            preferences = self.user_profile.get("preferences", {})
            logistics = self.user_profile.get("logistics", {})
            if not clinical.get("primary_focus"):
                missing.append("type of help needed (substance use, mental health, or both)")
            if not preferences.get("setting"):
                missing.append("preferred treatment setting")
            if not logistics.get("insurance"):
                missing.append("payment method or insurance")
            if not logistics.get("zipcode") and not logistics.get("region"):
                missing.append("location (zip code or city)")

            if missing:
                nudge = (
                    "\n\n[PACING INSTRUCTION — DO NOT REPEAT TO USER] "
                    "You have now had several exchanges. You MUST include a question relevant to "
                    "treatment matching in this response. Ask about one of the following missing fields: "
                    + "; ".join(missing)
                    + ". Ask it warmly and conversationally — one question only."
                )
                system_content = system_content + nudge

        # Return structured messages for chat completion API
        messages = [{"role": "system", "content": system_content}]

        return messages

    def get_response(self, user_input, history=None):
        """
        Generate a response to the user's question, with resource recommendations
        appended when the user profile contains enough information to match.

        Args:
            user_input (str): The user's question
            history (list | None): Prior turns, either as {'role', 'content'}
                dicts or as (user, assistant) pairs.

        Returns:
            str: The chatbot's response, optionally followed by top 3 resources
        """
        # 0. Hard crisis check — bypass LLM entirely if crisis keywords detected
        if _is_crisis(user_input):
            print("[Harbor] Crisis keywords detected — returning crisis response.")
            return CRISIS_RESPONSE

        # 1. Format messages (also updates profile and tags)
        turn_number = len(history) if history else 0
        messages = self.format_prompt(user_input, turn_number=turn_number)

        # 1b. After the user's first message, return a fixed follow-up instead of calling the LLM.
        # Profile and tags have already been updated above so the first message is not lost.
        if history and len(history) == 1:
            return (
                "Thank you for sharing that. Before I give you any recommendations, "
                "can you tell me a little bit about you or the loved one you are concerned about?"
            )

        # 2. Inject verified facility data into the system prompt so the LLM presents
        # recommendations naturally in its own voice — only when the profile has enough
        # info or the user is explicitly asking for options.
        if _has_enough_for_recommendation(self.user_profile) or _user_wants_recommendations(user_input):
            filtered = filter_resources(self.resources, self.user_profile)
            top = score_resources(filtered, self.user_profile)
            if top:
                facility_context = format_resources_for_context(top)
                messages[0]["content"] = messages[0]["content"] + "\n\n" + facility_context
            elif _has_enough_for_recommendation(self.user_profile):
                messages[0]["content"] = messages[0]["content"] + (
                    "\n\n[NOTE — No verified facilities matched the user's profile. "
                    "Let the user know you couldn't find a direct match and ask if "
                    "they'd be open to broader options.]"
                )

        # 3. Append conversation history then current user message
        for item in (history or []):
            if isinstance(item, dict):
                messages.append({"role": item["role"], "content": item["content"]})
            else:
                human, assistant = item
                messages.append({"role": "user", "content": human})
                if assistant is not None:
                    messages.append({"role": "assistant", "content": assistant})
        messages.append({"role": "user", "content": user_input})

        # 4. Generate LLM response via chat completion API
        result = self.client.chat_completion(
            messages=messages,
            max_tokens=512,
            temperature=0.7,
        )
        return result.choices[0].message.content.strip()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
src/config.py
CHANGED
|
@@ -12,10 +12,6 @@ BASE_MODEL = "Qwen/Qwen2.5-7B-Instruct"
|
|
| 12 |
# BASE_MODEL = "HuggingFaceH4/zephyr-7b-beta" # ungated
|
| 13 |
|
| 14 |
# If you finetune the model or change it in any way, save it to huggingface hub, then set MY_MODEL to your model ID. The model ID is in the format "your-username/your-model-name".
|
| 15 |
-
MY_MODEL = "amitashukla/harbor-qwn25-
|
| 16 |
-
|
| 17 |
-
# Used locally to avoid OOM on 18 GB unified memory.
|
| 18 |
-
# Set to None (or remove) when deploying to HF Spaces.
|
| 19 |
-
LOCAL_MODEL = None #"Qwen/Qwen2.5-1.5B-Instruct"
|
| 20 |
|
| 21 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
|
|
|
| 12 |
# BASE_MODEL = "HuggingFaceH4/zephyr-7b-beta" # ungated
|
| 13 |
|
| 14 |
# If you finetune the model or change it in any way, save it to huggingface hub, then set MY_MODEL to your model ID. The model ID is in the format "your-username/your-model-name".
|
| 15 |
+
MY_MODEL = "" #"amitashukla/harbor-qwn25-lora"
|
|
|
|
|
|
|
|
|
|
|
|
|
| 16 |
|
| 17 |
HF_TOKEN = os.getenv("HF_TOKEN")
|
src/utils/profile.py
CHANGED
|
@@ -221,4 +221,7 @@ def profile_to_summary(profile):
|
|
| 221 |
if not lines:
|
| 222 |
return ""
|
| 223 |
|
| 224 |
-
|
|
|
|
|
|
|
|
|
|
|
|
| 221 |
if not lines:
|
| 222 |
return ""
|
| 223 |
|
| 224 |
+
header = (
|
| 225 |
+
"USER PROFILE (already collected — DO NOT ask the user again for any of these details):\n"
|
| 226 |
+
)
|
| 227 |
+
return header + "\n".join(lines)
|
src/utils/resources.py
CHANGED
|
@@ -180,60 +180,63 @@ def score_resources(filtered, user_profile, top_n=3):
|
|
| 180 |
return [row for _, row in scored[:top_n]]
|
| 181 |
|
| 182 |
|
| 183 |
-
def
|
| 184 |
"""
|
| 185 |
-
Format a list of resource dicts
|
| 186 |
-
|
|
|
|
| 187 |
"""
|
| 188 |
if not results:
|
| 189 |
return ""
|
| 190 |
|
| 191 |
lines = [
|
| 192 |
-
"
|
| 193 |
-
"
|
|
|
|
| 194 |
"",
|
| 195 |
]
|
| 196 |
|
| 197 |
for i, row in enumerate(results, 1):
|
| 198 |
name = row.get("name", "Unknown Facility")
|
| 199 |
-
lines.append(f"{i}
|
| 200 |
|
| 201 |
-
# Address
|
| 202 |
parts = [row.get("address", ""), row.get("city", ""),
|
| 203 |
row.get("state", ""), row.get("zip", "")]
|
| 204 |
address = ", ".join(p.strip() for p in parts if p.strip())
|
| 205 |
if address:
|
| 206 |
-
lines.append(f"
|
| 207 |
|
| 208 |
-
# Phone
|
| 209 |
phone = row.get("phone", "").strip()
|
| 210 |
if phone:
|
| 211 |
-
lines.append(f"
|
| 212 |
|
| 213 |
-
# Website
|
| 214 |
website = row.get("website", "").strip()
|
| 215 |
if website:
|
| 216 |
-
lines.append(f"
|
| 217 |
|
| 218 |
-
# Summary line: focus, substances, settings
|
| 219 |
-
details = []
|
| 220 |
focus = row.get("primary_focus", "").strip()
|
| 221 |
if focus:
|
| 222 |
-
|
| 223 |
v.strip().replace("_", " ").title() for v in focus.split("|")
|
| 224 |
))
|
|
|
|
| 225 |
subs = row.get("substances", "").strip()
|
| 226 |
if subs:
|
| 227 |
-
|
| 228 |
v.strip().replace("_", " ").title() for v in subs.split("|")
|
| 229 |
))
|
|
|
|
| 230 |
settings = row.get("settings", "").strip()
|
| 231 |
if settings:
|
| 232 |
-
|
| 233 |
v.strip().replace("_", " ").title() for v in settings.split("|")
|
| 234 |
))
|
| 235 |
-
|
| 236 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 237 |
|
| 238 |
lines.append("")
|
| 239 |
|
|
|
|
| 180 |
return [row for _, row in scored[:top_n]]
|
| 181 |
|
| 182 |
|
| 183 |
+
def format_resources_for_context(results):
|
| 184 |
"""
|
| 185 |
+
Format a list of resource dicts as a context block for injection into the
|
| 186 |
+
system prompt. The LLM uses this verified data to present recommendations
|
| 187 |
+
naturally in its own voice. Returns empty string if no results.
|
| 188 |
"""
|
| 189 |
if not results:
|
| 190 |
return ""
|
| 191 |
|
| 192 |
lines = [
|
| 193 |
+
"[VERIFIED FACILITY DATA — Present these facilities to the user following the "
|
| 194 |
+
"output format in your instructions. Use only the data listed here — do not invent, "
|
| 195 |
+
"alter, or supplement with facilities not in this list.]",
|
| 196 |
"",
|
| 197 |
]
|
| 198 |
|
| 199 |
for i, row in enumerate(results, 1):
|
| 200 |
name = row.get("name", "Unknown Facility")
|
| 201 |
+
lines.append(f"Facility {i}: {name}")
|
| 202 |
|
|
|
|
| 203 |
parts = [row.get("address", ""), row.get("city", ""),
|
| 204 |
row.get("state", ""), row.get("zip", "")]
|
| 205 |
address = ", ".join(p.strip() for p in parts if p.strip())
|
| 206 |
if address:
|
| 207 |
+
lines.append(f" Address: {address}")
|
| 208 |
|
|
|
|
| 209 |
phone = row.get("phone", "").strip()
|
| 210 |
if phone:
|
| 211 |
+
lines.append(f" Phone: {phone}")
|
| 212 |
|
|
|
|
| 213 |
website = row.get("website", "").strip()
|
| 214 |
if website:
|
| 215 |
+
lines.append(f" Website: {website}")
|
| 216 |
|
|
|
|
|
|
|
| 217 |
focus = row.get("primary_focus", "").strip()
|
| 218 |
if focus:
|
| 219 |
+
lines.append(" Focus: " + ", ".join(
|
| 220 |
v.strip().replace("_", " ").title() for v in focus.split("|")
|
| 221 |
))
|
| 222 |
+
|
| 223 |
subs = row.get("substances", "").strip()
|
| 224 |
if subs:
|
| 225 |
+
lines.append(" Substances: " + ", ".join(
|
| 226 |
v.strip().replace("_", " ").title() for v in subs.split("|")
|
| 227 |
))
|
| 228 |
+
|
| 229 |
settings = row.get("settings", "").strip()
|
| 230 |
if settings:
|
| 231 |
+
lines.append(" Settings: " + ", ".join(
|
| 232 |
v.strip().replace("_", " ").title() for v in settings.split("|")
|
| 233 |
))
|
| 234 |
+
|
| 235 |
+
insurance = row.get("insurance", "").strip()
|
| 236 |
+
if insurance:
|
| 237 |
+
lines.append(" Insurance: " + ", ".join(
|
| 238 |
+
v.strip().replace("_", " ").title() for v in insurance.split("|")
|
| 239 |
+
))
|
| 240 |
|
| 241 |
lines.append("")
|
| 242 |
|