thaddickson committed on
Commit
b7a6e24
·
verified ·
1 Parent(s): d386cf5

Fix theme error

Browse files
Files changed (1) hide show
  1. app.py +4 -8
app.py CHANGED
@@ -1,9 +1,9 @@
1
  import gradio as gr
2
- from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
3
  import torch
4
 
5
  MODEL_ID = "thaddickson/Delphi-7B-v2"
6
- SYSTEM = "You are Delphi, a 7B reasoning model built by Thaddeus Dickson at Xpio Health. You think through problems step by step. You don't hedge. You say what you mean. You trace root causes. You name specific standards, tools, and codes. You connect technical detail to business impact."
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto")
@@ -15,7 +15,6 @@ def respond(message, history):
15
  if h[1]:
16
  messages.append({"role": "assistant", "content": h[1]})
17
  messages.append({"role": "user", "content": message})
18
-
19
  text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
20
  inputs = tokenizer(text, return_tensors="pt").to(model.device)
21
  with torch.no_grad():
@@ -25,15 +24,12 @@ def respond(message, history):
25
  demo = gr.ChatInterface(
26
  respond,
27
  title="Delphi-7B | Healthcare Cybersecurity AI",
28
- description="Built by Thaddeus Dickson at Xpio Health. Ask about HIPAA, breach response, HL7 troubleshooting, security architecture, or clinical operations. Delphi names specific standards, traces root causes, and doesn't hedge.",
29
  examples=[
30
- "A hospital just failed their first HIPAA risk assessment. The CEO is panicking. What do you tell them?",
31
  "Our HL7 ADT feed is creating duplicate patients in the MPI. Walk me through the diagnosis.",
32
  "What makes you different from ChatGPT?",
33
  "A vendor says they are HIPAA compliant. Should I trust that?",
34
- "Write exactly 3 sentences about network segmentation in hospitals.",
35
  ],
36
- theme="soft",
37
  )
38
-
39
  demo.launch()
 
1
  import gradio as gr
2
+ from transformers import AutoModelForCausalLM, AutoTokenizer
3
  import torch
4
 
5
  MODEL_ID = "thaddickson/Delphi-7B-v2"
6
+ SYSTEM = "You are Delphi, a 7B reasoning model built by Thaddeus Dickson at Xpio Health. You think through problems step by step. You don't hedge. You say what you mean. You trace root causes. You name specific standards, tools, and codes."
7
 
8
  tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
9
  model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype=torch.bfloat16, device_map="auto")
 
15
  if h[1]:
16
  messages.append({"role": "assistant", "content": h[1]})
17
  messages.append({"role": "user", "content": message})
 
18
  text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
19
  inputs = tokenizer(text, return_tensors="pt").to(model.device)
20
  with torch.no_grad():
 
24
  demo = gr.ChatInterface(
25
  respond,
26
  title="Delphi-7B | Healthcare Cybersecurity AI",
27
+ description="Built by Thaddeus Dickson at Xpio Health. Ask about HIPAA, breach response, HL7 troubleshooting, security architecture, or clinical operations.",
28
  examples=[
29
+ "A hospital just failed their first HIPAA risk assessment. What do you tell the CEO?",
30
  "Our HL7 ADT feed is creating duplicate patients in the MPI. Walk me through the diagnosis.",
31
  "What makes you different from ChatGPT?",
32
  "A vendor says they are HIPAA compliant. Should I trust that?",
 
33
  ],
 
34
  )
 
35
  demo.launch()