gijl committed on
Commit
fa3957c
·
verified ·
1 Parent(s): eaaff79

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +45 -0
app.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import torch
import os

# NOTE(review): "gemma-4-E2B-it" does not match a published checkpoint name —
# presumably "google/gemma-3n-E2B-it" was intended; confirm against the Hub.
model_name = "google/gemma-4-E2B-it"

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Load once in half precision and let accelerate place it on the available
# device(s).
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Bug fix: pass the already-loaded model object, not the name string.
# Passing `model_name` made `pipeline` re-download/re-load the checkpoint
# from scratch, silently discarding the float16 / device_map="auto"
# configuration applied above (and doubling memory use).
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=150,
    temperature=0.7,
)
20
def generate_response(message, history):
    """Chat callback for gr.ChatInterface: generate one assistant reply.

    Parameters
    ----------
    message : str
        The user's latest message.
    history : list
        Prior turns supplied by ChatInterface. Currently unused, so the
        model only ever sees the system prompt plus the latest message
        (no conversational memory) — presumably intentional; confirm.

    Returns
    -------
    str
        The assistant's reply text.
    """
    # Batch of one conversation, in the multimodal-style content format
    # the Gemma chat template expects.
    messages = [
        [
            {
                "role": "system",
                "content": [{"type": "text",
                             "text": "Você é ELIZA, uma terapeuta que responde com empatia e faz perguntas para entender melhor o paciente."}],
            },
            {
                "role": "user",
                "content": [{"type": "text",
                             "text": message}],
            },
        ],
    ]

    response = pipe(messages)
    # The pipeline echoes the whole conversation in 'generated_text'; the
    # assistant's reply is the final message. Bug fix: use -1 instead of the
    # hard-coded index 2, which breaks as soon as the number of prompt
    # messages changes.
    return response[0][0]['generated_text'][-1]['content']
38
+
39
# Wire the callback into a ready-made chat UI.  The title and description
# shown to users are kept exactly as shipped.
demo = gr.ChatInterface(
    fn=generate_response,
    title="ELIZA (com LLM)",
    description="Compartilhe seus pensamentos e ELIZA irá ajudar você a refletir sobre eles."
)

demo.launch()