Update app.py
app.py CHANGED

@@ -3,7 +3,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
 import torch
 import os
 
-model_name = "google/gemma-4-
+model_name = "google/gemma-4-E2B-it"
 
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 
@@ -14,7 +14,7 @@ model = AutoModelForCausalLM.from_pretrained(model_name,
 pipe = pipeline("text-generation",
                 model=model_name,
                 tokenizer=tokenizer,
-                max_new_tokens=
+                max_new_tokens=1500,
                 temperature=0.7)
 
 def generate_response(message, history):
@@ -38,8 +38,8 @@ def generate_response(message, history):
 
 demo = gr.ChatInterface(
     generate_response,
-    title="
-    description="
+    title="Brain map(com LLM)",
+    description="You are a Gemma 4 model, a trusted speaker and medical assistant. "
)
 
 demo.launch()
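For context, below is a minimal sketch of the complete app.py this diff implies. Only the lines visible in the hunks are the author's; the from_pretrained arguments and the generate_response body are assumptions, marked as such in the comments. One deliberate deviation: the diff passes model=model_name to pipeline(), which would load the checkpoint a second time on top of the AutoModelForCausalLM call, so the sketch passes the already-loaded model object instead.

import os

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_name = "google/gemma-4-E2B-it"  # model id as committed in the diff

tokenizer = AutoTokenizer.from_pretrained(model_name)

# Assumed loading options: the diff only shows that the from_pretrained
# call continues past its context line 14.
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

# Reuse the loaded model object; passing model_name (as in the diff)
# would make pipeline() download and load the weights again.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    max_new_tokens=1500,
    temperature=0.7,
)

def generate_response(message, history):
    # Assumed body: rebuild the chat transcript from Gradio's
    # tuple-style (user, assistant) history and return only the new reply.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})
    messages.append({"role": "user", "content": message})
    output = pipe(messages)
    # generated_text holds the full transcript; the last entry is the reply.
    return output[0]["generated_text"][-1]["content"]

demo = gr.ChatInterface(
    generate_response,
    title="Brain map(com LLM)",
    description="You are a Gemma 4 model, a trusted speaker and medical assistant. ",
)

demo.launch()

Note that temperature=0.7 only takes effect when sampling is enabled; if responses come out deterministic, add do_sample=True to the pipeline call.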