import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
# Load model and tokenizer once at import time (downloads from the Hugging Face
# hub on first run, then reads from the local cache).
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")
# Chat history (global per session)
# NOTE(review): this global is module-level, i.e. shared by ALL sessions of the
# running process, not "per session" as the comment claims — see vibebot_response.
chat_history_ids = None
def vibebot_response(message, chat_history):
    """Generate one DialoGPT reply for a Gradio ChatInterface turn.

    Args:
        message: The user's latest utterance.
        chat_history: Gradio's per-session history, a list of
            (user_message, bot_message) pairs for completed turns.
            -- assumes the default tuple-pair history format; TODO confirm
            against the installed Gradio version.

    Returns:
        The model's reply as a plain string.

    Fixes over the original: the conversation is rebuilt from Gradio's
    per-session ``chat_history`` instead of a module-level global, so
    concurrent sessions no longer share (and corrupt) one another's
    context; and ``max_new_tokens`` replaces ``max_length=1000``, which
    capped *total* length and silently stopped producing output once the
    accumulated history reached 1000 tokens.
    """
    eos = tokenizer.eos_token
    # Re-encode every prior turn, terminating each utterance with EOS as
    # DialoGPT expects.
    segments = []
    for user_msg, bot_msg in chat_history:
        segments.append(tokenizer.encode(user_msg + eos, return_tensors='pt'))
        if bot_msg:  # bot side may be empty/None mid-turn
            segments.append(tokenizer.encode(bot_msg + eos, return_tensors='pt'))
    # Append the new user message.
    segments.append(tokenizer.encode(message + eos, return_tensors='pt'))
    bot_input_ids = torch.cat(segments, dim=-1)

    # Bound only the *new* tokens; pad with EOS as DialoGPT has no pad token.
    output_ids = model.generate(
        bot_input_ids,
        max_new_tokens=256,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the freshly generated suffix, skipping special tokens.
    response = tokenizer.decode(
        output_ids[:, bot_input_ids.shape[-1]:][0],
        skip_special_tokens=True,
    )
    return response
# Gradio interface: ChatInterface supplies the chat UI and passes
# (message, history) to the callback; launch() starts the local web server
# and blocks until it is stopped.
chat = gr.ChatInterface(fn=vibebot_response, title="VibeBot: Gen-Z Therapist")
chat.launch()
|