"""VibeBot: a DialoGPT-small chatbot served through gr.ChatInterface."""

import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load model and tokenizer once at startup (reused across all requests).
tokenizer = AutoTokenizer.from_pretrained("microsoft/DialoGPT-small")
model = AutoModelForCausalLM.from_pretrained("microsoft/DialoGPT-small")


def vibebot_response(user_input, history):
    """Generate one bot reply for gr.ChatInterface.

    Parameters
    ----------
    user_input : str
        The latest user message.
    history : list
        Prior (user, bot) message pairs supplied by Gradio for THIS
        session. Rebuilding the model context from it keeps sessions
        isolated — the previous global ``chat_history_ids`` was shared
        across every connected user.
        NOTE(review): assumes the tuple-pair history format; newer Gradio
        versions may pass message dicts — confirm against the installed
        gradio version.

    Returns
    -------
    str
        The model's reply text — exactly what gr.ChatInterface expects
        (it manages the displayed chat history itself).
    """
    # Re-encode the whole conversation, each turn terminated by EOS as
    # DialoGPT expects.
    turn_ids = []
    for user_turn, bot_turn in history:
        turn_ids.append(
            tokenizer.encode(user_turn + tokenizer.eos_token, return_tensors="pt")
        )
        turn_ids.append(
            tokenizer.encode(bot_turn + tokenizer.eos_token, return_tensors="pt")
        )
    turn_ids.append(
        tokenizer.encode(user_input + tokenizer.eos_token, return_tensors="pt")
    )
    bot_input_ids = torch.cat(turn_ids, dim=-1)

    # max_new_tokens bounds only the reply; the original max_length=1000
    # counted prompt + reply and silently broke once the chat grew long.
    output_ids = model.generate(
        bot_input_ids,
        max_new_tokens=200,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens (skip the echoed prompt).
    return tokenizer.decode(
        output_ids[0, bot_input_ids.shape[-1]:], skip_special_tokens=True
    )


# ChatInterface calls fn(message, history) and renders the returned string;
# returning (history, history) as before produces garbled output here.
demo = gr.ChatInterface(
    fn=vibebot_response,
    title="VibeBot: Gen-Z Therapist",
    chatbot=gr.Chatbot(height=400),
    theme="default",
)

if __name__ == "__main__":
    demo.launch()