import gradio as gr
import spaces
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# The chat model to serve; any instruction-tuned model with a chat template
# should work here.
model_id = "Qwen/Qwen2.5-7B-Instruct"

# Load the tokenizer and model once at startup so individual requests don't
# pay the loading cost. bfloat16 halves memory use compared to float32, and
# device_map="auto" places the weights on the available GPU (this requires
# the `accelerate` package).
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)
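
# Optional (a sketch, not part of the original app): if GPU memory is tight,
# the model could be loaded in 4-bit instead. This assumes the `bitsandbytes`
# package is installed; uncomment to try it.
#
# from transformers import BitsAndBytesConfig
#
# model = AutoModelForCausalLM.from_pretrained(
#     model_id,
#     quantization_config=BitsAndBytesConfig(
#         load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16
#     ),
#     device_map="auto",
# )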


# The @spaces.GPU decorator asks ZeroGPU for a GPU while this function runs.
@spaces.GPU
def generate_response(message, history):
    # Rebuild the conversation in the chat format the model expects.
    # gr.ChatInterface passes history as a list of (user, assistant) pairs.
    messages = []
    for user_msg, bot_msg in history:
        messages.append({"role": "user", "content": user_msg})
        messages.append({"role": "assistant", "content": bot_msg})

    # Append the new user turn.
    messages.append({"role": "user", "content": message})

    # Render the conversation into the model's prompt format, then tokenize.
    text = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Generate a reply. do_sample=True is required for temperature to take
    # effect; without it, decoding is greedy and the setting is ignored.
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512,
        do_sample=True,
        temperature=0.7,
    )

    # Slice off the prompt tokens so only the newly generated reply is decoded.
    generated_ids = [
        output_ids[len(input_ids):]
        for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
    return response
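

# Optional streaming variant (a sketch, not part of the original app):
# gr.ChatInterface also accepts a generator function, and transformers'
# TextIteratorStreamer yields text as it is produced. Swap fn= below to
# use it.
#
# from threading import Thread
# from transformers import TextIteratorStreamer
#
# @spaces.GPU
# def generate_response_stream(message, history):
#     messages = [{"role": "user", "content": message}]  # plus history, as above
#     text = tokenizer.apply_chat_template(
#         messages, tokenize=False, add_generation_prompt=True
#     )
#     model_inputs = tokenizer([text], return_tensors="pt").to(model.device)
#     streamer = TextIteratorStreamer(
#         tokenizer, skip_prompt=True, skip_special_tokens=True
#     )
#     Thread(
#         target=model.generate,
#         kwargs=dict(**model_inputs, streamer=streamer, max_new_tokens=512),
#     ).start()
#     partial = ""
#     for chunk in streamer:
#         partial += chunk
#         yield partial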


demo = gr.ChatInterface(
    fn=generate_response,
    title="My Qwen 2.5 Chatbot",
    description="Running entirely for free using Hugging Face ZeroGPU.",
)
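
# Note (an assumption about newer Gradio releases): with
# gr.ChatInterface(..., type="messages"), history arrives as a list of
# {"role": ..., "content": ...} dicts rather than (user, bot) tuples, and
# the history loop above reduces to `messages = list(history)`.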


if __name__ == "__main__":
    demo.launch()