import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

MODEL_ID = "Qwen/Qwen2.5-Coder-1.5B-Instruct"
device = "cuda" if torch.cuda.is_available() else "cpu"
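
# Load the tokenizer and weights once at startup so every request reuses them;
# half precision on CUDA roughly halves the model's memory footprint.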
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
)
model.to(device)
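
# Run a single prompt through the model and return the assistant's reply.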
def generate(prompt):
    # Chat-tuned checkpoints expect templated input, so apply the chat template.
    messages = [{"role": "user", "content": prompt}]
    text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
    inputs = tokenizer(text, return_tensors="pt").to(device)
    outputs = model.generate(**inputs, max_new_tokens=256, do_sample=True, temperature=0.2)
    # Decode only the newly generated tokens, not the echoed prompt.
    return tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
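
# Minimal Gradio UI: a small prompt box in, a scrollable text box out.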
demo = gr.Interface(
    fn=generate,
    inputs=gr.Textbox(lines=2, placeholder="Enter your prompt here..."),
    outputs=gr.Textbox(lines=2, max_lines=30),
)
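
# Serves locally (http://127.0.0.1:7860 by default); pass share=True to
# demo.launch() for a temporary public link.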
demo.launch()