# Requires: pip install gradio transformers torch accelerate
import gradio as gr
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
|
|
# Load the Qwen1.5-1.8B tokenizer and model. device_map="auto" (which requires
# accelerate) places the weights on the available GPU(s), falling back to CPU.
tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen1.5-1.8B", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("Qwen/Qwen1.5-1.8B", device_map="auto", trust_remote_code=True)
|
|
def generate_text(prompt):
    # Tokenize the prompt and move the input tensors to the model's device.
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    # Generate up to 100 new tokens and decode the result back to a string.
    outputs = model.generate(**inputs, max_new_tokens=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
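
# Optional tweaks (assumptions, not part of the original script):
# - generate() accepts sampling parameters for less deterministic output, e.g.
#       outputs = model.generate(**inputs, max_new_tokens=100,
#                                do_sample=True, temperature=0.7, top_p=0.9)
# - decoding only the newly generated tokens strips the echoed prompt:
#       tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:],
#                        skip_special_tokens=True)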
|
|
# Wire the generator into a simple text-in/text-out Gradio demo.
gr.Interface(fn=generate_text, inputs="text", outputs="text", title="Qwen Text Generator").launch()
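
# Note (an assumption beyond the original): launch(share=True) would expose the
# demo via a temporary public Gradio link, and launch(server_port=7860) pins
# the local port.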
|