import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the tokenizer and the causal language model (the LM head is needed for text generation).
model_name = "Rafay17/Llama3.2_1b_customModle2"
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)


def generate_output(input_text):
    # Tokenize the input text into PyTorch tensors.
    inputs = tokenizer(input_text, return_tensors="pt")

    # Generate up to 128 new tokens (an assumed default; adjust as needed), without tracking gradients.
    with torch.no_grad():
        outputs = model.generate(**inputs, max_new_tokens=128)

    # Decode the generated token IDs back into text, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
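
# Quick sanity check outside the UI (hypothetical example prompt; the model must finish downloading first):
# print(generate_output("Hello, how are you?"))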


# Build the Gradio interface around the generation function.
iface = gr.Interface(
    fn=generate_output,
    inputs=gr.Textbox(label="Input Text"),
    outputs=gr.Textbox(label="Model Output"),
    title="Text Processing with Llama Model",
    description="Enter text to process it with the Llama3.2 model.",
)


# Launch the web app locally (pass share=True to launch() for a temporary public link).
iface.launch()