import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
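# Minimal Gradio demo for the khaled123/chess causal language model: load the
# checkpoint from the Hugging Face Hub and expose a simple text-in/text-out UI.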
|
|
model_name = "khaled123/chess"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
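# Optional: if a CUDA GPU is available, the model can be moved there for faster
# generation (torch ships as a transformers dependency; CPU also works, just
# more slowly). Sketch only; inputs built in generate_text would then also need
# to be moved to the same device.
# import torch
# if torch.cuda.is_available():
#     model = model.to("cuda")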
|
|
def generate_text(prompt):
    # Tokenize the prompt and generate a continuation; note that max_length
    # counts the prompt tokens plus the newly generated ones.
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=50)
    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return generated_text
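# Quick sanity check outside the UI (the opening moves below are only a
# hypothetical example prompt; any text input works):
# print(generate_text("1. e4 e5 2. Nf3 Nc6"))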
|
|
iface = gr.Interface(
    fn=generate_text,
    inputs="text",
    outputs="text",
    title="Chess Model based on LLaMA 2",
    description="Type a prompt and the model will generate text based on it.",
)
|
|
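# launch() starts a local web server; passing share=True additionally creates a
# temporary public link (optional Gradio flag, handy when running in a notebook).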
iface.launch()