# INACHI-CORE / app.py — Gradio chat Space by minzo456 (commit fe09faf)
import gradio as gr
import torch
from transformers import pipeline
# Model: Gemma 2B for efficiency
# Instruction-tuned 2B-parameter Gemma 2; small enough for a free Space.
model_id = "google/gemma-2-2b-it"
# Initialize Pipeline
# Module-level singleton: the model is downloaded/loaded once at import time.
# bfloat16 halves memory vs. fp32; device_map="auto" places weights on the
# best available device (GPU if present, else CPU).
pipe = pipeline(
"text-generation",
model=model_id,
model_kwargs={"torch_dtype": torch.bfloat16},
device_map="auto",
)
def specialist_respond(message, history):
    """Generate one chat reply from the Gemma pipeline.

    Args:
        message: The latest user message (str).
        history: Prior turns as (user_text, assistant_text) pairs, per
            Gradio's tuple-style ChatInterface history.

    Returns:
        The model's newly generated reply text (str), with the echoed
        prompt prefix stripped off.
    """
    system_prompt = "You are MINZO-PRIME, a highly advanced AI developed under the INACHI AI project. Be technical and precise."
    # Format message history.
    # BUG FIX: Gemma's chat template rejects a "system" role (it raises
    # a TemplateError: "System role not supported"), so instead of
    # emitting {"role": "system", ...} we fold the system prompt into
    # the first user turn below.
    messages = []
    for user_text, bot_text in history:
        if user_text:
            messages.append({"role": "user", "content": user_text})
        if bot_text:
            messages.append({"role": "assistant", "content": bot_text})
    messages.append({"role": "user", "content": message})
    # Prepend the persona instructions to the earliest user message.
    for turn in messages:
        if turn["role"] == "user":
            turn["content"] = f"{system_prompt}\n\n{turn['content']}"
            break
    # Generate
    prompt = pipe.tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )
    outputs = pipe(prompt, max_new_tokens=512, do_sample=True, temperature=0.7)
    # The text-generation pipeline echoes the prompt; return only the tail.
    return outputs[0]["generated_text"][len(prompt):]
# πŸ”± UI Setup
demo = gr.ChatInterface(
fn=specialist_respond,
title="INACHI-CORE V1.0",
description="Authorized Access Only: MINZO-PRIME",
theme="soft"
)
if __name__ == "__main__":
demo.launch()