rufatronics committed on
Commit f265043 · verified · 1 Parent(s): 41d13e5

Create app.py

Files changed (1)
app.py +53 -0
app.py ADDED
@@ -0,0 +1,53 @@
+ import gradio as gr
+ from transformers import AutoTokenizer, AutoModelForCausalLM
+ import torch
+
+ # Check for a GPU and fall back to CPU when none is available
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ print(f"Using device: {device}")
+
+ # Load the model and tokenizer
+ model_id = "google/gemma-2-2b-it"
+ tokenizer = AutoTokenizer.from_pretrained(model_id)
+ model = AutoModelForCausalLM.from_pretrained(
+     model_id,
+     device_map="auto",
+     torch_dtype=torch.bfloat16,  # halves memory use versus float32
+ )
+
+ def chat_response(message, history):
+     # Format the prompt according to Gemma 2's chat template
+     prompt = f"<start_of_turn>user\n{message}<end_of_turn>\n<start_of_turn>model\n"
+
+     # Tokenize the input and move it to the model's device
+     inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
+
+     # Generate a response
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_new_tokens=256,
+             temperature=0.7,
+             top_p=0.9,
+             do_sample=True,
+             pad_token_id=tokenizer.eos_token_id,
+         )
+
+     # Decode only the newly generated tokens (everything after the
+     # prompt). With skip_special_tokens=True the <start_of_turn>
+     # markers are stripped from the decoded text, so the model's turn
+     # cannot be split off from a full decode of the sequence.
+     new_tokens = outputs[0][inputs["input_ids"].shape[-1]:]
+     response = tokenizer.decode(new_tokens, skip_special_tokens=True)
+     return response.strip()
+
+ # Create the chat interface
+ demo = gr.ChatInterface(
+     fn=chat_response,
+     title="Hausa AI Assistant",
+     description="A simple AI assistant powered by Gemma 2 2B",
+ )
+
+ # Launch the application
+ if __name__ == "__main__":
+     demo.launch(server_name="0.0.0.0", server_port=7860)
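
For a quick check of the handler outside the Gradio UI, a minimal sketch (the script name smoke_test.py is hypothetical and not part of this commit; it assumes the file above is saved as app.py next to it, and the Hausa greeting is only an illustrative input, roughly "Hello, how are you?"):

# smoke_test.py — hypothetical helper, not part of this commit
from app import chat_response  # importing app.py also loads the model

# gr.ChatInterface calls fn(message, history); history may be empty
print(chat_response("Sannu, yaya kake?", []))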