Arabic250 committed on
Commit
d47a382
·
verified ·
1 Parent(s): 02561db

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +9 -16
app.py CHANGED
@@ -3,25 +3,18 @@ import gradio as gr
3
  from llama_cpp import Llama
4
  import os
5
 
6
- # تحديد المسار المحلي للملف المرفوع في الـ Space
7
  model_path = "gemma-4-medical.gguf"
8
 
9
- try:
10
- if os.path.exists(model_path):
11
- print("--- Loading local GGUF model ---")
12
- llm = Llama(model_path=model_path, n_ctx=2048, n_threads=2)
13
-
14
- def chat(message, history):
15
- output = llm(f"USER: {message}\nASSISTANT: ", max_tokens=512, stop=['USER:'])
16
- return output['choices'][0]['text']
17
-
18
- demo = gr.ChatInterface(fn=chat, title='Gemma 4 Medical (Internal)')
19
- else:
20
- with gr.Blocks() as demo:
21
- gr.Markdown(f'### ⚠️ الملف {model_path} غير موجود في المساحة.')
22
- except Exception as e:
23
  with gr.Blocks() as demo:
24
- gr.Markdown(f'### ❌ خطأ: {e}')
25
 
26
  if __name__ == '__main__':
27
  demo.launch()
 
# app.py — Gradio chat front-end for a locally stored GGUF medical model.
import gradio as gr
from llama_cpp import Llama
import os

# Local path of the model file uploaded into the Space's storage.
model_path = "gemma-4-medical.gguf"

if os.path.exists(model_path):
    try:
        # Small context window and thread count: Space CPU hardware is
        # limited — presumably a free CPU tier; confirm before raising.
        llm = Llama(model_path=model_path, n_ctx=1024, n_threads=2)

        def predict(message, history):
            """Generate one assistant reply for gr.ChatInterface.

            `history` is accepted because the ChatInterface callback
            contract requires it, but it is intentionally unused: each
            request is answered from the current message alone.
            """
            response = llm(
                f"USER: {message}\nASSISTANT: ",
                max_tokens=256,
                stop=['USER:'],
            )
            # llama-cpp-python returns a completion dict; the generated
            # text lives under choices[0]['text'].
            return response['choices'][0]['text']

        demo = gr.ChatInterface(fn=predict, title='Gemma 4 Medical - Local Edition')
    except Exception as e:
        # Without this guard a corrupt or incompatible GGUF (or an OOM
        # during load) leaves `demo` unbound and the Space dies with a
        # NameError at launch; surface the error in the UI instead.
        with gr.Blocks() as demo:
            gr.Markdown(f'# Error loading model: {e}')
else:
    with gr.Blocks() as demo:
        gr.Markdown(f'# Error: Model file {model_path} not found in Space storage.')

if __name__ == '__main__':
    demo.launch()