nazib61 committed on
Commit
a5953c7
·
verified ·
1 Parent(s): 87d4867

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -37
app.py CHANGED
@@ -1,21 +1,34 @@
1
  import os
2
- import gradio as gr
3
  import copy
 
4
  from llama_cpp import Llama
5
- from huggingface_hub import hf_hub_download
6
 
 
 
 
 
 
 
 
 
 
7
 
8
- llm = Llama(
9
- model_path=hf_hub_download(
10
- repo_id=os.environ.get("REPO_ID", "unsloth/NVIDIA-Nemotron-3-Nano-4B-GGUF:Q4_K_M"),# Run inference directly in the terminal:
 
 
 
 
11
 
12
- filename=os.environ.get("MODEL_FILE", "NVIDIA-Nemotron-3-Nano-4B-GGUF:Q4_K_M"),
13
- ),
 
14
  n_ctx=2048,
15
- n_gpu_layers=50, # change n_gpu_layers if you have more or less VRAM
16
  )
17
 
18
-
19
  def generate_text(
20
  message,
21
  history: list[tuple[str, str]],
@@ -25,11 +38,12 @@ def generate_text(
25
  top_p,
26
  ):
27
  temp = ""
 
28
  input_prompt = f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "
29
  for interaction in history:
30
- input_prompt = input_prompt + str(interaction[0]) + " [/INST] " + str(interaction[1]) + " </s><s> [INST] "
31
-
32
- input_prompt = input_prompt + str(message) + " [/INST] "
33
 
34
  output = llm(
35
  input_prompt,
@@ -39,51 +53,38 @@ def generate_text(
39
  repeat_penalty=1.1,
40
  max_tokens=max_tokens,
41
  stop=[
42
- "<|prompter|>",
 
43
  "<|endoftext|>",
44
- "<|endoftext|> \n",
45
- "ASSISTANT:",
46
  "USER:",
47
- "SYSTEM:",
48
  ],
49
  stream=True,
50
  )
 
51
  for out in output:
52
  stream = copy.deepcopy(out)
53
  temp += stream["choices"][0]["text"]
54
  yield temp
55
 
56
-
57
  demo = gr.ChatInterface(
58
  generate_text,
59
- title="llama-cpp-python on GPU",
60
- description="Running LLM with https://github.com/abetlen/llama-cpp-python",
61
  examples=[
62
  ['How to setup a human base on Mars? Give short answer.'],
63
  ['Explain theory of relativity to me like I’m 8 years old.'],
64
- ['What is 9,000 * 9,000?'],
65
- ['Write a pun-filled happy birthday message to my friend Alex.'],
66
- ['Justify why a penguin might make a good king of the jungle.']
67
  ],
68
  cache_examples=False,
69
- retry_btn=None,
70
- undo_btn="Delete Previous",
71
- clear_btn="Clear",
72
  additional_inputs=[
73
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
74
  gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
75
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
76
- gr.Slider(
77
- minimum=0.1,
78
- maximum=1.0,
79
- value=0.95,
80
- step=0.05,
81
- label="Top-p (nucleus sampling)",
82
- ),
83
  ],
84
  )
85
 
86
-
87
  if __name__ == "__main__":
88
- demo.launch()
89
-
 
1
import os
import copy

import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Python 3.13 removed the stdlib `audioop` module, which older Gradio
# releases still import at startup.  The drop-in replacement package
# "audioop-lts" installs itself under the *same* module name (`audioop`),
# so a plain `import audioop` picks it up automatically.  The previous
# fallback `import audioop_lts as audioop` referenced a module name the
# package does not provide and could never succeed, so it is removed.
try:
    import audioop  # noqa: F401  (imported only so Gradio can find it)
except ImportError:
    print("Warning: audioop not found. If Gradio fails to load, install 'audioop-lts'.")
16
 
17
# --- Model setup -------------------------------------------------------
# Fetch the GGUF weights from the Hugging Face Hub.  Both the repository
# and the exact file can be overridden via environment variables, falling
# back to the Nemotron-3 Nano 4B Q4_K_M quantization.
_repo_id = os.environ.get("REPO_ID", "unsloth/NVIDIA-Nemotron-3-Nano-4B-GGUF")
_model_file = os.environ.get("MODEL_FILE", "NVIDIA-Nemotron-3-Nano-4B-Q4_K_M.gguf")

model_path = hf_hub_download(repo_id=_repo_id, filename=_model_file)

# Load the model once at module import; `llm` is shared by all requests.
llm = Llama(
    model_path=model_path,
    n_ctx=2048,        # context window size in tokens
    n_gpu_layers=-1,   # -1 offloads every layer to the GPU; use 0 for CPU-only
)
31
 
 
32
def generate_text(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for *message*, yielding the growing reply.

    NOTE(review): the parameters between `history` and `top_p`, and the
    `temperature=`/`top_p=` keyword arguments below, are elided in the
    diff view; they are reconstructed from the order of the Gradio
    `additional_inputs` — confirm against the deployed file.
    """
    accumulated = ""

    # Llama-2 style prompt: a <<SYS>> block, then alternating turns
    # delimited by [INST] ... [/INST] and </s><s> separators.
    parts = [f"[INST] <<SYS>>\n{system_message}\n<</SYS>>\n\n "]
    for user_turn, bot_turn in history:
        parts.append(f"{user_turn} [/INST] {bot_turn} </s><s> [INST] ")
    parts.append(f"{message} [/INST] ")
    input_prompt = "".join(parts)

    output = llm(
        input_prompt,
        temperature=temperature,
        top_p=top_p,
        repeat_penalty=1.1,
        max_tokens=max_tokens,
        stop=[
            "[/INST]",
            "</s>",
            "<|endoftext|>",
            "USER:",
            "ASSISTANT:",
        ],
        stream=True,
    )

    # Re-yield the cumulative text after every streamed chunk so the
    # Gradio chat window updates incrementally.
    for chunk in output:
        piece = copy.deepcopy(chunk)
        accumulated += piece["choices"][0]["text"]
        yield accumulated
69
 
70
# --- Gradio UI ---------------------------------------------------------
_EXAMPLES = [
    ["How to setup a human base on Mars? Give short answer."],
    ["Explain theory of relativity to me like I’m 8 years old."],
    ["What is 9,000 * 9,000?"],
]

# Extra controls rendered under the chat box; their order must match the
# trailing parameters of `generate_text`.
_CONTROLS = [
    gr.Textbox(value="You are a helpful and friendly AI assistant.", label="System message"),
    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
    gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.1, label="Temperature"),
    gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p"),
]

demo = gr.ChatInterface(
    generate_text,
    title="NVIDIA Nemotron-3 Nano (Llama-cpp)",
    description="Running NVIDIA Nemotron-3-Nano-4B via llama-cpp-python",
    examples=_EXAMPLES,
    cache_examples=False,
    additional_inputs=_CONTROLS,
)
88
 
 
89
if __name__ == "__main__":
    # Bind on all interfaces at the conventional Gradio/Spaces port so the
    # app is reachable from outside a container.
    launch_kwargs = {"server_name": "0.0.0.0", "server_port": 7860}
    demo.launch(**launch_kwargs)