zsolnai committed on
Commit
244a095
·
1 Parent(s): f95b7fe

Fix claude v3

Browse files
Files changed (4) hide show
  1. __pycache__/app.cpython-312.pyc +0 -0
  2. app.py +22 -25
  3. main.py +6 -0
  4. pyproject.toml +7 -0
__pycache__/app.cpython-312.pyc ADDED
Binary file (7.6 kB). View file
 
app.py CHANGED
@@ -36,7 +36,7 @@ tts_model = TTS(model_name=TTS_MODEL_NAME, progress_bar=False)
36
 
37
  def chat_with_bot(message, history):
38
  """
39
- Chat with your GGUF model.
40
  """
41
  # Ensure history is a list
42
  if history is None:
@@ -46,38 +46,35 @@ def chat_with_bot(message, history):
46
  return history, ""
47
 
48
  try:
49
- # Format history for Llama (OpenAI format)
50
- messages = []
51
- # Add system prompt if desired
52
- messages.append(
53
- {"role": "system", "content": "You are a helpful AI assistant."}
 
 
 
 
 
 
 
 
 
 
 
 
54
  )
55
 
56
- # Add conversation history
57
- for msg in history:
58
- messages.append(msg) # msg is already {"role": "...", "content": "..."}
59
-
60
- # Add new user message
61
- messages.append({"role": "user", "content": message})
62
-
63
- # Generate response using your GGUF model
64
- output = llm_client.text_generation(
65
- prompt=message, model=LLM_MODEL, max_new_tokens=256, temperature=0.7
66
- )
67
-
68
- response = output
69
-
70
  # Append to history
71
- history.append({"role": "user", "content": message})
72
- history.append({"role": "assistant", "content": response})
73
 
74
  return history, response
75
 
76
  except Exception as e:
77
  print(f"LLM Error: {e}")
78
- history.append({"role": "user", "content": message})
79
- history.append({"role": "assistant", "content": f"Error: {e}"})
80
- return history, f"Error: {e}"
81
 
82
 
83
  def text_to_speech_from_chat(chat_response):
 
36
 
37
  def chat_with_bot(message, history):
38
  """
39
+ Chat with your model using HuggingFace InferenceClient.
40
  """
41
  # Ensure history is a list
42
  if history is None:
 
46
  return history, ""
47
 
48
  try:
49
+ # Build conversation context from history
50
+ context = ""
51
+ for h in history:
52
+ role = "User" if h.get("role") == "user" else "Assistant"
53
+ context += f"{role}: {h.get('content', '')}\n"
54
+
55
+ # Create prompt with context
56
+ prompt = context + f"User: {message}\nAssistant:"
57
+
58
+ # Generate response using the model
59
+ response = llm_client.text_generation(
60
+ prompt=prompt,
61
+ model=LLM_MODEL,
62
+ max_new_tokens=256,
63
+ temperature=0.7,
64
+ do_sample=True,
65
+ top_p=0.95,
66
  )
67
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
68
  # Append to history
69
+ history.append([message, response])
 
70
 
71
  return history, response
72
 
73
  except Exception as e:
74
  print(f"LLM Error: {e}")
75
+ error_msg = f"Error: {str(e)}"
76
+ history.append([message, error_msg])
77
+ return history, error_msg
78
 
79
 
80
  def text_to_speech_from_chat(chat_response):
main.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
def main() -> None:
    """Entry point for the lab2 package: emit a greeting to stdout."""
    greeting = "Hello from lab2!"
    print(greeting)


# Run only when executed as a script, not on import.
if __name__ == "__main__":
    main()
pyproject.toml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ [project]
2
+ name = "lab2"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ requires-python = ">=3.12"
7
+ dependencies = []