TahaFawzyElshrif committed on
Commit
5c1c37e
·
1 Parent(s): 43544d8

cleaned code

Browse files
Files changed (2) hide show
  1. Server.py +1 -1
  2. agent/agent_graph/Graph_Nodes.py +1 -22
Server.py CHANGED
@@ -66,7 +66,7 @@ def save_send_email(call,user_email,user_name):
66
 
67
  #path_pdf ="/content/drive/MyDrive/study/Projects/CodeBuddyAI/tmp.pdf"
68
  PDF_generator_Node(call['answer'],title,path_pdf)
69
- EMAIL_sender_Node(user_email,email_txt,title,path_pdf)
70
  import os
71
  os.remove(path_pdf)
72
  print("Done")
 
66
 
67
  #path_pdf ="/content/drive/MyDrive/study/Projects/CodeBuddyAI/tmp.pdf"
68
  PDF_generator_Node(call['answer'],title,path_pdf)
69
+ #EMAIL_sender_Node(user_email,email_txt,title,path_pdf)
70
  import os
71
  os.remove(path_pdf)
72
  print("Done")
agent/agent_graph/Graph_Nodes.py CHANGED
@@ -10,7 +10,6 @@ from agent.agent_graph.Graph_Utils import get_egp_to_usd
10
  from agent.rag.rag import *
11
 
12
  def answer_question(state:ProblemState):
13
- print("Answering...")
14
  question = state["question"]
15
 
16
 
@@ -25,15 +24,11 @@ def answer_question(state:ProblemState):
25
  guide_prompt = Tasks_prompts.RAG.value
26
 
27
 
28
- answer_prompt = ((guide_prompt + "طلب المستخدم:\n" + question + "\nاهم معلومات المستخدم لاستخدامها ف الدلالة (بالنسبة للسعر هو نفس السعر لكن بالدولار فدائما ركز على السعر بالدولار\n)"+ str(state) + Route_prompts.FINALIZER_PROMPT.value))
29
 
30
- print("ANSWER-PROMPT",answer_prompt)
31
 
32
  state["answer"] = get_llm_answer(model_llm=state["llm"],messages=state["memory"] + [HumanMessage(content=(guide_prompt + "طلب المستخدم:\n" + question + "\nاهم معلومات المستخدم لاستخدامها ف الدلالة (بالنسبة للسعر هو نفس السعر لكن بالدولار فدائما ركز على السعر بالدولار\n)"+ str(state) + Route_prompts.FINALIZER_PROMPT.value))])
33
 
34
- print("Answer ",state["answer"])
35
- print("STATE ",state)
36
- print("----------------------------")
37
  return state
38
 
39
 
@@ -73,11 +68,9 @@ def update_context(state:ProblemState):
73
  # Make the prompt
74
  prompt_llm_new_info = Route_prompts.Context_UPDATOR.value + "\n <KEYS> \n" +str(keys_modifiable) +"\n </KEYS> <Text>"+state["question"]+"</Text>"
75
 
76
- print("UPDATE - PROMPT",prompt_llm_new_info)
77
 
78
 
79
  llm_new_info = get_llm_answer(model_llm=state["llm"],messages = [HumanMessage(prompt_llm_new_info)])
80
- print("ANSWER ", llm_new_info)
81
 
82
 
83
  # Save and Process the returned json to prevent hallucination
@@ -102,32 +95,24 @@ def update_context(state:ProblemState):
102
  "رجع فقط BOOL (True/False)"
103
  )
104
 
105
- print("UPDATE 2 - PROMPT ",check_finalized_prompt)
106
  check_finalized = get_llm_answer(model_llm=state["llm"], messages = [HumanMessage(
107
  check_finalized_prompt
108
  )])
109
- print("ANSWER ", check_finalized)
110
  state['all_ok'] = check_finalized.strip().lower() == "true" # If wrong parsed it's false
111
- print("Context updated")
112
 
113
 
114
 
115
  except Exception as e:
116
  print("Context was not updated due to error : ",e)
117
 
118
- print("STATE ",state)
119
- print("----------------------------")
120
 
121
  return state
122
 
123
 
124
 
125
  def convertPriceToDollar(state:ProblemState):
126
- print("Converting price to dollar...")
127
  if "price" in state.keys():
128
  state["price"] = get_egp_to_usd(state["price"])
129
- print("STATE ",state)
130
- print("----------------------------")
131
 
132
  return state
133
 
@@ -142,22 +127,16 @@ def step(state:ProblemState):
142
  for i in task_steps[state.get("question_type")]:
143
  if i not in state.keys():
144
  next_topic = i
145
- print("Next topic ",next_topic)
146
  break
147
  # Only after finishing the to do list of the question type we can ask for all_ok to confirm
148
  if (not next_topic) and ("all_ok" not in state.keys() or (state["all_ok"]==False)) and "question_type" in state.keys():
149
 
150
  next_topic = "all_ok"
151
- print("Next topic ",next_topic)
152
 
153
 
154
 
155
  step_prompt = (System_prompts.STATE_DESCRIBE.value + f"<order>{next_topic} </order> <state>{state}</state>" + Route_prompts.FINALIZER_PROMPT_STEP.value)
156
- print("STEP PROMPT",step_prompt)
157
  state['answer'] = get_llm_answer(model_llm=state["llm"],messages = [HumanMessage(step_prompt)])
158
- print("ANSWER ", state['answer'])
159
- print("STATE ",state)
160
- print("----------------------------")
161
 
162
  return state
163
 
 
10
  from agent.rag.rag import *
11
 
12
  def answer_question(state:ProblemState):
 
13
  question = state["question"]
14
 
15
 
 
24
  guide_prompt = Tasks_prompts.RAG.value
25
 
26
 
 
27
 
 
28
 
29
  state["answer"] = get_llm_answer(model_llm=state["llm"],messages=state["memory"] + [HumanMessage(content=(guide_prompt + "طلب المستخدم:\n" + question + "\nاهم معلومات المستخدم لاستخدامها ف الدلالة (بالنسبة للسعر هو نفس السعر لكن بالدولار فدائما ركز على السعر بالدولار\n)"+ str(state) + Route_prompts.FINALIZER_PROMPT.value))])
30
 
31
+
 
 
32
  return state
33
 
34
 
 
68
  # Make the prompt
69
  prompt_llm_new_info = Route_prompts.Context_UPDATOR.value + "\n <KEYS> \n" +str(keys_modifiable) +"\n </KEYS> <Text>"+state["question"]+"</Text>"
70
 
 
71
 
72
 
73
  llm_new_info = get_llm_answer(model_llm=state["llm"],messages = [HumanMessage(prompt_llm_new_info)])
 
74
 
75
 
76
  # Save and Process the returned json to prevent hallucination
 
95
  "رجع فقط BOOL (True/False)"
96
  )
97
 
 
98
  check_finalized = get_llm_answer(model_llm=state["llm"], messages = [HumanMessage(
99
  check_finalized_prompt
100
  )])
 
101
  state['all_ok'] = check_finalized.strip().lower() == "true" # If wrong parsed it's false
 
102
 
103
 
104
 
105
  except Exception as e:
106
  print("Context was not updated due to error : ",e)
107
 
 
 
108
 
109
  return state
110
 
111
 
112
 
113
  def convertPriceToDollar(state:ProblemState):
 
114
  if "price" in state.keys():
115
  state["price"] = get_egp_to_usd(state["price"])
 
 
116
 
117
  return state
118
 
 
127
  for i in task_steps[state.get("question_type")]:
128
  if i not in state.keys():
129
  next_topic = i
 
130
  break
131
  # Only after finishing the to do list of the question type we can ask for all_ok to confirm
132
  if (not next_topic) and ("all_ok" not in state.keys() or (state["all_ok"]==False)) and "question_type" in state.keys():
133
 
134
  next_topic = "all_ok"
 
135
 
136
 
137
 
138
  step_prompt = (System_prompts.STATE_DESCRIBE.value + f"<order>{next_topic} </order> <state>{state}</state>" + Route_prompts.FINALIZER_PROMPT_STEP.value)
 
139
  state['answer'] = get_llm_answer(model_llm=state["llm"],messages = [HumanMessage(step_prompt)])
 
 
 
140
 
141
  return state
142