mattibuzzo13 committed on
Commit
6f03ad8
·
verified ·
1 Parent(s): 629d0a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +52 -36
app.py CHANGED
@@ -7,12 +7,17 @@ import re
7
  import math
8
  import json
9
  import unicodedata
 
 
 
10
  from langchain_openai import ChatOpenAI
11
- from langgraph.prebuilt import create_react_agent
12
- from langchain_core.messages import SystemMessage
13
  from langchain_core.tools import tool
14
  from langchain_community.tools import DuckDuckGoSearchRun
15
  from langchain_community.utilities import WikipediaAPIWrapper
 
 
 
 
16
 
17
  # (Keep Constants as is)
18
  # --- Constants ---
@@ -22,8 +27,7 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
22
  def web_search(query: str) -> str:
23
  """Search the web using DuckDuckGo. Use for current events, facts, and general knowledge."""
24
  try:
25
- search = DuckDuckGoSearchRun()
26
- return search.run(query)
27
  except Exception as e:
28
  return f"Search error: {e}"
29
 
@@ -42,17 +46,15 @@ def wikipedia_search(query: str) -> str:
42
  def python_repl(code: str) -> str:
43
  """
44
  Execute Python code for math calculations, data processing, logic.
45
- Always print() the final result.
46
  Example: print(2 + 2)
47
  """
48
  import io, sys
49
  old_stdout = sys.stdout
50
  sys.stdout = io.StringIO()
51
  try:
52
- exec(code, {
53
- "math": math, "json": json, "re": re,
54
- "unicodedata": unicodedata, "__builtins__": __builtins__
55
- })
56
  output = sys.stdout.getvalue()
57
  return output.strip() if output.strip() else "Code executed with no output. Use print()."
58
  except Exception as e:
@@ -68,8 +70,7 @@ def calculator(expression: str) -> str:
68
  Examples: '2 + 2', '100 * 1.07 ** 5', 'math.sqrt(144)'
69
  """
70
  try:
71
- result = eval(expression, {"math": math, "__builtins__": {}})
72
- return str(result)
73
  except Exception as e:
74
  return f"Calculation error: {e}"
75
 
@@ -77,27 +78,26 @@ def calculator(expression: str) -> str:
77
  @tool
78
  def get_task_file(task_id: str) -> str:
79
  """
80
- Fetch the file associated with a GAIA task by its task_id.
81
  Use this when the question mentions an attached file or document.
82
  """
83
  try:
 
84
  url = f"https://agents-course-unit4-scoring.hf.space/files/{task_id}"
85
- response = requests.get(url, timeout=15)
86
  if response.status_code == 200:
87
- content_type = response.headers.get("Content-Type", "")
88
- if "text" in content_type or "json" in content_type:
89
  return response.text[:5000]
90
- elif "image" in content_type:
91
- return f"[Image file - content-type: {content_type}]"
92
- elif "audio" in content_type:
93
- return f"[Audio file - content-type: {content_type}]"
94
- else:
95
- return f"[File attached: {content_type}]"
96
  return f"No file found for task {task_id}"
97
  except Exception as e:
98
  return f"Error fetching task file: {e}"
99
 
100
 
 
 
 
101
  SYSTEM_PROMPT = """You are a precise expert AI solving GAIA benchmark questions.
102
 
103
  ## Answer Format (CRITICAL)
@@ -122,16 +122,8 @@ FINAL ANSWER: <your answer here>
122
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
123
  class BasicAgent:
124
  def __init__(self):
125
- print("Initializing LangGraph ReAct Agent with Llama 3.3 70B...")
126
 
127
- llm = ChatOpenAI(
128
- model="Qwen/Qwen2.5-72B-Instruct", # supporta tool calling
129
- base_url="https://router.huggingface.co/v1",
130
- api_key=os.getenv("HF_TOKEN"),
131
- temperature=0.1,
132
- max_tokens=1024,
133
- )
134
-
135
  tools = [
136
  web_search,
137
  wikipedia_search,
@@ -140,20 +132,44 @@ class BasicAgent:
140
  get_task_file,
141
  ]
142
 
143
- self.agent = create_react_agent(
144
- model=llm,
145
- tools=tools,
146
- prompt=SystemMessage(content=SYSTEM_PROMPT),
 
 
 
 
147
  )
 
 
 
 
 
 
 
 
 
 
 
148
  print("Agent ready.")
149
 
 
 
 
 
 
 
150
  def __call__(self, question: str) -> str:
151
  print(f"Agent received question (first 50 chars): {question[:50]}...")
152
  try:
153
- result = self.agent.invoke({"messages": [("user", question)]})
 
 
154
  last_message = result["messages"][-1].content
 
155
 
156
- # Estrai FINAL ANSWER se presente
157
  match = re.search(r"FINAL ANSWER:\s*(.+?)(?:\n|$)", last_message, re.IGNORECASE)
158
  answer = match.group(1).strip() if match else last_message.strip().split("\n")[-1]
159
 
 
7
  import math
8
  import json
9
  import unicodedata
10
+ from typing import TypedDict, Annotated
11
+
12
+ from langchain_core.messages import AnyMessage, HumanMessage, SystemMessage
13
  from langchain_openai import ChatOpenAI
 
 
14
  from langchain_core.tools import tool
15
  from langchain_community.tools import DuckDuckGoSearchRun
16
  from langchain_community.utilities import WikipediaAPIWrapper
17
+
18
+ from langgraph.graph import START, StateGraph
19
+ from langgraph.graph.message import add_messages
20
+ from langgraph.prebuilt import ToolNode, tools_condition
21
 
22
  # (Keep Constants as is)
23
  # --- Constants ---
 
27
def web_search(query: str) -> str:
    """Search the web using DuckDuckGo. Use for current events, facts, and general knowledge."""
    # Instantiate the search wrapper per call; any failure (network, rate limit)
    # is reported back to the agent as a string rather than raised.
    try:
        searcher = DuckDuckGoSearchRun()
        result = searcher.run(query)
    except Exception as e:
        return f"Search error: {e}"
    return result
33
 
 
46
  def python_repl(code: str) -> str:
47
  """
48
  Execute Python code for math calculations, data processing, logic.
49
+ Always use print() to show the result.
50
  Example: print(2 + 2)
51
  """
52
  import io, sys
53
  old_stdout = sys.stdout
54
  sys.stdout = io.StringIO()
55
  try:
56
+ exec(code, {"math": math, "json": json, "re": re,
57
+ "unicodedata": unicodedata, "__builtins__": __builtins__})
 
 
58
  output = sys.stdout.getvalue()
59
  return output.strip() if output.strip() else "Code executed with no output. Use print()."
60
  except Exception as e:
 
70
  Examples: '2 + 2', '100 * 1.07 ** 5', 'math.sqrt(144)'
71
  """
72
  try:
73
+ return str(eval(expression, {"math": math, "__builtins__": {}}))
 
74
  except Exception as e:
75
  return f"Calculation error: {e}"
76
 
 
78
@tool
def get_task_file(task_id: str) -> str:
    """
    Fetch the file attached to a GAIA task by its task_id.
    Use this when the question mentions an attached file or document.
    """
    # Errors (network failure, bad task_id) are returned as strings so the
    # agent loop can react instead of crashing.
    try:
        import requests as req

        resp = req.get(
            f"https://agents-course-unit4-scoring.hf.space/files/{task_id}",
            timeout=15,
        )
        # Anything other than a plain 200 is treated as "no file".
        if resp.status_code != 200:
            return f"No file found for task {task_id}"
        ct = resp.headers.get("Content-Type", "")
        # Textual payloads are returned inline (truncated); binary payloads
        # are summarized by their content type.
        if "text" in ct or "json" in ct:
            return resp.text[:5000]
        return f"[Binary file attached - content-type: {ct}]"
    except Exception as e:
        return f"Error fetching task file: {e}"
96
 
97
 
98
class AgentState(TypedDict):
    """Graph state passed between LangGraph nodes: the running message history."""
    # Annotated with the add_messages reducer so that node outputs are
    # appended to the history rather than replacing it.
    messages: Annotated[list[AnyMessage], add_messages]
100
+
101
  SYSTEM_PROMPT = """You are a precise expert AI solving GAIA benchmark questions.
102
 
103
  ## Answer Format (CRITICAL)
 
122
# ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
123
  class BasicAgent:
124
  def __init__(self):
125
+ print("Initializing LangGraph Agent (HF course pattern)...")
126
 
 
 
 
 
 
 
 
 
127
  tools = [
128
  web_search,
129
  wikipedia_search,
 
132
  get_task_file,
133
  ]
134
 
135
+ # Exact pattern from the course notebook:
136
+ # llm.bind_tools() + a manually built StateGraph
137
+ llm = ChatOpenAI(
138
+ model="Qwen/Qwen2.5-72B-Instruct",
139
+ base_url="https://router.huggingface.co/v1",
140
+ api_key=os.getenv("HF_TOKEN"),
141
+ temperature=0.1,
142
+ max_tokens=1024,
143
  )
144
+ self.llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)
145
+
146
+ # Grafo ReAct: assistant → tools → assistant (loop)
147
+ builder = StateGraph(AgentState)
148
+ builder.add_node("assistant", self._assistant_node)
149
+ builder.add_node("tools", ToolNode(tools))
150
+ builder.add_edge(START, "assistant")
151
+ builder.add_conditional_edges("assistant", tools_condition)
152
+ builder.add_edge("tools", "assistant")
153
+ self.graph = builder.compile()
154
+
155
  print("Agent ready.")
156
 
157
def _assistant_node(self, state: AgentState):
    """Call the LLM on the system prompt followed by the message history."""
    # Prepend the system prompt on every turn; the graph only stores the
    # conversational messages in state.
    prompt = [SystemMessage(content=SYSTEM_PROMPT), *state["messages"]]
    reply = self.llm_with_tools.invoke(prompt)
    # Returned messages are merged into state via the add_messages reducer.
    return {"messages": [reply]}
162
+
163
  def __call__(self, question: str) -> str:
164
  print(f"Agent received question (first 50 chars): {question[:50]}...")
165
  try:
166
+ result = self.graph.invoke({
167
+ "messages": [HumanMessage(content=question)]
168
+ })
169
  last_message = result["messages"][-1].content
170
+ print(f"Agent raw output: {last_message[:200]}...")
171
 
172
+ # Extract FINAL ANSWER if present, otherwise fall back to the last line
173
  match = re.search(r"FINAL ANSWER:\s*(.+?)(?:\n|$)", last_message, re.IGNORECASE)
174
  answer = match.group(1).strip() if match else last_message.strip().split("\n")[-1]
175