from typing import Any, TypedDict

from langchain_community.tools import TavilySearchResults
from langchain_openai import ChatOpenAI
from langgraph.graph import StateGraph


class AgentState(TypedDict, total=False):
    question: str
    search_result: Any  # Tavily returns a list of result dicts
    llm_output: str
    final_answer: str


def build_graph():
    # StateGraph needs an annotated schema; a plain dict has no type hints.
    graph = StateGraph(AgentState)

    # Set up the web-search tool (reads TAVILY_API_KEY from the environment).
    search = TavilySearchResults()

    def search_step(state: AgentState) -> AgentState:
        # Retrieve web context for the question.
        question = state.get("question")
        if not question:
            raise ValueError("Missing 'question' in state")
        result = search.invoke(question)
        # Return only the keys this node updates; LangGraph merges them into state.
        return {"search_result": result}

    def llm_step(state: AgentState) -> AgentState:
        # Ask the model for the bare answer, grounded in the search results.
        llm = ChatOpenAI(model="gpt-3.5-turbo", temperature=0)
        question = state.get("question")
        search_info = state.get("search_result", "")
        prompt = f"""
You are solving a GAIA benchmark question.

Here’s the question:
{question}

Here’s retrieved web info:
{search_info}

ONLY return the final exact answer (no explanation, no prefix).
"""
        response = llm.invoke(prompt)
        return {"llm_output": response.content.strip()}

    def formatter_step(state: AgentState) -> AgentState:
        # Normalize the model output into the final answer field.
        llm_output = state.get("llm_output")
        if not llm_output:
            raise ValueError("Missing 'llm_output' in state")
        return {"final_answer": llm_output.strip()}

    # Add nodes
    graph.add_node("search", search_step)
    graph.add_node("llm", llm_step)
    graph.add_node("formatter", formatter_step)

    # Define flow: search -> llm -> formatter
    graph.set_entry_point("search")
    graph.add_edge("search", "llm")
    graph.add_edge("llm", "formatter")
    graph.set_finish_point("formatter")

    # Compile the builder into a runnable graph.
    return graph.compile()
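

# --- Usage sketch (not part of the original; a minimal example of how the
# compiled graph might be invoked). Assumes OPENAI_API_KEY and TAVILY_API_KEY
# are set in the environment; the sample question is hypothetical and stands
# in for a real GAIA item.
if __name__ == "__main__":
    app = build_graph()
    final_state = app.invoke({"question": "What year was the Eiffel Tower completed?"})
    print(final_state["final_answer"])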