| """LangGraph Agent""" |
| import os |
|
|
| from langgraph.graph import START, StateGraph, MessagesState |
| from langgraph.prebuilt import tools_condition |
| from langgraph.prebuilt import ToolNode |
| from langchain_google_genai import ChatGoogleGenerativeAI |
| from langchain_groq import ChatGroq |
| from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint, HuggingFaceEmbeddings |
| from langchain_community.tools.tavily_search import TavilySearchResults |
| |
| |
| from langchain_community.vectorstores import SupabaseVectorStore |
| from langchain_core.messages import SystemMessage, HumanMessage |
| from langchain_core.tools import tool |
| |
| |
| from langchain_core.messages import AIMessage |
| from difflib import SequenceMatcher |
| import time |
|
|
| from tools import add, subtract, multiply, divide, modulus, wiki_search, web_search, arvix_search, search_metadata |
|
|
| |
| with open("system_prompt.txt", "r", encoding="utf-8") as f: |
| system_prompt = f.read() |
|
|
| |
| sys_msg = SystemMessage(content=system_prompt) |
|
|
# Tools exposed to the LLM: basic arithmetic plus the search utilities from the local tools module.
tools = [
    multiply,
    add,
    subtract,
    divide,
    modulus,
    wiki_search,
    web_search,
    arvix_search,
    search_metadata,
]

def build_graph(provider: str = "google"):
    """Build the retriever -> assistant -> tools agent graph for the chosen LLM provider."""
    # Select the chat model backend.
    if provider == "google":
        llm = ChatGoogleGenerativeAI(model="gemini-2.5-flash-preview-05-20", temperature=1)
    elif provider == "groq":
        llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
    elif provider == "huggingface":
        llm = ChatHuggingFace(
            llm=HuggingFaceEndpoint(
                # HuggingFaceEndpoint takes the full inference URL as `endpoint_url`, not `url`.
                endpoint_url="https://api-inference.huggingface.co/models/Meta-DeepLearning/llama-2-7b-chat-hf",
                temperature=0,
            ),
        )
    else:
        raise ValueError("Invalid provider. Choose 'google', 'groq' or 'huggingface'.")

    # Bind the tool schemas so the model can emit tool calls.
    llm_with_tools = llm.bind_tools(tools)

    def assistant(state: MessagesState):
        """Assistant node: answer with retrieved metadata when available, otherwise call the LLM directly."""
        messages = state["messages"]

        # Immediately after the retriever node the state holds exactly two messages:
        # the user question and the retrieval result. Only then is the prompt reframed
        # around the retrieved information; later turns (e.g. after tool calls) fall
        # through to the normal path below.
        if len(messages) == 2 and "No matching results found in metadata" not in messages[-1].content:
            new_messages = [
                SystemMessage(content="You are a helpful assistant. Use the following retrieved information to answer the question. If the information is relevant, use it directly. If not, use your own knowledge."),
                HumanMessage(content=f"Question: {messages[-2].content}\n\nRetrieved Information:\n{messages[-1].content}")
            ]
            time.sleep(2)  # brief pause to stay under provider rate limits
            return {"messages": [llm_with_tools.invoke(new_messages)]}
        else:
            time.sleep(2)  # brief pause to stay under provider rate limits
            return {"messages": [llm_with_tools.invoke([sys_msg] + messages)]}

    def retriever(state: MessagesState):
        """Retriever node: look up the question in the metadata store before the assistant runs."""
        query = state["messages"][-1].content
        # search_metadata is bound as a LangChain tool above, so call it via .invoke().
        result = search_metadata.invoke(query)
        return {"messages": [AIMessage(content=result)]}

    # Wire the graph: START -> retriever -> assistant, with a tool-calling loop.
    builder = StateGraph(MessagesState)
    builder.add_node("retriever", retriever)
    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    builder.add_edge(START, "retriever")
    builder.add_edge("retriever", "assistant")

    # tools_condition routes to the "tools" node when the assistant emitted tool calls
    # and to END otherwise; tool results are fed back to the assistant.
    builder.add_conditional_edges(
        "assistant",
        tools_condition,
    )
    builder.add_edge("tools", "assistant")

    return builder.compile()
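

# Example usage sketch: build the graph and run a single question through it.
# Assumes the API key for the chosen provider (e.g. GOOGLE_API_KEY) is set in the
# environment, and that system_prompt.txt and the local tools module are available.
if __name__ == "__main__":
    graph = build_graph(provider="google")
    # Placeholder question; substitute any query you want to test.
    question = "What is 12 multiplied by 7?"
    result = graph.invoke({"messages": [HumanMessage(content=question)]})
    for message in result["messages"]:
        message.pretty_print()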