| import gradio as gr |
| from langchain_ollama import ChatOllama |
| from langchain_core.tools import tool |
| from langgraph.prebuilt import ToolNode, tools_condition |
| from langgraph.graph import StateGraph, START, END |
| from langgraph.graph.message import MessagesState |
| from langgraph.checkpoint.memory import MemorySaver |
| from langchain_core.messages import ( |
| convert_to_openai_messages, |
| SystemMessage, |
| HumanMessage, |
| ) |
| from langchain_community.tools.tavily_search import TavilySearchResults |
| from langchain_community.utilities import OpenWeatherMapAPIWrapper |
|
|
from dotenv import load_dotenv
# Load environment variables from a local .env file before any tool/LLM setup
# (presumably TAVILY_API_KEY and OPENWEATHERMAP_API_KEY for the tools below,
# plus any Ollama settings — confirm against the deployment's .env).
load_dotenv()
|
|
@tool
def web_search_tool(query: str) -> list:
    """Search the web for information."""
    # NOTE: `TavilySearchResults.invoke` returns a list of result dicts, not a
    # str — the return annotation previously said `str`, which was wrong.
    # LangChain stringifies non-str tool output before handing it to the model,
    # so returning the list unchanged preserves behavior.
    tavily_search = TavilySearchResults(max_results=3)
    search_docs = tavily_search.invoke(query)
    return search_docs
|
|
@tool
def weather_tool(city: str) -> str:
    """Get the weather in a given city.
    Args:
        city: The city to get the weather for.
        Example: "London,GB", "New York,US"
    """
    # OpenWeatherMapAPIWrapper reads its API key from the environment
    # (loaded via dotenv at import time); `run` returns a readable summary.
    return OpenWeatherMapAPIWrapper().run(city)
|
|
def create_conversation_graph():
    """
    Create a conversational graph with a memory saver.

    Wires a two-node LangGraph (assistant <-> tools) around a local Ollama
    model with the weather and web-search tools bound, and compiles it with
    an in-memory checkpointer so conversation state persists per thread_id.

    Returns:
        The compiled graph, ready for `.invoke(state, config)`.
    """
    memory = MemorySaver()
    tools = [weather_tool, web_search_tool]

    llm = ChatOllama(model="qwen2.5:3b", temperature=0.5)
    llm_with_tools = llm.bind_tools(tools)

    # BUG FIX: the previous prompt said the assistant was "tasked with
    # performing arithmetic on a set of inputs" — a leftover from an
    # unrelated example. Describe the tools actually bound above instead.
    sys_msg = SystemMessage(
        content=(
            "You are a helpful assistant. You can search the web and look up "
            "the current weather for a city when the user asks."
        )
    )

    def assistant(state: MessagesState) -> MessagesState:
        # Prepend the system prompt on every turn; only the `messages`
        # channel is checkpointed, so sys_msg itself is never persisted.
        return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}

    builder = StateGraph(MessagesState)

    builder.add_node("assistant", assistant)
    builder.add_node("tools", ToolNode(tools))

    # assistant -> tools when the last AI message carries tool calls,
    # otherwise END (routing decided by the prebuilt tools_condition).
    builder.add_edge(START, "assistant")
    builder.add_conditional_edges("assistant", tools_condition)
    builder.add_edge("tools", "assistant")

    graph = builder.compile(checkpointer=memory)
    return graph
|
|
|
|
def create_chat_interface(thread_id: str = "123"):
    """
    Create and configure the chat interface with the conversation graph.

    Args:
        thread_id: LangGraph checkpoint thread id. Defaults to "123" for
            backward compatibility. NOTE: every Gradio session served by
            this interface shares the same thread, and therefore the same
            conversation memory — pass a distinct id to isolate histories.

    Returns:
        A `gr.ChatInterface` ready for `.launch()`.
    """
    graph = create_conversation_graph()
    config = {"configurable": {"thread_id": thread_id}}

    def chat_with_assistant(message, history):
        """
        Run one user turn through the conversational graph and return the
        assistant's reply as an OpenAI-style message dict.

        `history` (supplied by Gradio) is intentionally unused: the graph's
        MemorySaver checkpointer already holds the conversation for this
        thread_id, so only the new human message is sent in.
        """
        messages_state = MessagesState(messages=[HumanMessage(content=message)])
        response = graph.invoke(messages_state, config)

        # Debug aid: dump the full accumulated conversation to stdout each
        # turn (grows with the conversation — remove for production).
        for msg in response["messages"]:
            msg.pretty_print()

        # The last message in the state is the assistant's final reply.
        ai_message = response["messages"][-1]

        # Gradio's `type="messages"` mode accepts an OpenAI-style
        # {"role": ..., "content": ...} dict.
        return convert_to_openai_messages(ai_message)

    demo = gr.ChatInterface(
        fn=chat_with_assistant,
        type="messages",
        title="Conversational Bot",
        description="Ask anything you want",
        examples=["Hello", "What is your name?", "What is the weather in Tokyo?"],
    )
    return demo
|
|
if __name__ == "__main__":
    # Build the Gradio app and serve it on the default local address.
    app = create_chat_interface()
    app.launch()
|
|