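"""Gradio Space app: a smolagents CodeAgent that uses tools served over MCP.

The agent retrieves its tools from a remote Gradio MCP server (over SSE) and is
exposed through a Gradio ChatInterface.
"""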
import gradio as gr

from smolagents import CodeAgent, InferenceClientModel
from smolagents.mcp_client import MCPClient

# Initialize the model using Hugging Face's serverless inference
# Ensure you have an HF_TOKEN set in your Space's Secrets
model = InferenceClientModel("meta-llama/Llama-3.2-3B-Instruct")

mcp_client = None
try:
    # Initialize the MCP Client pointing to the Gradio SSE endpoint
    mcp_client = MCPClient(
        {"url": "https://abidlabs-mcp-tools2.hf.space/gradio_api/mcp/sse"}
    )
    # Retrieve tools from the MCP server
    tools = mcp_client.get_tools()

    # Create the agent with the retrieved tools
    agent = CodeAgent(tools=[*tools], model=model, add_base_tools=True)
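    # (add_base_tools=True also gives the agent smolagents' built-in default
    # tools, on top of the MCP tools retrieved above.)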
    def call_agent(message, history):
        try:
            # Run the agent and return the final answer as a string
            response = agent.run(message)
            return str(response)
        except Exception as error:
            return f"Agent Error: {error}"

    # Set up the Gradio Chat Interface
    demo = gr.ChatInterface(
        fn=call_agent,
        type="messages",
        examples=["What is the prime factorization of 2026?"],
        title="SKT AI: Agent with MCP Tools",
        description="This agent uses the Model Context Protocol (MCP) to access external tools.",
    )
except Exception as e:
    print(f"Initialization Error: {e}")
    # Fallback demo in case of connection issues. The error text is captured in
    # a separate variable because Python clears `e` once the except block ends,
    # so the lambda cannot reference `e` directly at call time.
    init_error = f"Initialization failed: {e}"
    demo = gr.Interface(fn=lambda x: init_error, inputs="text", outputs="text")
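
# Launch the app; the try/finally ensures the MCP client connection is closed
# when the Space shuts down, even if launch() raises.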
if __name__ == "__main__":
    try:
        demo.launch()
    finally:
        if mcp_client is not None:
            mcp_client.disconnect()