# Author: David — "Implementing agent tools and logic" (commit edf3100, 1.71 kB)
import asyncio
import os

from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.core.tools import FunctionTool
from llama_index.llms.google_genai import GoogleGenAI
from llama_index.tools.arxiv import ArxivToolSpec
from llama_index.tools.duckduckgo import DuckDuckGoSearchResultsToolSpec
from llama_index.tools.wikipedia import WikipediaToolSpec

from gaia_system_prompt import GAIA_SYSTEM_PROMPT
from tools import interpret_python_math_code
# API key read from the environment; note the env var is named GEMINI_TOKEN.
# os.getenv returns None when unset — presumably GoogleGenAI then errors at
# construction time; verify against deployment config.
GEMINI_API_KEY: str | None = os.getenv("GEMINI_TOKEN")
# Gemini model identifier used by FinalAgent's LLM.
GEMINI_MODEL_NAME = "gemini-2.5-flash-preview-04-17"
class FinalAgent:
    """GAIA-style question-answering agent.

    Wraps a Gemini LLM in a LlamaIndex ``AgentWorkflow`` equipped with a
    Python-math interpreter tool plus DuckDuckGo / Wikipedia / arXiv
    research tools. Instances are callable: ``agent(question) -> str``.
    """

    def __init__(self):
        # LLM initialization.
        self.llm = GoogleGenAI(model=GEMINI_MODEL_NAME, api_key=GEMINI_API_KEY)

        # Tool initialization. A ToolSpec is a *bundle* of tools, not a tool
        # itself — it must be expanded with .to_tool_list() before being
        # handed to the workflow, which expects a flat list of tools.
        self.tools = [
            FunctionTool.from_defaults(
                func=interpret_python_math_code,
                name="InterpretPythonMathCode",
                description="Interprets Python code for mathematical expressions.",
            ),
        ]
        for spec in (
            DuckDuckGoSearchResultsToolSpec(),
            WikipediaToolSpec(),
            ArxivToolSpec(),
        ):
            self.tools.extend(spec.to_tool_list())

        # Agent workflow initialization. from_tools_or_functions builds a
        # single-agent workflow around the tool list; AgentWorkflow's own
        # constructor does not accept tools/system_prompt directly.
        self.agent = AgentWorkflow.from_tools_or_functions(
            self.tools,
            llm=self.llm,
            system_prompt=GAIA_SYSTEM_PROMPT,
        )
        print("FinalAgent initialized.")

    def __call__(self, question: str) -> str:
        """Run the workflow on *question* and return the answer as plain text.

        ``AgentWorkflow.run`` is asynchronous; drive it to completion here so
        callers receive a plain string rather than an awaitable handler.
        """
        print(f"Agent received question (first 50 chars): {question[:50]}...")
        response = asyncio.run(self.agent.run(user_msg=question))
        return str(response)