import os

import pandas as pd
from smolagents import (
    CodeAgent,
    DuckDuckGoSearchTool,
    HfApiModel,
    InferenceClientModel,
    LiteLLMModel,
    tool,
)


# ---------------------------------------------------------------------------
# Tools
# ---------------------------------------------------------------------------

@tool
def add(a: int, b: int) -> float:
    """Add two numbers and return their sum.

    Args:
        a: first number
        b: second number
    """
    # BUG FIX: the original body computed a * b while the tool is named
    # "add" (and its docstring even said "Divide"). The name is the
    # interface the agent sees, so the body now performs addition.
    res = a + b
    print(f"Tool call: add({a}, {b}) -> {res}")
    return res


@tool
def reverse(s: str) -> str:
    """Reverse a string.

    Args:
        s: string to reverse
    """
    ret = s[::-1]
    print(f"Tool call: reverse({s}) -> {ret}")
    return ret


@tool
def transcribe_audio(file_path: str) -> str:
    """Transcribe an audio file to text using Hugging Face's ASR model.

    Args:
        file_path: path to the audio file
    """
    # Imported lazily so the heavy transformers dependency is only loaded
    # when this tool is actually invoked.
    from transformers import pipeline

    transcriber = pipeline(
        "automatic-speech-recognition", model="facebook/wav2vec2-base-960h"
    )
    transcription = transcriber(file_path)
    return transcription["text"]


@tool
def read_excel(file_path: str) -> pd.DataFrame:
    """Read an Excel file and return its content as a DataFrame.

    Args:
        file_path: path to the Excel file
    """
    return pd.read_excel(file_path)


# Tool set handed to the agent: two toy calculators, web search, ASR,
# and spreadsheet reading.
tools = [
    add,
    reverse,
    DuckDuckGoSearchTool(),
    transcribe_audio,
    read_excel,
]

# ---------------------------------------------------------------------------
# LLM
# ---------------------------------------------------------------------------
# NOTE(review): token is read from TOGETHER_KEY and routed through
# provider="together" — presumably a Together API key; confirm the env var
# name matches the deployment environment.
llm = InferenceClientModel(
    model_id='meta-llama/Llama-3.3-70B-Instruct',
    token=os.getenv('TOGETHER_KEY'),
    provider="together",
)


# ---------------------------------------------------------------------------
# Agent
# ---------------------------------------------------------------------------

class BasicAgent:
    """Thin callable wrapper around a smolagents CodeAgent.

    Builds a CodeAgent over the module-level ``tools`` and ``llm`` and
    exposes it as ``agent(question) -> answer``.
    """

    def __init__(self):
        self.agent = CodeAgent(tools=tools, model=llm)

    def __call__(self, question: str) -> str:
        """Run the agent on *question* and return its final answer."""
        print(f"Agent received question (first 30 chars): {question[:30]}...")
        answer = self.agent.run(question)
        print(f"Agent answer: {answer}")
        return answer