# https://huggingface.co/docs/smolagents/tutorials/inspect_runs
import os
from concurrent.futures import ThreadPoolExecutor

from langfuse import Langfuse

import smolagents.local_python_executor as lpe

# Fix for compatibility with smolagents >= 1.22.0 and
# openinference-instrumentation-smolagents: make sure ThreadPoolExecutor is
# available on the local_python_executor module before it is instrumented.
if not hasattr(lpe, "ThreadPoolExecutor"):
    lpe.ThreadPoolExecutor = ThreadPoolExecutor

from openinference.instrumentation.smolagents import SmolagentsInstrumentor
from smolagents import (
    CodeAgent,
    ToolCallingAgent,
    WebSearchTool,
    VisitWebpageTool,
    InferenceClientModel,
)

# Initialize Langfuse (reads LANGFUSE_PUBLIC_KEY, LANGFUSE_SECRET_KEY, and
# LANGFUSE_HOST from the environment)
langfuse = Langfuse()

# Verify connection
if langfuse.auth_check():
    print("āœ“ Langfuse connection successful!")
else:
    print("āœ— Langfuse connection failed. Check your API key and host.")

# Instrument smolagents so agent runs, model calls, and tool calls are
# exported as traces to Langfuse (as shown in the tutorial linked above).
SmolagentsInstrumentor().instrument()

# Model served via Hugging Face Inference Providers; swap in any model ID
# available to your account.
model = InferenceClientModel("deepseek-ai/DeepSeek-V4-Flash")

# Worker agent that can search the web and read pages.
search_agent = CodeAgent(
    tools=[WebSearchTool(), VisitWebpageTool()],
    model=model,
    name="search_agent",
    description="This is an agent that can do web search.",
)

# Manager agent that delegates research to the search agent.
manager_agent = CodeAgent(
    tools=[],
    model=model,
    managed_agents=[search_agent],
)

if __name__ == "__main__":
    result = manager_agent.run(
        "How can Langfuse be used to monitor and improve the reasoning and "
        "decision-making of smolagents when they execute multi-step tasks, "
        "like dynamically adjusting a recipe based on user feedback or "
        "available ingredients?"
    )
    print("\nAgent Result:")
    print(result)

    # Make sure all pending traces are sent before the process exits.
    langfuse.flush()
    print("\nāœ“ Done!")
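
# ---------------------------------------------------------------------------
# Example environment setup before running this script (a sketch; the key
# values and host below are placeholders, adjust them to your own Langfuse
# project and Hugging Face account):
#
#   export LANGFUSE_PUBLIC_KEY="pk-lf-..."
#   export LANGFUSE_SECRET_KEY="sk-lf-..."
#   export LANGFUSE_HOST="https://cloud.langfuse.com"  # or your self-hosted URL
#   export HF_TOKEN="hf_..."
#
# With these set, running the script executes the manager/search agent pair
# once, and the resulting trace (agent steps, model calls, tool calls) can be
# inspected in the Langfuse UI.
# ---------------------------------------------------------------------------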