akseljoonas HF Staff committed on
Commit
75cb10f
·
1 Parent(s): 706d95d

Resolve merge conflict in tools.py and update search agent config

Browse files
Files changed (1) hide show
  1. run_search_agent.py +34 -18
run_search_agent.py CHANGED
@@ -19,31 +19,31 @@ async def test_search_agent(query: str):
19
  print(f"Testing search agent with query: {query}\n")
20
  print("=" * 60)
21
 
 
 
 
 
22
  # Create event queue for the sub-agent
23
  sub_event_queue = asyncio.Queue()
24
 
25
- # Create search tool router
26
- search_tool_router = await create_search_tool_router()
 
 
 
 
 
 
 
 
 
 
27
 
28
  # Create config
29
  sub_config = Config(
30
  model_name="anthropic/claude-haiku-4-5",
31
  )
32
 
33
- # Create session with custom system prompt
34
- sub_session = Session(
35
- event_queue=sub_event_queue,
36
- config=sub_config,
37
- tool_router=search_tool_router,
38
- context_manager=ContextManager(
39
- tool_specs=search_tool_router.get_tool_specs_for_llm(),
40
- max_context=get_max_tokens(sub_config.model_name),
41
- compact_size=0.1,
42
- untouched_messages=5,
43
- prompt_file_suffix="search_docs_system_prompt.yaml",
44
- ),
45
- )
46
-
47
  # Event listener to show what the sub-agent is doing
48
  async def event_monitor():
49
  while True:
@@ -81,6 +81,21 @@ async def test_search_agent(query: str):
81
 
82
  # Run the sub-agent and event monitor concurrently
83
  async with search_tool_router:
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
84
  monitor_task = asyncio.create_task(event_monitor())
85
 
86
  result = await Handlers.run_agent(
@@ -107,9 +122,10 @@ async def main():
107
  # Example queries to test
108
  test_queries = [
109
  # "Explore the TRL documentation structure and find information about DPO trainer",
110
- # "is there a way to get the logs from a served huggingface space",
111
  # "How do I train GLM4.7 with a GRPO training loop with trl with llm judge as a reward model for training on hle?"
112
- "can i stream logs through the api for a served huggingface space",
 
113
  ]
114
 
115
  for i, query in enumerate(test_queries, 1):
 
19
  print(f"Testing search agent with query: {query}\n")
20
  print("=" * 60)
21
 
22
+ # Import at runtime
23
+ from pathlib import Path
24
+ from agent.config import load_config
25
+
26
  # Create event queue for the sub-agent
27
  sub_event_queue = asyncio.Queue()
28
 
29
+ # Load the search agent's own config file with GitHub MCP server
30
+ search_agent_config_path = Path(__file__).parent / "configs" / "_subagent_config_search_agent.json"
31
+ search_agent_config = load_config(search_agent_config_path)
32
+
33
+ # Extract GitHub MCP config from search agent config
34
+ github_mcp_config = None
35
+ if search_agent_config.mcpServers and "github" in search_agent_config.mcpServers:
36
+ github_server = search_agent_config.mcpServers["github"]
37
+ github_mcp_config = {"github": github_server.model_dump()}
38
+
39
+ # Create search tool router with GitHub MCP config
40
+ search_tool_router = await create_search_tool_router(github_mcp_config)
41
 
42
  # Create config
43
  sub_config = Config(
44
  model_name="anthropic/claude-haiku-4-5",
45
  )
46
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
  # Event listener to show what the sub-agent is doing
48
  async def event_monitor():
49
  while True:
 
81
 
82
  # Run the sub-agent and event monitor concurrently
83
  async with search_tool_router:
84
+ # Create session with custom system prompt
85
+ # NOTE: MCP tools are registered during __aenter__, so we must create session AFTER entering the context
86
+ sub_session = Session(
87
+ event_queue=sub_event_queue,
88
+ config=sub_config,
89
+ tool_router=search_tool_router,
90
+ context_manager=ContextManager(
91
+ tool_specs=search_tool_router.get_tool_specs_for_llm(),
92
+ max_context=get_max_tokens(sub_config.model_name),
93
+ compact_size=0.1,
94
+ untouched_messages=5,
95
+ prompt_file_suffix="search_docs_system_prompt.yaml",
96
+ ),
97
+ )
98
+
99
  monitor_task = asyncio.create_task(event_monitor())
100
 
101
  result = await Handlers.run_agent(
 
122
  # Example queries to test
123
  test_queries = [
124
  # "Explore the TRL documentation structure and find information about DPO trainer",
125
+ "is there a way to get the logs from a served huggingface space",
126
  # "How do I train GLM4.7 with a GRPO training loop with trl with llm judge as a reward model for training on hle?"
127
+ # "can i stream logs through the api for a served huggingface space",
128
+ # 'what tools do you have access to?',
129
  ]
130
 
131
  for i, query in enumerate(test_queries, 1):