Pranoy Mukherjee committed
Commit 3e631d0 · 1 parent: a3ecd30

Add multi-agent audit MVP

Files changed:
- README.md +8 -2
- app/agents/graph.py +26 -13
- app/agents/performance_agent.py +121 -2
- tests/test_graph_progress.py +5 -2
- tests/test_performance_agent.py +52 -0
README.md
CHANGED
@@ -4,10 +4,10 @@ AI-powered multi-agent code auditing for GitHub repositories. Paste a public Git

 ## MVP

-SwarmAudit currently runs with a mock-first LLM interface so the demo is not blocked by ROCm, vLLM, or AMD MI300X setup. The
+SwarmAudit currently runs with a mock-first LLM interface so the demo is not blocked by ROCm, vLLM, or AMD MI300X setup. The current graph is:

 ```text
-GitHub URL -> Crawler -> Chunker -> Security Agent -> Synthesizer -> Report
+GitHub URL -> Crawler -> Chunker -> [Security Agent + Performance Agent] -> Synthesizer -> Report
 ```

 ## Quick Start

@@ -58,6 +58,12 @@ Each finding includes:
 - suggested fix
 - agent source

+## Current Agents
+
+- Security Agent: flags hardcoded secrets, disabled TLS verification, and dynamic code execution.
+- Performance Agent: flags HTTP calls without timeouts, blocking sleep inside async functions, nested loops, file reads in loops, and synchronous Node.js filesystem calls.
+- Synthesizer Agent: deduplicates findings, sorts by severity, and builds the final report.
+
 ## Tests

 ```bash
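For context, the streamed pipeline the README describes can be driven roughly as below. A minimal sketch, assuming the streaming entry point is named `run_with_progress` (the name is inferred from tests/test_graph_progress.py, not confirmed by this diff; the repo URL is a placeholder):

```python
import asyncio

from app.agents.graph import AuditGraph
from app.config import Settings
from app.schemas import AuditReport


async def main() -> None:
    # Settings values mirror tests/test_graph_progress.py.
    graph = AuditGraph(Settings(max_files=10, max_file_size_kb=10, max_chars_per_chunk=1000))

    # Assumed API: yields progress strings, then the final AuditReport.
    async for event in graph.run_with_progress("https://github.com/example/repo"):
        if isinstance(event, AuditReport):
            print(f"{len(event.findings)} findings from agents: {event.agents_run}")
        else:
            print(event)


asyncio.run(main())
```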
app/agents/graph.py
CHANGED
@@ -1,8 +1,10 @@
 from collections.abc import AsyncIterator
-from typing import TypedDict
+from operator import add
+from typing import Annotated, TypedDict

 from langgraph.graph import END, StateGraph

+from app.agents.performance_agent import PerformanceAgent
 from app.agents.security_agent import SecurityAgent
 from app.agents.synthesizer_agent import SynthesizerAgent
 from app.config import Settings, get_settings

@@ -17,8 +19,9 @@ class AuditState(TypedDict, total=False):
     repo: RepoScanResult
     chunks: list[CodeChunk]
     security_output: AgentOutput
+    performance_output: AgentOutput
     report: AuditReport
-    progress: list[str]
+    progress: Annotated[list[str], add]


 class AuditGraph:

@@ -28,6 +31,7 @@ class AuditGraph:
         self.chunker = Chunker(self.settings)
         self.llm_client = LLMClient(self.settings)
         self.security_agent = SecurityAgent(self.llm_client)
+        self.performance_agent = PerformanceAgent()
         self.synthesizer = SynthesizerAgent()
         self.graph = self._build_graph()

@@ -36,11 +40,13 @@
         graph.add_node("crawl", self._crawl)
         graph.add_node("chunk", self._chunk)
         graph.add_node("security", self._security)
+        graph.add_node("performance", self._performance)
         graph.add_node("synthesize", self._synthesize)
         graph.set_entry_point("crawl")
         graph.add_edge("crawl", "chunk")
         graph.add_edge("chunk", "security")
-        graph.add_edge("security", "synthesize")
+        graph.add_edge("chunk", "performance")
+        graph.add_edge(["security", "performance"], "synthesize")
         graph.add_edge("synthesize", END)
         return graph.compile()

@@ -63,8 +69,12 @@
             security_output = await self.security_agent.analyze(chunks)
             yield f"Security Agent: found {len(security_output.findings)} findings."

+            yield "Performance Agent: scanning for slow-path patterns..."
+            performance_output = await self.performance_agent.analyze(chunks)
+            yield f"Performance Agent: found {len(performance_output.findings)} findings."
+
             yield "Synthesizer Agent: ranking findings and formatting report..."
-            report = await self.synthesizer.synthesize(repo, [security_output])
+            report = await self.synthesizer.synthesize(repo, [security_output, performance_output])
             yield "Synthesizer Agent: final report generated."
             yield report
         finally:

@@ -72,21 +82,24 @@

     async def _crawl(self, state: AuditState) -> AuditState:
         repo = self.crawler.clone_and_scan(state["repo_url"])
-        return {"repo": repo, "progress": progress}
+        return {"repo": repo, "progress": [f"Crawler Agent: mapped {len(repo.files)} files."]}

     async def _chunk(self, state: AuditState) -> AuditState:
         chunks = self.chunker.chunk_files(state["repo"].files)
-        return {"chunks": chunks, "progress": progress}
+        return {"chunks": chunks, "progress": [f"Chunker: created {len(chunks)} code chunks."]}

     async def _security(self, state: AuditState) -> AuditState:
         output = await self.security_agent.analyze(state["chunks"])
+        return {"security_output": output, "progress": [f"Security Agent: found {len(output.findings)} findings."]}
+
+    async def _performance(self, state: AuditState) -> AuditState:
+        output = await self.performance_agent.analyze(state["chunks"])
+        return {"performance_output": output, "progress": [f"Performance Agent: found {len(output.findings)} findings."]}

     async def _synthesize(self, state: AuditState) -> AuditState:
+        report = await self.synthesizer.synthesize(
+            state["repo"],
+            [state["security_output"], state["performance_output"]],
+        )
         self.crawler.cleanup(state["repo"])
+        return {"report": report, "progress": ["Synthesizer Agent: final report generated."]}
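The graph change fans the chunk output to the security and performance nodes in the same step and joins them on the synthesizer, which is why `progress` becomes `Annotated[list[str], add]`: with two branches writing the same state key concurrently, LangGraph needs a reducer to merge the updates instead of rejecting them as conflicting writes. A standalone sketch of the same pattern with dummy nodes (not the repository's code):

```python
from operator import add
from typing import Annotated, TypedDict

from langgraph.graph import END, StateGraph


class State(TypedDict, total=False):
    # The `add` reducer concatenates updates from parallel branches instead of
    # treating the second write to "progress" in a step as a conflict.
    progress: Annotated[list[str], add]


def chunk(state: State) -> State:
    return {"progress": ["chunked"]}


def security(state: State) -> State:
    return {"progress": ["security done"]}


def performance(state: State) -> State:
    return {"progress": ["performance done"]}


def synthesize(state: State) -> State:
    return {"progress": ["synthesized"]}


graph = StateGraph(State)
graph.add_node("chunk", chunk)
graph.add_node("security", security)
graph.add_node("performance", performance)
graph.add_node("synthesize", synthesize)
graph.set_entry_point("chunk")
graph.add_edge("chunk", "security")
graph.add_edge("chunk", "performance")
# Fan-in: synthesize runs once, after both branches have finished.
graph.add_edge(["security", "performance"], "synthesize")
graph.add_edge("synthesize", END)

result = graph.compile().invoke({"progress": []})
print(result["progress"])  # all four messages, merged by the reducer
```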
app/agents/performance_agent.py
CHANGED
@@ -1,8 +1,127 @@
+import re
+
+from app.schemas import AgentOutput, CodeChunk, Finding, Severity
+
+
+REQUEST_WITHOUT_TIMEOUT = re.compile(r"\brequests\.(get|post|put|patch|delete)\s*\((?!.*\btimeout\s*=)")
+SYNC_FS_JS = re.compile(r"\b(readFileSync|writeFileSync|readdirSync|statSync)\s*\(")
+PYTHON_LOOP = re.compile(r"^(\s*)(for|while)\b")
+PYTHON_FILE_READ = re.compile(r"\b(open\s*\(|Path\s*\([^)]*\)\.read_(text|bytes)\s*\()")


 class PerformanceAgent:
     name = "Performance Agent"

     async def analyze(self, chunks: list[CodeChunk]) -> AgentOutput:
+        findings: list[Finding] = []
+        for chunk in chunks:
+            findings.extend(self._scan_chunk(chunk))
+
+        return AgentOutput(
+            agent_name=self.name,
+            findings=findings,
+            metadata={"chunks_scanned": len(chunks), "mode": "static-rules"},
+        )
+
+    def _scan_chunk(self, chunk: CodeChunk) -> list[Finding]:
+        findings: list[Finding] = []
+        lines = chunk.content.splitlines()
+        loop_stack: list[int] = []
+        async_indent_stack: list[int] = []
+
+        for offset, line in enumerate(lines):
+            actual_line = chunk.line_start + offset
+            stripped = line.strip()
+            indent = len(line) - len(line.lstrip(" "))
+
+            loop_stack = [loop_indent for loop_indent in loop_stack if indent > loop_indent]
+            async_indent_stack = [async_indent for async_indent in async_indent_stack if indent > async_indent]
+
+            if stripped.startswith("async def "):
+                async_indent_stack.append(indent)
+
+            loop_match = PYTHON_LOOP.match(line)
+            if loop_match:
+                if loop_stack:
+                    findings.append(
+                        self._finding(
+                            "Nested loop may become expensive",
+                            Severity.low,
+                            chunk,
+                            actual_line,
+                            "A loop nested inside another loop can turn small inputs into slow O(n^2) work.",
+                            "Consider indexing data with a dictionary/set, batching work, or documenting why nested iteration is bounded.",
+                        )
+                    )
+                loop_stack.append(len(loop_match.group(1)))
+
+            if REQUEST_WITHOUT_TIMEOUT.search(line):
+                findings.append(
+                    self._finding(
+                        "HTTP request without timeout",
+                        Severity.medium,
+                        chunk,
+                        actual_line,
+                        "Network calls without timeouts can hang workers and make the app appear frozen under bad network conditions.",
+                        "Pass an explicit timeout, for example requests.get(url, timeout=10).",
+                    )
+                )
+
+            if async_indent_stack and "time.sleep(" in line:
+                findings.append(
+                    self._finding(
+                        "Blocking sleep inside async function",
+                        Severity.medium,
+                        chunk,
+                        actual_line,
+                        "time.sleep blocks the event loop, delaying unrelated async work.",
+                        "Use await asyncio.sleep(...) inside async functions.",
+                    )
+                )
+
+            if loop_stack and PYTHON_FILE_READ.search(line):
+                findings.append(
+                    self._finding(
+                        "File read inside loop",
+                        Severity.low,
+                        chunk,
+                        actual_line,
+                        "Repeated disk reads inside loops can dominate runtime and slow audits on larger inputs.",
+                        "Read once before the loop, cache results, or stream data deliberately.",
+                    )
+                )
+
+            if SYNC_FS_JS.search(line):
+                findings.append(
+                    self._finding(
+                        "Synchronous filesystem call",
+                        Severity.low,
+                        chunk,
+                        actual_line,
+                        "Synchronous filesystem APIs block the Node.js event loop and can hurt request latency.",
+                        "Use async fs.promises APIs or move blocking work outside latency-sensitive paths.",
+                    )
+                )
+
+        return findings
+
+    def _finding(
+        self,
+        title: str,
+        severity: Severity,
+        chunk: CodeChunk,
+        line_number: int,
+        description: str,
+        suggested_fix: str,
+    ) -> Finding:
+        return Finding(
+            title=title,
+            severity=severity,
+            file_path=chunk.file_path,
+            line_start=line_number,
+            line_end=line_number,
+            description=description,
+            why_it_matters="Performance issues in hot paths can increase latency, resource usage, and demo analysis time.",
+            suggested_fix=suggested_fix,
+            agent_source=self.name,
+        )
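One caveat worth noting: the timeout rule is purely line-based. The negative lookahead only inspects the remainder of the same physical line, so a timeout passed on a later line of a multi-line call is still flagged, while a `timeout=` mentioned in a trailing comment suppresses the finding. A quick illustration with the same pattern (the sample strings are made up):

```python
import re

REQUEST_WITHOUT_TIMEOUT = re.compile(r"\brequests\.(get|post|put|patch|delete)\s*\((?!.*\btimeout\s*=)")

flagged = "response = requests.get(url)"
safe = "response = requests.get(url, timeout=10)"
multi_line = "response = requests.get(\n"            # timeout=10 would appear on the next line
commented = "requests.get(url)  # TODO: add timeout=5"

print(bool(REQUEST_WITHOUT_TIMEOUT.search(flagged)))     # True: no timeout on this line
print(bool(REQUEST_WITHOUT_TIMEOUT.search(safe)))        # False: timeout= found after the call
print(bool(REQUEST_WITHOUT_TIMEOUT.search(multi_line)))  # True: the lookahead cannot see the next line
print(bool(REQUEST_WITHOUT_TIMEOUT.search(commented)))   # False: the comment satisfies the lookahead
```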
tests/test_graph_progress.py
CHANGED
@@ -10,7 +10,7 @@ from app.schemas import AuditReport
 @pytest.mark.anyio
 async def test_run_with_progress_yields_real_stages_and_report(tmp_path: Path):
     source = tmp_path / "app.py"
-    source.write_text("API_KEY = '1234567890abcdef'\n", encoding="utf-8")
+    source.write_text("API_KEY = '1234567890abcdef'\nresponse = requests.get(url)\n", encoding="utf-8")
     graph = AuditGraph(Settings(max_files=10, max_file_size_kb=10, max_chars_per_chunk=1000))

     graph.crawler.clone_and_scan = lambda repo_url: graph.crawler.scan_local_repo(repo_url, tmp_path)

@@ -22,5 +22,8 @@ async def test_run_with_progress_yields_real_stages_and_report(tmp_path: Path):

     assert any("Crawler Agent" in event for event in events if isinstance(event, str))
     assert any("Security Agent" in event for event in events if isinstance(event, str))
+    assert any("Performance Agent" in event for event in events if isinstance(event, str))
     assert isinstance(events[-1], AuditReport)
+    assert len(events[-1].findings) == 2
+    assert "Security Agent" in events[-1].agents_run
+    assert "Performance Agent" in events[-1].agents_run
tests/test_performance_agent.py
ADDED
@@ -0,0 +1,52 @@
+import pytest
+
+from app.agents.performance_agent import PerformanceAgent
+from app.schemas import CodeChunk, Severity
+
+
+@pytest.mark.anyio
+async def test_performance_agent_flags_requests_without_timeout():
+    chunk = CodeChunk(
+        file_path="client.py",
+        language="Python",
+        line_start=1,
+        line_end=1,
+        content="response = requests.get(url)",
+    )
+
+    output = await PerformanceAgent().analyze([chunk])
+
+    assert output.findings[0].title == "HTTP request without timeout"
+    assert output.findings[0].severity == Severity.medium
+
+
+@pytest.mark.anyio
+async def test_performance_agent_flags_blocking_sleep_in_async_function():
+    chunk = CodeChunk(
+        file_path="worker.py",
+        language="Python",
+        line_start=20,
+        line_end=22,
+        content="async def run():\n    time.sleep(1)\n    return True",
+    )
+
+    output = await PerformanceAgent().analyze([chunk])
+
+    assert output.findings[0].title == "Blocking sleep inside async function"
+    assert output.findings[0].line_start == 21
+
+
+@pytest.mark.anyio
+async def test_performance_agent_flags_nested_loop():
+    chunk = CodeChunk(
+        file_path="search.py",
+        language="Python",
+        line_start=5,
+        line_end=7,
+        content="for user in users:\n    for order in orders:\n        match(user, order)",
+    )
+
+    output = await PerformanceAgent().analyze([chunk])
+
+    assert output.findings[0].title == "Nested loop may become expensive"
+    assert output.findings[0].line_start == 6