"""
AgentDebuggerEnv Baseline Inference Script
==========================================
Baseline evaluation script for testing agent performance in the 
AgentDebugger environment.

System Configuration (environment variables):
- API_BASE_URL: LLM API endpoint
- MODEL_NAME:   Model identifier for evaluation
- HF_TOKEN:     Authentication token (also passed as the API key)
- ENV_BASE_URL: Base URL of the AgentDebugger environment server
"""

import os
import json
import time
import re
from openai import OpenAI
import requests

# ── Environment variables (never hardcode these) ──────────────────────────────
API_BASE_URL = os.environ.get("API_BASE_URL", "https://api.openai.com/v1")
MODEL_NAME   = os.environ.get("MODEL_NAME", "gpt-4o")
HF_TOKEN     = os.environ.get("HF_TOKEN", "")
ENV_BASE_URL = os.environ.get("ENV_BASE_URL", "http://localhost:8000")

client = OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN)

SYSTEM_PROMPT = """You are an expert software debugger. You will be given broken code and a
failing test suite. Your job is to:
1. Analyze the error output carefully
2. Form a hypothesis about the root cause (required for every fix attempt)
3. Submit a corrected version of the complete code
4. Observe the new test results and update your hypothesis if needed
5. Repeat until all tests pass or you run out of attempts

You must ALWAYS respond with a valid JSON action object. Available actions:

Submit a fix:
{
  "action_type": "submit_fix",
  "fixed_code": "<complete corrected Python code as a string>",
  "hypothesis": "<your hypothesis about what the bug is and where>"
}

Query for more context (use sparingly; the first one is free):
{
  "action_type": "query_context",
  "query_type": "error_explanation" | "function_signature" | "related_code" | "test_details",
  "query_target": "<function name or line number or test name>"
}

Give up (if you cannot find the bug):
{
  "action_type": "give_up",
  "final_diagnosis": "<your best guess at what the bug was>"
}

Analyze the error output carefully and provide a corrected version of the complete code. 
You must always include a hypothesis explaining the root cause of the bug before 
submitting your fix. 

Guidelines:
- Submit complete source code files, not partial snippets or diffs.
- Incorporate all feedback from previous execution attempts.
- For concurrent tasks, ensure atomic operations and proper synchronization.
"""


def parse_action(raw: str) -> dict:
    """Parse LLM response to action dict. Handle markdown code blocks."""
    raw = raw.strip()
    # Strip markdown code blocks if present
    raw = re.sub(r'^```(?:json)?\s*', '', raw, flags=re.MULTILINE)
    raw = re.sub(r'\s*```$', '', raw, flags=re.MULTILINE)
    try:
        return json.loads(raw)
    except json.JSONDecodeError:
        # Try to extract first JSON object
        match = re.search(r'\{.*\}', raw, re.DOTALL)
        if match:
            try:
                return json.loads(match.group())
            except json.JSONDecodeError:
                pass
    # Fallback: give up
    return {
        "action_type": "give_up",
        "final_diagnosis": f"Failed to parse response: {raw[:200]}"
    }


def build_initial_message(obs: dict) -> str:
    return (
        f"=== DEBUGGING TASK: {obs['task_id'].upper()} ===\n\n"
        f"TASK DESCRIPTION:\n{obs['task_description']}\n\n"
        f"BUGGY CODE:\n```python\n{obs['buggy_code']}\n```\n\n"
        f"TEST SUITE:\n```python\n{obs['test_suite']}\n```\n\n"
        f"INITIAL ERROR OUTPUT:\n{obs['initial_error_output']}\n\n"
        f"Attempts remaining: {obs['attempts_remaining']}\n"
        f"Max steps: {obs['max_steps']}\n\n"
        f"Analyze the error and submit your first fix attempt."
    )


def build_step_message(obs: dict, reward: dict, info: dict) -> str:
    last_attempt = obs['previous_attempts'][-1] if obs['previous_attempts'] else None
    msg = f"Step {obs['step_number']} result:\n"
    msg += f"Step reward: {reward['step_reward']:+.3f} | Cumulative: {reward['cumulative_reward']:.3f}\n"
    msg += f"Tests passing: {obs['tests_passed']}/{obs['tests_total']}\n"
    msg += f"Attempts remaining: {obs['attempts_remaining']}\n"

    if info.get("error"):
        msg += f"ERROR: {info['error']}\n"

    if info.get("query_result"):
        msg += f"\nQUERY RESULT:\n{info['query_result']}\n"

    if last_attempt and last_attempt.get("execution_output"):
        output = last_attempt["execution_output"]
        # Truncate long outputs to stay within token budget
        if len(output) > 1500:
            output = output[:750] + "\n...[truncated]...\n" + output[-750:]
        msg += f"\nNEW TEST OUTPUT:\n{output}\n"

    if obs['tests_passed'] == obs['tests_total']:
        msg += "\n✓ ALL TESTS PASS! Episode solved."
    else:
        msg += f"\nContinue debugging. {obs['tests_total'] - obs['tests_passed']} tests still failing."

    return msg


def run_episode(task_id: str) -> dict:
    """Run one complete debugging episode. Returns result dict."""

    # Reset environment
    reset_resp = requests.post(f"{ENV_BASE_URL}/reset", json={"task_id": task_id})
    reset_resp.raise_for_status()
    obs = reset_resp.json()

    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user",   "content": build_initial_message(obs)}
    ]

    done = False
    last_result = {"reward": {"grader_score": 0.0, "cumulative_reward": 0.0}, "observation": obs}
    action = {}

    while not done:
        # Get LLM action
        completion = client.chat.completions.create(
            model=MODEL_NAME,
            messages=messages,
            max_tokens=1200,
            temperature=0.2
        )
        raw = completion.choices[0].message.content
        action = parse_action(raw)

        # Submit action to environment
        step_resp = requests.post(f"{ENV_BASE_URL}/step", json=action)
        step_resp.raise_for_status()
        result = step_resp.json()

        obs    = result["observation"]
        reward = result["reward"]
        done   = result["done"]
        info   = result["info"]
        last_result = result

        # Build context for next LLM call
        step_msg = build_step_message(obs, reward, info)
        messages.append({"role": "assistant", "content": raw})
        messages.append({"role": "user",      "content": step_msg})

        if done:
            break

    final_obs = last_result["observation"]
    return {
        "task_id":             task_id,
        "grader_score":        last_result["reward"]["grader_score"],
        "cumulative_reward":   last_result["reward"]["cumulative_reward"],
        "steps_taken":         final_obs["step_number"],
        "attempts_used":       final_obs["max_attempts"] - final_obs["attempts_remaining"],
        "tests_passed":        final_obs["tests_passed"],
        "tests_total":         final_obs["tests_total"],
        "solved":              final_obs["tests_passed"] == final_obs["tests_total"],
        "final_action_type":   action.get("action_type", "unknown")
    }


def main():
    print("AgentDebuggerEnv β€” Baseline Inference")
    print(f"Model:    {MODEL_NAME}")
    print(f"API:      {API_BASE_URL}")
    print(f"Env:      {ENV_BASE_URL}")
    print("=" * 55)

    results    = []
    start_time = time.time()

    for task_id in ["easy", "medium", "hard"]:
        print(f"\nTask: {task_id}")
        t0     = time.time()
        result = run_episode(task_id)
        elapsed = time.time() - t0

        solved_str = "✓ SOLVED" if result["solved"] else "✗ UNSOLVED"
        print(f"  Score:    {result['grader_score']:.3f}")
        print(f"  Outcome:  {solved_str}")
        print(f"  Attempts: {result['attempts_used']}")
        print(f"  Tests:    {result['tests_passed']}/{result['tests_total']}")
        print(f"  Time:     {elapsed:.1f}s")
        results.append(result)

    total_time = time.time() - start_time
    mean_score = sum(r["grader_score"] for r in results) / len(results)

    print("\n" + "=" * 55)
    print(f"Mean Score:  {mean_score:.3f}")
    print(f"Total Time:  {total_time:.1f}s  (limit: 1200s)")
    print("=" * 55)

    output = {
        "model":                MODEL_NAME,
        "api_base_url":         API_BASE_URL,
        "results":              results,
        "mean_score":           mean_score,
        "total_time_seconds":   round(total_time, 1)
    }

    with open("baseline_results.json", "w") as f:
        json.dump(output, f, indent=2)
    print("\nSaved β†’ baseline_results.json")


if __name__ == "__main__":
    main()