"""
environment.py — OpenEnv-compliant environment wrapper for SWEbench-IN (Dockerless).

All Docker container management removed. Each episode runs in a fresh
temp directory managed by Simulator.
"""

import os
import random
from dataclasses import dataclass, field

from tasks import TASKS, Task
from simulator import Simulator
from rewards import compute_reward, RewardBreakdown


@dataclass
class State:
    task_id: int = 0
    step_count: int = 0
    tests_passing_ratio: float = 0.0
    server_running: bool = False
    files_correct: bool = False
    action_history: list = field(default_factory=list)
    reply_texts: list = field(default_factory=list)


class SWEbenchINEnvironment:
    """
    Dockerless RL environment for SWEbench-IN.
    Gym-style: reset() -> observation, step() -> (obs, reward, done, info)
    """

    def __init__(self):
        self.simulator = Simulator()
        self.max_steps = 15  # default; reset() overrides this with the task's max_actions
        self._state = State()
        self._current_task: Task | None = None
        self._done = False

    # ------------------------------------------------------------------
    # Public API
    # ------------------------------------------------------------------

    def reset(self, task_id: int | None = None) -> dict:
        if task_id is None:
            task_id = random.choice(list(TASKS.keys()))

        if task_id not in TASKS:
            raise ValueError(f"Invalid task_id: {task_id}. Valid: {sorted(TASKS)}")

        self._current_task = TASKS[task_id]
        self._done = False
        self._state = State(task_id=task_id)

        self.simulator.setup_task(task_id)
        self.max_steps = self._current_task.max_actions

        obs_text = self.simulator.get_initial_observation(task_id)
        return self._make_obs(obs_text)

    def step(self, action: dict) -> tuple:
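        """Execute one agent action and return (observation, reward, done, info).

        Expected action shape (values here are illustrative; the keys match
        what _dispatch consumes below):
            {"type": "run_command", "args": "pytest -q"}
            {"type": "write_file",  "args": "app.py", "content": "print('ok')"}
        """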
        if self._done:
            return (
                {"text": "Episode done. Call reset().", "step_count": self._state.step_count,
                 "max_steps": self.max_steps, "tests_passing_ratio": 0.0,
                 "server_running": False, "reward_breakdown": {}},
                0.0, True, {"error": "episode_done"},
            )

        action_type = action.get("type", "")
        action_args = action.get("args", "")
        content = action.get("content", "")  # used by write_file

        # Snapshot state before the action so compute_reward can diff before/after
        state_before = State(
            task_id=self._state.task_id,
            step_count=self._state.step_count,
            tests_passing_ratio=self._state.tests_passing_ratio,
            server_running=self._state.server_running,
            files_correct=self._state.files_correct,
            action_history=list(self._state.action_history),
            reply_texts=list(self._state.reply_texts),
        )

        # Execute action
        obs_text = self._dispatch(action_type, action_args, content)

        # Update state
        self._state.action_history.append(f"{action_type}: {action_args}")
        self._state.step_count += 1
        
        # Only update measurements on state-changing actions (lazy updates)
        if action_type in ("run_tests", "run_command", "write_file", "check_server", "close_case"):
            self._update_state()

        # Check done
        if action_type == "close_case" or self._state.step_count >= self.max_steps:
            self._done = True

        # Compute reward
        breakdown = compute_reward(
            container_id=None,
            action_history=self._state.action_history,
            state_before=state_before,
            state_after=self._state,
            output_dir=self.simulator.output_dir,
            task_id=self._state.task_id,
            work_dir=self.simulator.work_dir,
        )

        # Boost technical reward using live state (pytest ratio already updated)
        adjusted_total = (
            breakdown.technical
            + 0.5 * self._state.tests_passing_ratio  # live pytest score
            + 0.8 * breakdown.boundaries
            + 0.5 * breakdown.communication
            + (0.6 * breakdown.leave_protection if self._state.task_id == 5 else 0.0)
            + 0.3 * breakdown.shaping
        )

        info = {
            "reward_breakdown": {
                "technical":        breakdown.technical,
                "boundaries":       breakdown.boundaries,
                "communication":    breakdown.communication,
                "leave_protection": breakdown.leave_protection,
                "shaping":          breakdown.shaping,
            },
            "step_count": self._state.step_count,
            "max_steps":  self.max_steps,
            "done_reason": (
                "close_case" if action_type == "close_case"
                else "max_steps" if self._state.step_count >= self.max_steps
                else None
            ),
        }

        return (self._make_obs(obs_text), adjusted_total, self._done, info)

    def state(self) -> State:
        return self._state

    def grade(self) -> dict:
        """Summary grade for the completed episode."""
        return {
            "task_id":              self._state.task_id,
            "steps_taken":          self._state.step_count,
            "tests_passing_ratio":  self._state.tests_passing_ratio,
            "server_running":       self._state.server_running,
            "files_correct":        self._state.files_correct,
            "total_reward_approx":  (
                float(self._state.server_running)
                + self._state.tests_passing_ratio * 0.5
                + float(self._state.files_correct) * 0.3
            ),
        }

    # ------------------------------------------------------------------
    # Internal
    # ------------------------------------------------------------------

    ACTION_HANDLERS = {
        "run_command", "read_file", "write_file", "run_tests",
        "check_server", "reply_slack", "reply_email", "reply_hr", "close_case",
    }

    def _dispatch(self, action_type: str, action_args: str, content: str = "") -> str:
        if action_type not in self.ACTION_HANDLERS:
            return (
                f"ERROR: Unknown action '{action_type}'. "
                f"Valid: {sorted(self.ACTION_HANDLERS)}"
            )

        if action_type == "run_command":
            return self.simulator.run_bash(action_args)

        if action_type == "read_file":
            return self.simulator.read_file(action_args)

        if action_type == "write_file":
            # Support both "path|content" and separate content field
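            # e.g. args="src/app.py|print('ok')" writes print('ok') to src/app.py
            # (the path is everything before the first "|"; the rest is content;
            # the path here is purely illustrative)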
            if content:
                return self.simulator.write_file(action_args, content)
            if "|" in action_args:
                path, file_content = action_args.split("|", 1)
                return self.simulator.write_file(path.strip(), file_content)
            return "ERROR: write_file needs 'path|content' or a content field."

        if action_type == "run_tests":
            r = self.simulator.run_pytest()
            return (
                f"Pytest Results:\n"
                f"  Passed: {r['passed']}\n"
                f"  Failed: {r['failed']}\n"
                f"  Ratio:  {r['ratio']:.0%}\n\n"
                f"Output:\n{r['output']}"
            )

        if action_type == "check_server":
            r = self.simulator.curl_server()
            return (
                f"Server Check:\n"
                f"  Status Code: {r['status_code']}\n"
                f"  Success: {r['success']}"
            )

        if action_type == "reply_slack":
            result = self.simulator.write_reply("SLACK", action_args)
            self._state.reply_texts.append(f"[SLACK]: {action_args}")
            return result

        if action_type == "reply_email":
            result = self.simulator.write_reply("EMAIL", action_args)
            self._state.reply_texts.append(f"[EMAIL]: {action_args}")
            return result

        if action_type == "reply_hr":
            result = self.simulator.write_reply("HR", action_args)
            self._state.reply_texts.append(f"[HR]: {action_args}")
            return result

        if action_type == "close_case":
            return "Case closed. Episode ending."

        return "ERROR: Dispatch failed."

    def _update_state(self):
        """Refresh state measurements from the live environment (best-effort: failures are ignored)."""
        # Update server status
        try:
            server = self.simulator.curl_server()
            self._state.server_running = server["success"]
        except Exception:
            pass

        # Update test pass ratio
        try:
            tests = self.simulator.run_pytest()
            self._state.tests_passing_ratio = tests["ratio"]
        except Exception:
            pass

        # Update file correctness
        try:
            reply_path = os.path.join(self.simulator.output_dir, "reply.txt")
            self._state.files_correct = (
                os.path.exists(reply_path) and os.path.getsize(reply_path) > 0
            )
        except Exception:
            pass

    @staticmethod
    def _make_obs(text: str) -> dict:
        """Wrap observation text in a dict for the REST API."""
        return {"text": text}