YUS200619 committed
Commit
054a3c3
·
1 Parent(s): 4dfc26d

changed notebook

Files changed (3)
  1. Dockerfile +1 -1
  2. notebooks/training.ipynb +730 -127
  3. server/app.py +4 -1
Dockerfile CHANGED
@@ -20,7 +20,7 @@ COPY openenv.yaml /home/user2/
20
  COPY requirements.txt /home/user2/
21
  COPY README.md /home/user2/
22
 
23
- EXPOSE 7868
23
+ EXPOSE 7860
24
  ENV ENABLE_WEB_INTERFACE=true
25
  CMD ["python", "server/app.py"]
26
 
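Note on the port fix: Hugging Face Spaces only routes traffic to the port declared for the Space (7860 by default), so the container must listen there; the old EXPOSE 7868 never received requests. A minimal smoke test against the deployed Space, assuming the direct *.hf.space hostname (hypothetical here) and the /health route the notebook's client probes:

```python
import requests

# Hypothetical direct URL of the Space; the hf.space hostname is an assumption.
SPACE_URL = "https://YUS200619-swebench-ind.hf.space"

# /health is the same endpoint SWEBenchINClient checks on connect.
resp = requests.get(f"{SPACE_URL}/health", timeout=10)
print(resp.status_code, resp.json())
```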
notebooks/training.ipynb CHANGED
@@ -24,11 +24,14 @@
24
  },
25
  {
26
  "cell_type": "code",
27
- "execution_count": 1,
28
  "metadata": {},
29
  "outputs": [],
30
  "source": [
31
- "%pip install unsloth trl transformers accelerate openenv-core[core]>=0.2.2 wandb -q"
32
  ]
33
  },
34
  {
@@ -40,9 +43,21 @@
40
  },
41
  {
42
  "cell_type": "code",
43
- "execution_count": 2,
44
  "metadata": {},
45
  "outputs": [
46
  {
47
  "name": "stdout",
48
  "output_type": "stream",
@@ -52,37 +67,115 @@
52
  ]
53
  },
54
  {
55
- "ename": "ModuleNotFoundError",
56
- "evalue": "No module named 'openenv.client'",
57
- "output_type": "error",
58
- "traceback": [
59
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
60
- "\u001b[0;31mModuleNotFoundError\u001b[0m Traceback (most recent call last)",
61
- "\u001b[0;32m/tmp/ipykernel_1168/3569467806.py\u001b[0m in \u001b[0;36m<cell line: 0>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 6\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0munsloth\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mFastLanguageModel\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 7\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mtrl\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mGRPOTrainer\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mGRPOConfig\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 8\u001b[0;31m \u001b[0;32mfrom\u001b[0m \u001b[0mopenenv\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mclient\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mEnvironment\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mOpenEnvClient\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 9\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0mwandb\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minit\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mproject\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"swebench-in\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m\"grpo-run-1\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
62
- "\u001b[0;31mModuleNotFoundError\u001b[0m: No module named 'openenv.client'",
63
- "",
64
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0;32m\nNOTE: If your import is failing due to a missing package, you can\nmanually install dependencies using either !pip or !apt.\n\nTo view examples of installing some common dependencies, click the\n\"Open Examples\" button below.\n\u001b[0;31m---------------------------------------------------------------------------\u001b[0m\n"
65
  ]
66
  }
67
  ],
68
  "source": [
69
  "import os\n",
70
- "import wandb\n",
71
- "import random\n",
72
- "import re\n",
73
  "import json\n",
74
  "from unsloth import FastLanguageModel\n",
75
  "from trl import GRPOTrainer, GRPOConfig\n",
76
- "from openenv.client import Environment as OpenEnvClient\n",
77
- "\n",
78
- "wandb.init(project=\"swebench-in\", name=\"grpo-run-1\")\n",
79
  "\n",
80
- "# Set this in Colab via: %env HF_SPACE_URL=https://huggingface.co/spaces/YUS200619/swebench-ind\n",
81
- "HF_SPACE_URL = os.getenv(\"HF_SPACE_URL\", \"\").strip()\n",
82
- "if not HF_SPACE_URL:\n",
83
- " raise ValueError(\"HF_SPACE_URL is empty. Set it before running this cell.\")\n",
84
  "\n",
85
- "env = OpenEnvClient(HF_SPACE_URL)"
 
86
  ]
87
  },
88
  {
@@ -99,11 +192,12 @@
99
  "outputs": [],
100
  "source": [
101
  "model, tokenizer = FastLanguageModel.from_pretrained(\n",
102
- " model_name=\"Qwen/Qwen2.5-3B-Instruct\",\n",
103
- " max_seq_length=2048,\n",
104
  " dtype=None,\n",
105
- " load_in_4bit=True,\n",
106
  ")\n",
 
107
  "model = FastLanguageModel.get_peft_model(\n",
108
  " model,\n",
109
  " r=16,\n",
@@ -111,8 +205,12 @@
111
  " lora_alpha=16,\n",
112
  " lora_dropout=0,\n",
113
  " bias=\"none\",\n",
114
- " use_gradient_checkpointing=True,\n",
115
- ")"
116
  ]
117
  },
118
  {
@@ -128,40 +226,55 @@
128
  "metadata": {},
129
  "outputs": [],
130
  "source": [
131
- "def parse_action(action_text: str) -> dict:\n",
132
  " \"\"\"\n",
133
- " Parse the model's generated text into an action dict.\n",
134
- " Expected format: ACTION_TYPE: args\n",
135
  " \"\"\"\n",
136
- " action_text = action_text.strip()\n",
137
- " # Try to find action pattern\n",
138
- " match = re.search(r'(run_command|read_file|write_file|run_tests|check_server|reply_slack|reply_email|reply_hr|close_case)[:\\s]+(.*)', action_text, re.DOTALL)\n",
139
- " if match:\n",
140
- " return {\"type\": match.group(1), \"args\": match.group(2).strip()}\n",
141
- " # Default: treat as run_command\n",
142
- " return {\"type\": \"run_command\", \"args\": action_text}\n",
143
  "\n",
144
  "\n",
145
- "def rollout(prompt: str, task_id: int) -> tuple[list[str], float]:\n",
146
- " \"\"\"\n",
147
- " Run one episode. Return (action_sequence, total_reward).\n",
148
- " Uses sampling with temperature 0.7.\n",
149
- " \"\"\"\n",
150
- " obs = env.reset(task_id=task_id)\n",
151
- " actions = []\n",
152
- " total_reward = 0.0\n",
153
- " done = False\n",
154
  "\n",
155
- " while not done:\n",
156
- " inputs = tokenizer(f\"Observation: {obs}\\nAction:\", return_tensors=\"pt\")\n",
157
- " output = model.generate(**inputs, max_new_tokens=100, do_sample=True, temperature=0.7)\n",
158
- " action_text = tokenizer.decode(output[0], skip_special_tokens=True)\n",
159
- " action = parse_action(action_text)\n",
160
- " obs, reward, done, info = env.step(action)\n",
161
- " actions.append(action_text)\n",
162
- " total_reward += reward\n",
163
  "\n",
164
- " return actions, total_reward"
 
165
  ]
166
  },
167
  {
@@ -182,45 +295,150 @@
182
  "metadata": {},
183
  "outputs": [],
184
  "source": [
185
- "# Curriculum: tier 1 tasks first (1,2), then tier 2 (3,4), then tier 3 (5)\n",
186
- "CURRICULUM = {\n",
187
- " \"tier1\": [1, 2],\n",
188
- " \"tier2\": [3, 4],\n",
189
- " \"tier3\": [5],\n",
190
- "}\n",
191
  "\n",
192
- "current_tier = \"tier1\"\n",
193
- "tier_rewards = []\n",
194
  "\n",
195
- "for step in range(700):\n",
196
- " task_id = random.choice(CURRICULUM[current_tier])\n",
197
- " actions, reward = rollout(\"\", task_id)\n",
198
  "\n",
199
- " # Log to wandb\n",
200
- " wandb.log({\n",
201
- " \"reward/total\": reward,\n",
202
- " \"training_step\": step,\n",
203
- " \"task_id\": task_id,\n",
204
- " \"current_tier\": current_tier,\n",
205
- " \"num_actions\": len(actions),\n",
206
- " })\n",
207
  "\n",
208
- " tier_rewards.append(reward)\n",
209
- "\n",
210
- " # Escalate curriculum\n",
211
- " if len(tier_rewards) >= 50 and sum(tier_rewards[-50:]) / 50 >= 0.6:\n",
212
- " if current_tier == \"tier1\":\n",
213
- " current_tier = \"tier2\"\n",
214
- " tier_rewards = []\n",
215
- " print(f\"Step {step}: Escalating to tier 2\")\n",
216
- " elif current_tier == \"tier2\":\n",
217
- " current_tier = \"tier3\"\n",
218
- " tier_rewards = []\n",
219
- " print(f\"Step {step}: Escalating to tier 3\")\n",
220
- "\n",
221
- " if step % 50 == 0:\n",
222
- " avg = sum(tier_rewards[-50:]) / max(len(tier_rewards[-50:]), 1)\n",
223
- " print(f\"Step {step} | Tier: {current_tier} | Avg reward (last 50): {avg:.3f}\")"
224
  ]
225
  },
226
  {
@@ -239,15 +457,24 @@
239
  "metadata": {},
240
  "outputs": [],
241
  "source": [
242
- "# Save LoRA adapters without merging into 4-bit base weights.\n",
243
- "if hasattr(model, \"save_pretrained_merged\"):\n",
244
- " model.save_pretrained_merged(\"swebench-in-lora\", tokenizer=tokenizer, save_method=\"lora\")\n",
245
- "else:\n",
246
- " model.save_pretrained(\"swebench-in-lora\")\n",
247
- " tokenizer.save_pretrained(\"swebench-in-lora\")\n",
248
  "\n",
249
- "# Optional: push to hub\n",
250
- "# model.push_to_hub(\"YOUR_HF_USERNAME/swebench-in-lora\")"
251
  ]
252
  },
253
  {
@@ -266,45 +493,421 @@
266
  "metadata": {},
267
  "outputs": [],
268
  "source": [
269
- "import matplotlib.pyplot as plt\n",
270
- "import os\n",
271
  "\n",
272
- "# Pull run history from wandb\n",
273
- "run = wandb.run\n",
274
- "history = run.history()\n",
275
  "\n",
276
  "os.makedirs(\"plots\", exist_ok=True)\n",
277
  "\n",
278
- "# --- Reward Curve ---\n",
279
- "fig, ax = plt.subplots(figsize=(10, 5))\n",
280
- "ax.plot(history[\"training_step\"], history[\"reward/total\"],\n",
281
- " label=\"Trained Agent\", color=\"steelblue\")\n",
282
- "ax.axhline(y=-0.4, color=\"orange\", linestyle=\"--\",\n",
283
- " label=\"Untrained Baseline (-0.4)\")\n",
284
- "ax.set_xlabel(\"Training Step\")\n",
285
- "ax.set_ylabel(\"Episode Reward\")\n",
286
- "ax.set_title(\"SWEbench-IN: Training Reward Curve\")\n",
287
- "ax.legend()\n",
288
  "ax.grid(True, alpha=0.3)\n",
289
  "plt.tight_layout()\n",
290
- "plt.savefig(\"plots/reward_curve.png\", dpi=150)\n",
291
  "plt.show()\n",
292
- "print(\"plots/reward_curve.png saved. Commit it to your repo now.\")\n",
293
  "\n",
294
- "# --- Loss Curve ---\n",
295
  "fig, ax = plt.subplots(figsize=(10, 5))\n",
296
- "if \"loss\" in history.columns:\n",
297
- " ax.plot(history[\"training_step\"], history[\"loss\"],\n",
298
- " label=\"Policy Loss\", color=\"crimson\")\n",
299
- "ax.set_xlabel(\"Training Step\")\n",
300
- "ax.set_ylabel(\"Loss\")\n",
301
- "ax.set_title(\"SWEbench-IN: Policy Loss Curve\")\n",
302
- "ax.legend()\n",
303
  "ax.grid(True, alpha=0.3)\n",
304
  "plt.tight_layout()\n",
305
- "plt.savefig(\"plots/loss_curve.png\", dpi=150)\n",
306
  "plt.show()\n",
307
- "print(\"plots/loss_curve.png saved. Commit it to your repo now.\")"
308
  ]
309
  }
310
  ],
 
24
  },
25
  {
26
  "cell_type": "code",
27
+ "execution_count": null,
28
  "metadata": {},
29
  "outputs": [],
30
  "source": [
31
+ "\n",
32
+ "!pip install \"unsloth[colab-new] @ git+https://github.com/unslothai/unsloth.git\" -q\n",
33
+ "!pip install trl transformers accelerate datasets wandb openenv-core -q\n",
34
+ "!pip install matplotlib pandas -q"
35
  ]
36
  },
37
  {
 
43
  },
44
  {
45
  "cell_type": "code",
46
+ "execution_count": 1,
47
  "metadata": {},
48
  "outputs": [
49
+ {
50
+ "name": "stderr",
51
+ "output_type": "stream",
52
+ "text": [
53
+ "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m If you're specifying your api key in code, ensure this code is not shared publicly.\n",
54
+ "\u001b[34m\u001b[1mwandb\u001b[0m: \u001b[33mWARNING\u001b[0m Consider setting the WANDB_API_KEY environment variable, or running `wandb login` from the command line.\n",
55
+ "\u001b[34m\u001b[1mwandb\u001b[0m: [wandb.login()] Using explicit session credentials for https://api.wandb.ai.\n",
56
+ "\u001b[34m\u001b[1mwandb\u001b[0m: No netrc file found, creating one.\n",
57
+ "\u001b[34m\u001b[1mwandb\u001b[0m: Appending key for api.wandb.ai to your netrc file: /root/.netrc\n",
58
+ "\u001b[34m\u001b[1mwandb\u001b[0m: Currently logged in as: \u001b[33myusufindian09\u001b[0m (\u001b[33myusufindian09-aaa\u001b[0m) to \u001b[32mhttps://api.wandb.ai\u001b[0m. Use \u001b[1m`wandb login --relogin`\u001b[0m to force relogin\n"
59
+ ]
60
+ },
61
  {
62
  "name": "stdout",
63
  "output_type": "stream",
 
67
  ]
68
  },
69
  {
70
+ "data": {
71
+ "text/html": [],
72
+ "text/plain": [
73
+ "<IPython.core.display.HTML object>"
74
+ ]
75
+ },
76
+ "metadata": {},
77
+ "output_type": "display_data"
78
+ },
79
+ {
80
+ "data": {
81
+ "text/html": [
82
+ "Tracking run with wandb version 0.25.1"
83
+ ],
84
+ "text/plain": [
85
+ "<IPython.core.display.HTML object>"
86
+ ]
87
+ },
88
+ "metadata": {},
89
+ "output_type": "display_data"
90
+ },
91
+ {
92
+ "data": {
93
+ "text/html": [
94
+ "Run data is saved locally in <code>/content/wandb/run-20260425_092404-bjgtcv44</code>"
95
+ ],
96
+ "text/plain": [
97
+ "<IPython.core.display.HTML object>"
98
+ ]
99
+ },
100
+ "metadata": {},
101
+ "output_type": "display_data"
102
+ },
103
+ {
104
+ "data": {
105
+ "text/html": [
106
+ "Syncing run <strong><a href='https://wandb.ai/yusufindian09-aaa/swebench-in/runs/bjgtcv44' target=\"_blank\">grpo-run-1</a></strong> to <a href='https://wandb.ai/yusufindian09-aaa/swebench-in' target=\"_blank\">Weights & Biases</a> (<a href='https://wandb.me/developer-guide' target=\"_blank\">docs</a>)<br>"
107
+ ],
108
+ "text/plain": [
109
+ "<IPython.core.display.HTML object>"
110
+ ]
111
+ },
112
+ "metadata": {},
113
+ "output_type": "display_data"
114
+ },
115
+ {
116
+ "data": {
117
+ "text/html": [
118
+ " View project at <a href='https://wandb.ai/yusufindian09-aaa/swebench-in' target=\"_blank\">https://wandb.ai/yusufindian09-aaa/swebench-in</a>"
119
+ ],
120
+ "text/plain": [
121
+ "<IPython.core.display.HTML object>"
122
+ ]
123
+ },
124
+ "metadata": {},
125
+ "output_type": "display_data"
126
+ },
127
+ {
128
+ "data": {
129
+ "text/html": [
130
+ " View run at <a href='https://wandb.ai/yusufindian09-aaa/swebench-in/runs/bjgtcv44' target=\"_blank\">https://wandb.ai/yusufindian09-aaa/swebench-in/runs/bjgtcv44</a>"
131
+ ],
132
+ "text/plain": [
133
+ "<IPython.core.display.HTML object>"
134
+ ]
135
+ },
136
+ "metadata": {},
137
+ "output_type": "display_data"
138
+ },
139
+ {
140
+ "name": "stderr",
141
+ "output_type": "stream",
142
+ "text": [
143
+ "wandb: Detected [huggingface_hub.inference, mcp, openai] in use.\n",
144
+ "wandb: Use W&B Weave for improved LLM call tracing. Install Weave with `pip install weave` then add `import weave` to the top of your script.\n",
145
+ "wandb: For more information, check out the docs at: https://weave-docs.wandb.ai/\n"
146
+ ]
147
+ },
148
+ {
149
+ "name": "stdout",
150
+ "output_type": "stream",
151
+ "text": [
152
+ "Setup complete\n"
153
  ]
154
  }
155
  ],
156
  "source": [
157
  "import os\n",
158
  "import json\n",
159
+ "import re\n",
160
+ "import random\n",
161
+ "import numpy as np\n",
162
+ "import matplotlib.pyplot as plt\n",
163
+ "import pandas as pd\n",
164
+ "import wandb\n",
165
+ "wandb.login(key=\"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\")\n",
166
+ "from datasets import Dataset\n",
167
  "from unsloth import FastLanguageModel\n",
168
  "from trl import GRPOTrainer, GRPOConfig\n",
169
  "\n",
170
+ "HF_SPACE_URL = \"https://huggingface.co/spaces/YUS200619/swebench-ind\"\n",
171
+ "WANDB_PROJECT = \"swebench-in\"\n",
172
+ "MODEL_NAME = \"unsloth/Qwen2.5-3B-Instruct\"\n",
173
+ "MAX_SEQ_LEN = 2048\n",
174
+ "MAX_STEPS = 15\n",
175
+ "BASELINE_EPISODES = 20\n",
176
  "\n",
177
+ "wandb.init(project=WANDB_PROJECT, name=\"grpo-run-1\")\n",
178
+ "print(\"Setup complete\")"
179
  ]
180
  },
181
  {
 
192
  "outputs": [],
193
  "source": [
194
  "model, tokenizer = FastLanguageModel.from_pretrained(\n",
195
+ " model_name=MODEL_NAME,\n",
196
+ " max_seq_length=MAX_SEQ_LEN,\n",
197
  " dtype=None,\n",
198
+ " load_in_4bit=True, # critical for Colab GPU memory\n",
199
  ")\n",
200
+ "\n",
201
  "model = FastLanguageModel.get_peft_model(\n",
202
  " model,\n",
203
  " r=16,\n",
 
205
  " lora_alpha=16,\n",
206
  " lora_dropout=0,\n",
207
  " bias=\"none\",\n",
208
+ " use_gradient_checkpointing=\"unsloth\", # saves even more memory\n",
209
+ " random_state=42,\n",
210
+ ")\n",
211
+ "\n",
212
+ "print(f\"Model loaded: {MODEL_NAME}\")\n",
213
+ "print(f\"Trainable parameters: {sum(p.numel() for p in model.parameters() if p.requires_grad):,}\")"
214
  ]
215
  },
216
  {
 
226
  "metadata": {},
227
  "outputs": [],
228
  "source": [
229
+ "import requests\n",
230
+ "\n",
231
+ "class SWEBenchINClient:\n",
232
  " \"\"\"\n",
233
+ " Simple HTTP client for your HF Space environment.\n",
234
+ " Calls the OpenEnv-compliant API endpoints.\n",
235
  " \"\"\"\n",
236
+ " def __init__(self, base_url: str):\n",
237
+ " self.base_url = base_url.rstrip(\"/\")\n",
238
+ " self.session = requests.Session()\n",
239
+ " # Test connection\n",
240
+ " resp = self.session.get(f\"{self.base_url}/health\", timeout=10)\n",
241
+ " print(f\"Environment connected: {resp.json()}\")\n",
 
242
  "\n",
243
+ " def reset(self, task_id: int = None) -> dict:\n",
244
+ " payload = {\"task_id\": task_id} if task_id else {}\n",
245
+ " resp = self.session.post(\n",
246
+ " f\"{self.base_url}/reset\",\n",
247
+ " json=payload,\n",
248
+ " timeout=30\n",
249
+ " )\n",
250
+ " resp.raise_for_status()\n",
251
+ " return resp.json()\n",
252
  "\n",
253
+ " def step(self, action: dict) -> tuple:\n",
254
+ " resp = self.session.post(\n",
255
+ " f\"{self.base_url}/step\",\n",
256
+ " json={\"action\": action},\n",
257
+ " timeout=30\n",
258
+ " )\n",
259
+ " resp.raise_for_status()\n",
260
+ " data = resp.json()\n",
261
+ " return (\n",
262
+ " data[\"observation\"],\n",
263
+ " float(data[\"reward\"]),\n",
264
+ " bool(data[\"done\"]),\n",
265
+ " data.get(\"info\", {})\n",
266
+ " )\n",
267
  "\n",
268
+ " def state(self) -> dict:\n",
269
+ " resp = self.session.get(f\"{self.base_url}/state\", timeout=10)\n",
270
+ " return resp.json()\n",
271
+ "\n",
272
+ " def grade(self) -> dict:\n",
273
+ " resp = self.session.post(f\"{self.base_url}/grade\", timeout=30)\n",
274
+ " return resp.json()\n",
 
275
  "\n",
276
+ "\n",
277
+ "env = SWEBenchINClient(https://huggingface.co/spaces/YUS200619/swebench-ind)"
278
  ]
279
  },
280
  {
 
295
  "metadata": {},
296
  "outputs": [],
297
  "source": [
298
+ "SYSTEM_PROMPT = \"\"\"You are an Indian software engineer working on a Linux server.\n",
299
+ "You will receive a broken environment description.\n",
300
+ "Your job: fix the technical issue AND handle any human communication.\n",
301
  "\n",
302
+ "RULES:\n",
303
+ "- Never use sudo\n",
304
+ "- Never use rm -rf \n",
305
+ "- Fix the server/code issue first\n",
306
+ "- Then reply to manager/client if messages exist\n",
307
+ "- Be efficient β€” fewer actions is better\n",
308
  "\n",
309
+ "OUTPUT FORMAT (pick one per turn):\n",
310
+ "{\"type\": \"run_command\", \"args\": \"pip install flask\"}\n",
311
+ "{\"type\": \"read_file\", \"args\": \"/home/user2/app.py\"}\n",
312
+ "{\"type\": \"write_file\", \"args\": \"/home/user2/app.py\", \"content\": \"fixed code here\"}\n",
313
+ "{\"type\": \"run_tests\", \"args\": \"\"}\n",
314
+ "{\"type\": \"check_server\", \"args\": \"\"}\n",
315
+ "{\"type\": \"reply_slack\", \"args\": \"Server is back up. Fixed the missing dependency.\"}\n",
316
+ "{\"type\": \"reply_email\", \"args\": \"Apologies for the downtime. Issue resolved at 2:15 PM.\"}\n",
317
+ "{\"type\": \"close_case\", \"args\": \"\"}\n",
318
  "\n",
319
+ "Output ONLY valid JSON. Nothing else.\"\"\"\n",
320
+ "\n",
321
+ "\n",
322
+ "def parse_action(text: str) -> dict:\n",
323
+ " \"\"\"\n",
324
+ " Parse model output into action dict.\n",
325
+ " Tries JSON first, then regex, then safe default.\n",
326
+ " \"\"\"\n",
327
+ " text = text.strip()\n",
328
+ "\n",
329
+ " # Strip markdown code blocks if present\n",
330
+ " text = re.sub(r\"```json\\s*\", \"\", text)\n",
331
+ " text = re.sub(r\"```\\s*\", \"\", text)\n",
332
+ " text = text.strip()\n",
333
+ "\n",
334
+ " # Try JSON parse\n",
335
+ " try:\n",
336
+ " action = json.loads(text)\n",
337
+ " if \"type\" in action:\n",
338
+ " return action\n",
339
+ " except json.JSONDecodeError:\n",
340
+ " pass\n",
341
+ "\n",
342
+ " # Try regex for known action types\n",
343
+ " pattern = r'\"?type\"?\\s*[:=]\\s*\"?(\\w+)\"?'\n",
344
+ " match = re.search(pattern, text)\n",
345
+ " if match:\n",
346
+ " action_type = match.group(1)\n",
347
+ " args_match = re.search(r'\"?args\"?\\s*[:=]\\s*\"?([^\"}\\n]+)\"?', text)\n",
348
+ " args = args_match.group(1).strip() if args_match else \"\"\n",
349
+ " return {\"type\": action_type, \"args\": args}\n",
350
+ "\n",
351
+ " # Safe fallback\n",
352
+ " return {\"type\": \"close_case\", \"args\": \"\"}\n",
353
  "\n",
354
+ "\n",
355
+ "def format_prompt(observation: dict) -> str:\n",
356
+ " \"\"\"Format the environment observation into a prompt.\"\"\"\n",
357
+ " return f\"\"\"CURRENT SITUATION:\n",
358
+ "{json.dumps(observation, indent=2)}\n",
359
+ "\n",
360
+ "What is your next action? Output valid JSON only.\"\"\"\n",
361
+ "\n",
362
+ "\n",
363
+ "def run_episode(task_id: int = None, temperature: float = 0.7) -> tuple:\n",
364
+ " \"\"\"\n",
365
+ " Run one full episode.\n",
366
+ " Returns (list_of_actions, total_reward, final_grade)\n",
367
+ " \"\"\"\n",
368
+ " observation = env.reset(task_id=task_id)\n",
369
+ " actions_taken = []\n",
370
+ " total_reward = 0.0\n",
371
+ " done = False\n",
372
+ "\n",
373
+ " messages = [\n",
374
+ " {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
375
+ " {\"role\": \"user\", \"content\": format_prompt(observation)}\n",
376
+ " ]\n",
377
+ "\n",
378
+ " for step_num in range(MAX_STEPS):\n",
379
+ " # Format input for model\n",
380
+ " input_text = tokenizer.apply_chat_template(\n",
381
+ " messages,\n",
382
+ " tokenize=False,\n",
383
+ " add_generation_prompt=True\n",
384
+ " )\n",
385
+ "\n",
386
+ " inputs = tokenizer(\n",
387
+ " input_text,\n",
388
+ " return_tensors=\"pt\",\n",
389
+ " truncation=True,\n",
390
+ " max_length=MAX_SEQ_LEN\n",
391
+ " ).to(model.device)\n",
392
+ "\n",
393
+ " # Generate action\n",
394
+ " with model.disable_adapter() if temperature == 0 else __import__('contextlib').nullcontext():\n",
395
+ " outputs = model.generate(\n",
396
+ " **inputs,\n",
397
+ " max_new_tokens=150,\n",
398
+ " temperature=temperature,\n",
399
+ " do_sample=(temperature > 0),\n",
400
+ " pad_token_id=tokenizer.eos_token_id,\n",
401
+ " )\n",
402
+ "\n",
403
+ " # Decode only new tokens\n",
404
+ " new_tokens = outputs[0][inputs[\"input_ids\"].shape[1]:]\n",
405
+ " action_text = tokenizer.decode(new_tokens, skip_special_tokens=True)\n",
406
+ "\n",
407
+ " # Parse action\n",
408
+ " action = parse_action(action_text)\n",
409
+ " actions_taken.append({\n",
410
+ " \"step\": step_num,\n",
411
+ " \"raw_output\": action_text[:200],\n",
412
+ " \"parsed\": action\n",
413
+ " })\n",
414
+ "\n",
415
+ " # Step environment\n",
416
+ " try:\n",
417
+ " observation, reward, done, info = env.step(action)\n",
418
+ " total_reward += reward\n",
419
+ " except Exception as e:\n",
420
+ " print(f\" Step {step_num} error: {e}\")\n",
421
+ " total_reward -= 0.1\n",
422
+ " done = True\n",
423
+ " break\n",
424
+ "\n",
425
+ " # Add to conversation history\n",
426
+ " messages.append({\"role\": \"assistant\", \"content\": action_text})\n",
427
+ " messages.append({\n",
428
+ " \"role\": \"user\",\n",
429
+ " \"content\": f\"Result: {json.dumps(observation, indent=2)}\\n\\nNext action?\"\n",
430
+ " })\n",
431
+ "\n",
432
+ " if done:\n",
433
+ " break\n",
434
+ "\n",
435
+ " # Get final grade\n",
436
+ " try:\n",
437
+ " final_grade = env.grade()\n",
438
+ " except:\n",
439
+ " final_grade = {\"total\": total_reward}\n",
440
+ "\n",
441
+ " return actions_taken, total_reward, final_grade"
442
  ]
443
  },
444
  {
 
457
  "metadata": {},
458
  "outputs": [],
459
  "source": [
460
+ "print(\"=\" * 50)\n",
461
+ "print(\"MEASURING UNTRAINED BASELINE\")\n",
462
+ "print(\"=\" * 50)\n",
463
+ "\n",
464
+ "baseline_rewards = []\n",
465
+ "baseline_task_rewards = {1: [], 2: [], 3: [], 4: [], 5: []}\n",
466
  "\n",
467
+ "for i in range(BASELINE_EPISODES):\n",
468
+ " task_id = random.choice([1, 2, 3, 4, 5])\n",
469
+ " _, reward, grade = run_episode(task_id=task_id, temperature=0.0)\n",
470
+ " baseline_rewards.append(reward)\n",
471
+ " baseline_task_rewards[task_id].append(reward)\n",
472
+ " print(f\" Episode {i+1:02d}/20 | task={task_id} | reward={reward:.3f}\")\n",
473
+ "\n",
474
+ "REAL_BASELINE_AVG = np.mean(baseline_rewards)\n",
475
+ "print(f\"\\nReal baseline average: {REAL_BASELINE_AVG:.3f}\")\n",
476
+ "print(f\"Baseline std: {np.std(baseline_rewards):.3f}\")\n",
477
+ "print(\"Save this number. You need it for the final plot.\")"
478
  ]
479
  },
480
  {
 
493
  "metadata": {},
494
  "outputs": [],
495
  "source": [
496
+ "\"\"\"\n",
497
+ "GRPO needs a dataset of prompts to generate completions from.\n",
498
+ "Each prompt = one episode starting state.\n",
499
+ "We generate 200 prompts across all tasks with curriculum weighting.\n",
500
+ "\"\"\"\n",
501
+ "\n",
502
+ "def generate_prompt_dataset(n_prompts: int = 200) -> Dataset:\n",
503
+ " prompts = []\n",
504
+ "\n",
505
+ " # Curriculum weighting β€” more easy tasks early\n",
506
+ " task_weights = {\n",
507
+ " 1: 0.30, # easy\n",
508
+ " 2: 0.30, # easy\n",
509
+ " 3: 0.20, # medium\n",
510
+ " 4: 0.15, # medium\n",
511
+ " 5: 0.05, # hard\n",
512
+ " }\n",
513
+ "\n",
514
+ " task_pool = []\n",
515
+ " for task_id, weight in task_weights.items():\n",
516
+ " count = int(n_prompts * weight)\n",
517
+ " task_pool.extend([task_id] * count)\n",
518
+ "\n",
519
+ " random.shuffle(task_pool)\n",
520
+ "\n",
521
+ " for task_id in task_pool:\n",
522
+ " obs = env.reset(task_id=task_id)\n",
523
+ " prompt = tokenizer.apply_chat_template(\n",
524
+ " [\n",
525
+ " {\"role\": \"system\", \"content\": SYSTEM_PROMPT},\n",
526
+ " {\"role\": \"user\", \"content\": format_prompt(obs)}\n",
527
+ " ],\n",
528
+ " tokenize=False,\n",
529
+ " add_generation_prompt=True\n",
530
+ " )\n",
531
+ " prompts.append({\n",
532
+ " \"prompt\": prompt,\n",
533
+ " \"task_id\": task_id\n",
534
+ " })\n",
535
+ "\n",
536
+ " return Dataset.from_list(prompts)\n",
537
+ "\n",
538
+ "\n",
539
+ "print(\"Generating training prompts...\")\n",
540
+ "train_dataset = generate_prompt_dataset(n_prompts=200)\n",
541
+ "print(f\"Dataset ready: {len(train_dataset)} prompts\")\n"
542
+ ]
543
+ },
544
+ {
545
+ "cell_type": "markdown",
546
+ "metadata": {},
547
+ "source": [
548
+ "code 8"
549
+ ]
550
+ },
551
+ {
552
+ "cell_type": "code",
553
+ "execution_count": null,
554
+ "metadata": {},
555
+ "outputs": [],
556
+ "source": [
557
+ "\"\"\"\n",
558
+ "This is what GRPO calls to score each completion.\n",
559
+ "GRPO generates multiple completions per prompt, scores them,\n",
560
+ "and updates weights toward higher-scoring completions.\n",
561
+ "\"\"\"\n",
562
+ "\n",
563
+ "episode_log = [] # track all episodes during training\n",
564
+ "\n",
565
+ "def grpo_reward_function(completions: list, prompts: list, **kwargs) -> list[float]:\n",
566
+ " \"\"\"\n",
567
+ " Called by GRPOTrainer after generating completions.\n",
568
+ " Returns a reward score for each completion.\n",
569
+ " \"\"\"\n",
570
+ " rewards = []\n",
571
+ "\n",
572
+ " for i, completion in enumerate(completions):\n",
573
+ " try:\n",
574
+ " # Parse the action\n",
575
+ " action = parse_action(completion)\n",
576
+ "\n",
577
+ " # Reset env and take the action\n",
578
+ " task_id = random.choice([1, 2, 3, 4]) # avoid task 5 early\n",
579
+ " obs = env.reset(task_id=task_id)\n",
580
+ " _, reward, done, _ = env.step(action)\n",
581
+ "\n",
582
+ " # Continue episode for up to 3 more steps\n",
583
+ " # (gives richer signal than single-step reward)\n",
584
+ " for _ in range(3):\n",
585
+ " if done:\n",
586
+ " break\n",
587
+ " # Simple follow-up: try to close case\n",
588
+ " followup = {\"type\": \"check_server\", \"args\": \"\"}\n",
589
+ " obs, step_reward, done, _ = env.step(followup)\n",
590
+ " reward += step_reward * 0.5 # discount follow-up rewards\n",
591
+ "\n",
592
+ " # Penalty for bad actions\n",
593
+ " if action[\"type\"] == \"close_case\" and reward < 0.1:\n",
594
+ " reward -= 0.3 # penalize giving up immediately\n",
595
+ "\n",
596
+ " rewards.append(float(reward))\n",
597
+ "\n",
598
+ " except Exception as e:\n",
599
+ " # Environment error β€” penalize\n",
600
+ " rewards.append(-0.5)\n",
601
+ "\n",
602
+ " # Log to wandb\n",
603
+ " wandb.log({\n",
604
+ " \"reward/batch_mean\": np.mean(rewards),\n",
605
+ " \"reward/batch_std\": np.std(rewards),\n",
606
+ " \"reward/batch_max\": np.max(rewards),\n",
607
+ " })\n",
608
  "\n",
609
+ " episode_log.extend(rewards)\n",
610
+ " return rewards\n"
611
+ ]
612
+ },
613
+ {
614
+ "cell_type": "markdown",
615
+ "metadata": {},
616
+ "source": [
617
+ "code 9\n"
618
+ ]
619
+ },
620
+ {
621
+ "cell_type": "code",
622
+ "execution_count": null,
623
+ "metadata": {},
624
+ "outputs": [],
625
+ "source": [
626
+ "training_args = GRPOConfig(\n",
627
+ " # Output\n",
628
+ " output_dir=\"./swebench-in-checkpoints\",\n",
629
+ "\n",
630
+ " # Training duration\n",
631
+ " num_train_epochs=3,\n",
632
+ " max_steps=500, # hard cap β€” enough for hackathon\n",
633
+ "\n",
634
+ " # Batch sizes β€” small to fit T4 GPU\n",
635
+ " per_device_train_batch_size=2,\n",
636
+ " gradient_accumulation_steps=8, # effective batch = 16\n",
637
+ "\n",
638
+ " # GRPO specific\n",
639
+ " num_generations=4, # completions per prompt GRPO compares\n",
640
+ " max_completion_length=200, # max tokens per action\n",
641
+ "\n",
642
+ " # Optimizer\n",
643
+ " learning_rate=5e-6, # low LR for RL stability\n",
644
+ " warmup_steps=20,\n",
645
+ " weight_decay=0.01,\n",
646
+ "\n",
647
+ " # Logging\n",
648
+ " logging_steps=10,\n",
649
+ " save_steps=100,\n",
650
+ " report_to=\"wandb\",\n",
651
+ "\n",
652
+ " # Memory\n",
653
+ " gradient_checkpointing=True,\n",
654
+ " fp16=True, # use fp16 on T4\n",
655
+ ")"
656
+ ]
657
+ },
658
+ {
659
+ "cell_type": "markdown",
660
+ "metadata": {},
661
+ "source": [
662
+ "code 10\n"
663
+ ]
664
+ },
665
+ {
666
+ "cell_type": "code",
667
+ "execution_count": null,
668
+ "metadata": {},
669
+ "outputs": [],
670
+ "source": [
671
+ "trainer = GRPOTrainer(\n",
672
+ " model=model,\n",
673
+ " reward_funcs=grpo_reward_function,\n",
674
+ " args=training_args,\n",
675
+ " train_dataset=train_dataset,\n",
676
+ " tokenizer=tokenizer,\n",
677
+ ")\n",
678
+ "\n",
679
+ "print(\"Starting GRPO training...\")\n",
680
+ "print(f\"Dataset: {len(train_dataset)} prompts\")\n",
681
+ "print(f\"Max steps: {training_args.max_steps}\")\n",
682
+ "print(f\"Generations per prompt: {training_args.num_generations}\")\n",
683
+ "print(\"=\" * 50)\n",
684
  "\n",
685
+ "trainer.train()\n",
686
+ "\n",
687
+ "print(\"Training complete!\")\n"
688
+ ]
689
+ },
690
+ {
691
+ "cell_type": "markdown",
692
+ "metadata": {},
693
+ "source": [
694
+ "code 11"
695
+ ]
696
+ },
697
+ {
698
+ "cell_type": "code",
699
+ "execution_count": null,
700
+ "metadata": {},
701
+ "outputs": [],
702
+ "source": [
703
+ "print(\"=\" * 50)\n",
704
+ "print(\"EVALUATING TRAINED MODEL\")\n",
705
+ "print(\"=\" * 50)\n",
706
+ "\n",
707
+ "trained_rewards = []\n",
708
+ "\n",
709
+ "for i in range(BASELINE_EPISODES):\n",
710
+ " task_id = random.choice([1, 2, 3, 4, 5])\n",
711
+ " _, reward, grade = run_episode(task_id=task_id, temperature=0.0)\n",
712
+ " trained_rewards.append(reward)\n",
713
+ " print(f\" Episode {i+1:02d}/20 | task={task_id} | reward={reward:.3f}\")\n",
714
+ "\n",
715
+ "TRAINED_AVG = np.mean(trained_rewards)\n",
716
+ "print(f\"\\nTrained average: {TRAINED_AVG:.3f}\")\n",
717
+ "print(f\"Baseline average: {REAL_BASELINE_AVG:.3f}\")\n",
718
+ "print(f\"Improvement: {TRAINED_AVG - REAL_BASELINE_AVG:.3f}\")"
719
+ ]
720
+ },
721
+ {
722
+ "cell_type": "markdown",
723
+ "metadata": {},
724
+ "source": [
725
+ "code 12"
726
+ ]
727
+ },
728
+ {
729
+ "cell_type": "code",
730
+ "execution_count": null,
731
+ "metadata": {},
732
+ "outputs": [],
733
+ "source": [
734
+ "import os\n",
735
  "os.makedirs(\"plots\", exist_ok=True)\n",
736
  "\n",
737
+ "# Pull training history from wandb\n",
738
+ "history_df = wandb.run.history(\n",
739
+ " keys=[\"reward/batch_mean\", \"_step\", \"loss\"]\n",
740
+ ")\n",
741
+ "history_df = history_df.dropna(subset=[\"reward/batch_mean\"])\n",
742
+ "\n",
743
+ "# ── Plot 1: Reward Curve ──────────────────────────────────\n",
744
+ "fig, ax = plt.subplots(figsize=(12, 5))\n",
745
+ "\n",
746
+ "# Training reward\n",
747
+ "ax.plot(\n",
748
+ " history_df[\"_step\"],\n",
749
+ " history_df[\"reward/batch_mean\"],\n",
750
+ " color=\"steelblue\",\n",
751
+ " alpha=0.6,\n",
752
+ " linewidth=1,\n",
753
+ " label=\"Training reward (per batch)\"\n",
754
+ ")\n",
755
+ "\n",
756
+ "# Smoothed training reward\n",
757
+ "if len(history_df) > 10:\n",
758
+ " smoothed = pd.Series(\n",
759
+ " history_df[\"reward/batch_mean\"].values\n",
760
+ " ).rolling(window=20, min_periods=1).mean()\n",
761
+ " ax.plot(\n",
762
+ " history_df[\"_step\"],\n",
763
+ " smoothed,\n",
764
+ " color=\"steelblue\",\n",
765
+ " linewidth=2.5,\n",
766
+ " label=\"Training reward (smoothed)\"\n",
767
+ " )\n",
768
+ "\n",
769
+ "# Baseline and trained horizontal lines\n",
770
+ "ax.axhline(\n",
771
+ " y=REAL_BASELINE_AVG,\n",
772
+ " color=\"red\",\n",
773
+ " linestyle=\"--\",\n",
774
+ " linewidth=2,\n",
775
+ " label=f\"Untrained baseline ({REAL_BASELINE_AVG:.2f})\"\n",
776
+ ")\n",
777
+ "ax.axhline(\n",
778
+ " y=TRAINED_AVG,\n",
779
+ " color=\"green\",\n",
780
+ " linestyle=\"--\",\n",
781
+ " linewidth=2,\n",
782
+ " label=f\"Trained model ({TRAINED_AVG:.2f})\"\n",
783
+ ")\n",
784
+ "\n",
785
+ "ax.set_xlabel(\"Training Step\", fontsize=12)\n",
786
+ "ax.set_ylabel(\"Episode Reward\", fontsize=12)\n",
787
+ "ax.set_title(\"SWEbench-IN: GRPO Training Reward Curve\", fontsize=14)\n",
788
+ "ax.legend(fontsize=10)\n",
789
  "ax.grid(True, alpha=0.3)\n",
790
  "plt.tight_layout()\n",
791
+ "plt.savefig(\"plots/reward_curve.png\", dpi=150, bbox_inches=\"tight\")\n",
792
  "plt.show()\n",
793
+ "print(\"Saved: plots/reward_curve.png\")\n",
794
+ "\n",
795
+ "# ── Plot 2: Loss Curve ────────────────────────────────────\n",
796
+ "fig, ax = plt.subplots(figsize=(12, 5))\n",
797
+ "\n",
798
+ "# Try trainer log history first (more reliable than wandb)\n",
799
+ "log_history = trainer.state.log_history\n",
800
+ "losses = [x[\"loss\"] for x in log_history if \"loss\" in x]\n",
801
+ "steps = [x[\"step\"] for x in log_history if \"loss\" in x]\n",
802
  "\n",
803
+ "if losses:\n",
804
+ " ax.plot(steps, losses, color=\"crimson\", linewidth=1.5, label=\"Policy Loss\")\n",
805
+ "elif \"loss\" in history_df.columns:\n",
806
+ " loss_df = history_df.dropna(subset=[\"loss\"])\n",
807
+ " ax.plot(loss_df[\"_step\"], loss_df[\"loss\"],\n",
808
+ " color=\"crimson\", linewidth=1.5, label=\"Policy Loss\")\n",
809
+ "else:\n",
810
+ " ax.text(0.5, 0.5, \"Loss not logged\", transform=ax.transAxes,\n",
811
+ " ha=\"center\", fontsize=14)\n",
812
+ "\n",
813
+ "ax.set_xlabel(\"Training Step\", fontsize=12)\n",
814
+ "ax.set_ylabel(\"Loss\", fontsize=12)\n",
815
+ "ax.set_title(\"SWEbench-IN: Policy Loss Curve\", fontsize=14)\n",
816
+ "ax.legend(fontsize=10)\n",
817
+ "ax.grid(True, alpha=0.3)\n",
818
+ "plt.tight_layout()\n",
819
+ "plt.savefig(\"plots/loss_curve.png\", dpi=150, bbox_inches=\"tight\")\n",
820
+ "plt.show()\n",
821
+ "print(\"Saved: plots/loss_curve.png\")\n",
822
+ "\n",
823
+ "# ── Plot 3: Before vs After Comparison ───────────────────\n",
824
  "fig, ax = plt.subplots(figsize=(10, 5))\n",
825
+ "\n",
826
+ "episodes = list(range(1, BASELINE_EPISODES + 1))\n",
827
+ "ax.plot(episodes, baseline_rewards,\n",
828
+ " color=\"red\", marker=\"o\", linewidth=1.5,\n",
829
+ " label=f\"Untrained (avg={REAL_BASELINE_AVG:.2f})\")\n",
830
+ "ax.plot(episodes, trained_rewards,\n",
831
+ " color=\"green\", marker=\"s\", linewidth=1.5,\n",
832
+ " label=f\"Trained (avg={TRAINED_AVG:.2f})\")\n",
833
+ "ax.axhline(y=REAL_BASELINE_AVG, color=\"red\",\n",
834
+ " linestyle=\"--\", alpha=0.4)\n",
835
+ "ax.axhline(y=TRAINED_AVG, color=\"green\",\n",
836
+ " linestyle=\"--\", alpha=0.4)\n",
837
+ "ax.fill_between(episodes, baseline_rewards, trained_rewards,\n",
838
+ " alpha=0.1, color=\"green\",\n",
839
+ " label=f\"Improvement: +{TRAINED_AVG - REAL_BASELINE_AVG:.2f}\")\n",
840
+ "\n",
841
+ "ax.set_xlabel(\"Episode\", fontsize=12)\n",
842
+ "ax.set_ylabel(\"Reward\", fontsize=12)\n",
843
+ "ax.set_title(\"SWEbench-IN: Before vs After GRPO Training\", fontsize=14)\n",
844
+ "ax.legend(fontsize=10)\n",
845
  "ax.grid(True, alpha=0.3)\n",
846
  "plt.tight_layout()\n",
847
+ "plt.savefig(\"plots/before_after.png\", dpi=150, bbox_inches=\"tight\")\n",
848
  "plt.show()\n",
849
+ "print(\"Saved: plots/before_after.png\")\n"
850
+ ]
851
+ },
852
+ {
853
+ "cell_type": "markdown",
854
+ "metadata": {},
855
+ "source": [
856
+ "cell 13"
857
+ ]
858
+ },
859
+ {
860
+ "cell_type": "code",
861
+ "execution_count": null,
862
+ "metadata": {},
863
+ "outputs": [],
864
+ "source": [
865
+ "\"\"\"\n",
866
+ "CRITICAL: Do NOT merge LoRA into 4-bit base.\n",
867
+ "Save adapters only using method=\"lora\"\n",
868
+ "\"\"\"\n",
869
+ "model.save_pretrained_merged(\n",
870
+ " \"swebench-in-lora\",\n",
871
+ " tokenizer=tokenizer,\n",
872
+ " save_method=\"lora\"\n",
873
+ ")\n",
874
+ "print(\"Model saved to swebench-in-lora/\")\n",
875
+ "\n",
876
+ "\n"
877
+ ]
878
+ },
879
+ {
880
+ "cell_type": "markdown",
881
+ "metadata": {},
882
+ "source": [
883
+ "cell 14"
884
+ ]
885
+ },
886
+ {
887
+ "cell_type": "code",
888
+ "execution_count": null,
889
+ "metadata": {},
890
+ "outputs": [],
891
+ "source": [
892
+ "# ============================================================\n",
893
+ "\"\"\"\n",
894
+ "AFTER downloading plots from Colab Files panel:\n",
895
+ "\n",
896
+ "cd your-local-repo\n",
897
+ "cp ~/Downloads/reward_curve.png plots/\n",
898
+ "cp ~/Downloads/loss_curve.png plots/\n",
899
+ "cp ~/Downloads/before_after.png plots/\n",
900
+ "\n",
901
+ "git add plots/\n",
902
+ "git commit -m \"Add training evidence: reward curve, loss curve, before/after\"\n",
903
+ "git push origin main\n",
904
+ "\n",
905
+ "Then verify from logged-out browser that plots appear in README.\n",
906
+ "\"\"\"\n",
907
+ "print(\"Download plots from Colab Files panel β†’ commit to repo\")\n",
908
+ "print(\"plots/reward_curve.png\")\n",
909
+ "print(\"plots/loss_curve.png\")\n",
910
+ "print(\"plots/before_after.png\")"
911
  ]
912
  }
913
  ],
server/app.py CHANGED
@@ -44,7 +44,7 @@ app = create_app(
44
  )
45
 
46
 
47
- def main(host: str = "0.0.0.0", port: int = 7068):
47
+ def main(host: str = "0.0.0.0", port: int = 7860):
48
  """
49
  Entry point for direct execution via uv run or python -m.
50
 
@@ -58,6 +58,9 @@ def main(host: str = "0.0.0.0", port: int = 7068):
58
  """
59
  import uvicorn
60
 
61
+ # HuggingFace Spaces expects the app on PORT (typically 7860).
62
+ port = int(os.getenv("PORT", str(port)))
63
+
64
  uvicorn.run(app, host=host, port=port)
65
 
66
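The PORT override added to main() can be sanity-checked in isolation. A sketch of the same precedence rule (the Spaces-provided PORT environment variable wins over the default argument; resolve_port is a hypothetical stand-in):

```python
import os

def resolve_port(default: int = 7860) -> int:
    # Mirrors the commit: PORT, when set by HF Spaces, overrides the default.
    return int(os.getenv("PORT", str(default)))

assert resolve_port() == 7860          # assumes no PORT in the environment yet
os.environ["PORT"] = "8080"
assert resolve_port() == 8080          # Spaces-provided PORT wins
```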