{ "cells": [ { "cell_type": "markdown", "id": "f1f7a30f", "metadata": {}, "source": [ "# Train Flatmate RL with TRL GRPO\n", "\n", "This notebook uses the live Hugging Face Space endpoint as the reward source for GRPO. It first collects prompt states from websocket rollouts, then trains a model to generate one JSON action for each observation. The main reward function resets the endpoint, replays the stored action prefix, applies the model action, and scores the resulting Flatmate RL transition.\n", "\n", "Space endpoint: `https://huggingface.co/spaces/kushalExplores/flatmate_rl`" ] }, { "cell_type": "code", "execution_count": null, "id": "adabfca9", "metadata": {}, "outputs": [], "source": [ "# Restart the kernel after this cell if your notebook runtime asks you to.\n", "%pip install -q \"trl>=0.26.2\" \"transformers>=4.57.0\" accelerate datasets peft websockets huggingface_hub matplotlib pandas" ] }, { "cell_type": "code", "execution_count": null, "id": "b79df9da", "metadata": {}, "outputs": [], "source": [ "from __future__ import annotations\n", "\n", "import asyncio\n", "import json\n", "import random\n", "import threading\n", "from dataclasses import dataclass\n", "from typing import Any\n", "from urllib.parse import urlparse\n", "\n", "import websockets\n", "from datasets import Dataset\n", "\n", "SPACE_HTTP_URL = \"https://kushalexplores-flatmate-rl.hf.space\"\n", "SCENARIOS = [\n", " \"task_visit_single\",\n", " \"task_visit_single_hidden_flex\",\n", " \"task_visit_multi\",\n", " \"task_visit_single_seller_followup\",\n", "]\n", "\n", "def ws_url_from_http(base_url: str) -> str:\n", " parsed = urlparse(base_url.rstrip(\"/\"))\n", " scheme = \"wss\" if parsed.scheme == \"https\" else \"ws\"\n", " return f\"{scheme}://{parsed.netloc}/ws\"\n", "\n", "SPACE_WS_URL = ws_url_from_http(SPACE_HTTP_URL)\n", "SPACE_WS_URL" ] }, { "cell_type": "markdown", "id": "ac8bdbfc", "metadata": {}, "source": [ "## Websocket Environment Client\n", "\n", "OpenEnv keeps episode state on `/ws`. The plain HTTP `/reset` and `/step` endpoints create fresh environment instances, so GRPO reward replay uses websocket sessions." 
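, "\n", "\n", "As a quick sanity check, the next cell opens one raw websocket session and prints the reply envelope. The `{\"type\": ..., \"data\": ...}` message shape is inferred from the client code in this notebook rather than from OpenEnv documentation, so treat it as an assumption about the protocol." ] }, { "cell_type": "code", "execution_count": null, "id": "b3e1a9c2", "metadata": {}, "outputs": [], "source": [ "# Minimal raw probe of the /ws envelope. The message shape is assumed from\n",
"# the FlatmateEndpoint client defined below, not from official docs.\n",
"async def probe_ws_protocol():\n",
"    async with websockets.connect(SPACE_WS_URL, open_timeout=120) as ws:\n",
"        await ws.send(json.dumps({\"type\": \"reset\", \"data\": {\"scenario_id\": \"task_visit_single\", \"seed\": 0}}))\n",
"        message = json.loads(await ws.recv())\n",
"        print(\"reply type:\", message.get(\"type\"))\n",
"        print(\"data keys:\", sorted(message.get(\"data\", {})))\n",
"        await ws.send(json.dumps({\"type\": \"close\"}))\n",
"\n",
"await probe_ws_protocol()"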
] }, { "cell_type": "code", "execution_count": null, "id": "bf61d6d4", "metadata": {}, "outputs": [], "source": [ "class FlatmateEndpoint:\n", " def __init__(self, ws_url: str = SPACE_WS_URL, timeout_s: float = 120.0):\n", " self.ws_url = ws_url\n", " self.timeout_s = timeout_s\n", "\n", " async def __aenter__(self):\n", " self.ws = await websockets.connect(\n", " self.ws_url,\n", " open_timeout=self.timeout_s,\n", " ping_timeout=self.timeout_s,\n", " )\n", " return self\n", "\n", " async def __aexit__(self, exc_type, exc, tb):\n", " try:\n", " await self.ws.send(json.dumps({\"type\": \"close\"}))\n", " finally:\n", " await self.ws.close()\n", "\n", " async def _send(self, payload: dict[str, Any]) -> dict[str, Any]:\n", " await self.ws.send(json.dumps(payload))\n", " raw = await asyncio.wait_for(self.ws.recv(), timeout=self.timeout_s)\n", " message = json.loads(raw)\n", " if message.get(\"type\") == \"error\":\n", " raise RuntimeError(message.get(\"data\", message))\n", " data = message[\"data\"]\n", " obs = data.get(\"observation\", {})\n", " obs[\"reward\"] = data.get(\"reward\")\n", " obs[\"done\"] = data.get(\"done\", False)\n", " return obs\n", "\n", " async def reset(self, scenario_id: str, seed: int | None = None) -> dict[str, Any]:\n", " data: dict[str, Any] = {\"scenario_id\": scenario_id}\n", " if seed is not None:\n", " data[\"seed\"] = seed\n", " return await self._send({\"type\": \"reset\", \"data\": data})\n", "\n", " async def step(self, action: dict[str, Any]) -> dict[str, Any]:\n", " return await self._send({\"type\": \"step\", \"data\": action})\n", "\n", "async def smoke_test_endpoint():\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(\"task_visit_single\", seed=1)\n", " print(obs[\"scenario_id\"], obs[\"status\"])\n", " print(obs.get(\"last_user_message\") or obs.get(\"current_user_request\"))\n", "\n", "await smoke_test_endpoint()" ] }, { "cell_type": "markdown", "id": "d6401b21", "metadata": {}, "source": [ "## Prompt States\n", "\n", "GRPO needs prompts. These prompts are endpoint observations collected from heuristic rollouts. Each row also stores the action prefix needed to reconstruct that exact state during reward scoring." 
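, "\n", "\n", "For reference, the next cell shows the row schema the collector produces. Every value is a made-up placeholder (a hypothetical example, not endpoint output); real rows come from `collect_prompt_rows` below." ] }, { "cell_type": "code", "execution_count": null, "id": "c4f2b8d1", "metadata": {}, "outputs": [], "source": [ "# Illustrative dataset row. Field names match collect_prompt_rows below;\n",
"# all values here are placeholders, not real endpoint data.\n",
"example_row = {\n",
"    \"prompt\": \"You are a broker policy ... Observation: {...} Action:\",\n",
"    \"scenario_id\": \"task_visit_single\",\n",
"    \"seed\": 11,\n",
"    \"prefix_actions_json\": json.dumps([\n",
"        {\"action_type\": \"assistant_message\", \"assistant_message\": \"Please share your dietary preference.\"}\n",
"    ]),\n",
"    \"reference_action_json\": json.dumps({\"action_type\": \"tool_call\", \"tool_name\": \"store_user_details\", \"tool_arguments\": {}}),\n",
"}\n",
"print(json.dumps(example_row, indent=2))"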
] }, { "cell_type": "code", "execution_count": null, "id": "c2ebe016", "metadata": {}, "outputs": [], "source": [ "def trace_tool_names(obs: dict[str, Any]) -> list[str]:\n", " return [str(t.get(\"tool\", t.get(\"tool_name\", \"\"))) for t in obs.get(\"tool_trace\", [])]\n", "\n", "def heuristic_action(obs: dict[str, Any]) -> dict[str, Any] | None:\n", " tools = trace_tool_names(obs)\n", " phase = obs.get(\"phase\", \"buyer\")\n", " remaining = set(obs.get(\"remaining_required_fields\", []))\n", " scenario_id = obs.get(\"scenario_id\", \"task_visit_single\")\n", "\n", " if phase == \"seller\" and not obs.get(\"seller_profile_stored\"):\n", " if remaining:\n", " return {\"action_type\": \"assistant_message\", \"assistant_message\": \"Please share the household dietary setup, who the flat is for, and available visit time slots.\"}\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"store_seller_details\", \"tool_arguments\": {}}\n", "\n", " if not obs.get(\"buyer_profile_stored\"):\n", " if \"diet\" in remaining and \"visit_availability\" in remaining:\n", " return {\"action_type\": \"assistant_message\", \"assistant_message\": \"Please share your dietary preference and visit availability.\"}\n", " if \"diet\" in remaining:\n", " return {\"action_type\": \"assistant_message\", \"assistant_message\": \"Please share your dietary preference.\"}\n", " if \"visit_availability\" in remaining:\n", " return {\"action_type\": \"assistant_message\", \"assistant_message\": \"Please share your visit availability.\"}\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"store_user_details\", \"tool_arguments\": {}}\n", "\n", " post_ids = [\"post_031\", \"post_052\"] if scenario_id == \"task_visit_multi\" else [\"post_031\"]\n", " if \"search_posts\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"search_posts\", \"tool_arguments\": {}}\n", " if \"match_location_preference\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"match_location_preference\", \"tool_arguments\": {\"post_ids\": post_ids}}\n", " if \"get_commute_time\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"get_commute_time\", \"tool_arguments\": {\"post_ids\": post_ids}}\n", " if \"check_calendar_slots\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"check_calendar_slots\", \"tool_arguments\": {\"post_ids\": post_ids}}\n", " if \"shortlist\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"shortlist\", \"tool_arguments\": {\"post_ids\": post_ids}}\n", " if \"contact_poster\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"contact_poster\", \"tool_arguments\": {\"post_id\": post_ids[0], \"time_text\": \"tomorrow 7pm\"}}\n", " if \"book_viewing\" not in tools:\n", " return {\"action_type\": \"tool_call\", \"tool_name\": \"book_viewing\", \"tool_arguments\": {\"post_id\": post_ids[0], \"time_text\": \"tomorrow 7pm\"}}\n", " return None\n", "\n", "def compact_observation(obs: dict[str, Any]) -> dict[str, Any]:\n", " return {\n", " \"scenario_id\": obs.get(\"scenario_id\"),\n", " \"phase\": obs.get(\"phase\"),\n", " \"status\": obs.get(\"status\"),\n", " \"last_user_message\": obs.get(\"last_user_message\"),\n", " \"current_user_request\": obs.get(\"current_user_request\"),\n", " \"available_tools\": obs.get(\"available_tools\", []),\n", " \"remaining_required_fields\": obs.get(\"remaining_required_fields\", []),\n", " \"prerequisites_satisfied\": 
obs.get(\"prerequisites_satisfied\", {}),\n", " \"recent_tool_calls\": obs.get(\"recent_tool_calls\", []),\n", " \"last_tool_result\": obs.get(\"last_tool_result\", {}),\n", " \"violations\": obs.get(\"violations\", []),\n", " \"booked_visits\": obs.get(\"booked_visits\", []),\n", " \"feedback_summary\": obs.get(\"feedback_summary\", \"\"),\n", " }\n", "\n", "def prompt_from_observation(obs: dict[str, Any]) -> str:\n", " return (\n", " \"You are a broker policy for the Flatmate RL environment. \"\n", " \"Return exactly one JSON action and no extra text.\\n\\n\"\n", " \"Valid action shapes:\\n\"\n", " \"{\\\"action_type\\\":\\\"assistant_message\\\",\\\"assistant_message\\\":\\\"...\\\"}\\n\"\n", " \"{\\\"action_type\\\":\\\"tool_call\\\",\\\"tool_name\\\":\\\"...\\\",\\\"tool_arguments\\\":{...}}\\n\\n\"\n", " f\"Observation:\\n{json.dumps(compact_observation(obs), ensure_ascii=False, sort_keys=True)}\\n\\n\"\n", " \"Action:\\n\"\n", " )" ] }, { "cell_type": "code", "execution_count": null, "id": "d29c9dc5", "metadata": {}, "outputs": [], "source": [ "@dataclass\n", "class PromptCollectionConfig:\n", " episodes: int = 12\n", " max_steps: int = 14\n", " seed: int = 11\n", "\n", "async def collect_prompt_rows(config: PromptCollectionConfig = PromptCollectionConfig()) -> list[dict[str, Any]]:\n", " rng = random.Random(config.seed)\n", " rows: list[dict[str, Any]] = []\n", " for episode_idx in range(config.episodes):\n", " scenario_id = rng.choice(SCENARIOS)\n", " prefix_actions: list[dict[str, Any]] = []\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(scenario_id, seed=config.seed + episode_idx)\n", " for step_idx in range(config.max_steps):\n", " action = heuristic_action(obs)\n", " if action is None or obs.get(\"done\"):\n", " break\n", " rows.append({\n", " \"prompt\": prompt_from_observation(obs),\n", " \"scenario_id\": scenario_id,\n", " \"seed\": config.seed + episode_idx,\n", " \"prefix_actions_json\": json.dumps(prefix_actions, ensure_ascii=False),\n", " \"reference_action_json\": json.dumps(action, ensure_ascii=False, sort_keys=True),\n", " })\n", " obs = await env.step(action)\n", " prefix_actions.append(action)\n", " if obs.get(\"done\"):\n", " break\n", " print(f\"episode={episode_idx:02d} scenario={scenario_id} total_rows={len(rows)}\")\n", " return rows\n", "\n", "rows = await collect_prompt_rows(PromptCollectionConfig(episodes=12, max_steps=14))\n", "train_dataset = Dataset.from_list(rows)\n", "train_dataset" ] }, { "cell_type": "markdown", "id": "a10b4cd1", "metadata": {}, "source": [ "## GRPO Rewards\n", "\n", "`json_format_reward` is cheap and runs for every completion. `endpoint_transition_reward` is slower because it calls the live Space: it replays the prefix actions, sends the generated action, and returns a reward based on environment reward, validity, violations, and bookings." 
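, "\n", "\n", "The shaping arithmetic is easy to trace by hand. The next cell walks one hypothetical transition through it; the bonus and penalty constants match `endpoint_transition_reward` as defined below, while the transition values themselves are assumed." ] }, { "cell_type": "code", "execution_count": null, "id": "d5a3c9e2", "metadata": {}, "outputs": [], "source": [ "# Worked example of the reward shaping below on an assumed transition:\n",
"# environment reward 0.2, one new booking, no new violation, episode not done.\n",
"env_reward = 0.2\n",
"shaped = env_reward + 0.15  # flat bonus for a parseable action that was applied\n",
"shaped += 1.0               # a new booking appeared after the action\n",
"# shaped -= 0.75            # would apply if a new violation appeared\n",
"# shaped += 0.5             # would apply if the episode ended\n",
"shaped = max(-2.0, min(2.0, shaped))  # clamp to [-2.0, 2.0] as in the reward function\n",
"print(shaped)  # 1.35"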
] }, { "cell_type": "code", "execution_count": null, "id": "e7bdfc62", "metadata": {}, "outputs": [], "source": [ "def completion_text(completion: Any) -> str:\n", " if isinstance(completion, str):\n", " return completion\n", " if isinstance(completion, list) and completion and isinstance(completion[0], dict):\n", " return str(completion[0].get(\"content\", \"\"))\n", " return str(completion)\n", "\n", "def parse_json_action(text: str) -> dict[str, Any]:\n", " text = completion_text(text).strip()\n", " start = text.find(\"{\")\n", " end = text.rfind(\"}\")\n", " if start == -1 or end == -1 or end <= start:\n", " raise ValueError(\"completion does not contain a JSON object\")\n", " action = json.loads(text[start : end + 1])\n", " if action.get(\"action_type\") == \"assistant_message\":\n", " if not str(action.get(\"assistant_message\", \"\")).strip():\n", " raise ValueError(\"assistant_message action needs assistant_message\")\n", " return {\n", " \"action_type\": \"assistant_message\",\n", " \"assistant_message\": str(action[\"assistant_message\"]),\n", " }\n", " if action.get(\"action_type\") == \"tool_call\":\n", " if not str(action.get(\"tool_name\", \"\")).strip():\n", " raise ValueError(\"tool_call action needs tool_name\")\n", " args = action.get(\"tool_arguments\", {})\n", " if not isinstance(args, dict):\n", " raise ValueError(\"tool_arguments must be an object\")\n", " return {\n", " \"action_type\": \"tool_call\",\n", " \"tool_name\": str(action[\"tool_name\"]),\n", " \"tool_arguments\": args,\n", " }\n", " raise ValueError(\"action_type must be assistant_message or tool_call\")\n", "\n", "def json_format_reward(completions, **kwargs) -> list[float]:\n", " rewards = []\n", " for completion in completions:\n", " try:\n", " parse_json_action(completion_text(completion))\n", " rewards.append(0.25)\n", " except Exception:\n", " rewards.append(-1.0)\n", " return rewards\n", "\n", "async def score_one_completion(\n", " completion: Any,\n", " scenario_id: str,\n", " seed: int,\n", " prefix_actions_json: str,\n", ") -> float:\n", " try:\n", " action = parse_json_action(completion_text(completion))\n", " prefix_actions = json.loads(prefix_actions_json)\n", " except Exception:\n", " return -1.0\n", "\n", " try:\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(scenario_id, seed=int(seed))\n", " for prefix_action in prefix_actions:\n", " obs = await env.step(prefix_action)\n", " if obs.get(\"done\"):\n", " return -0.5\n", "\n", " before_violations = len(obs.get(\"violations\", []))\n", " before_bookings = len(obs.get(\"booked_visits\", []))\n", " obs = await env.step(action)\n", "\n", " reward = float(obs.get(\"reward\") or obs.get(\"step_reward\") or 0.0)\n", " reward += 0.15\n", " if len(obs.get(\"violations\", [])) > before_violations:\n", " reward -= 0.75\n", " if len(obs.get(\"booked_visits\", [])) > before_bookings:\n", " reward += 1.0\n", " if obs.get(\"done\"):\n", " reward += 0.5\n", " return float(max(-2.0, min(2.0, reward)))\n", " except Exception as exc:\n", " print(\"endpoint reward error:\", repr(exc))\n", " return -1.0\n", "\n", "async def score_completion_batch(completions, scenario_id, seed, prefix_actions_json) -> list[float]:\n", " tasks = [\n", " score_one_completion(c, s, int(sd), p)\n", " for c, s, sd, p in zip(completions, scenario_id, seed, prefix_actions_json)\n", " ]\n", " return list(await asyncio.gather(*tasks))\n", "\n", "def run_async_blocking(coro):\n", " try:\n", " asyncio.get_running_loop()\n", " except RuntimeError:\n", " return 
asyncio.run(coro)\n", "\n", " result: dict[str, Any] = {}\n", " def runner():\n", " try:\n", " result[\"value\"] = asyncio.run(coro)\n", " except Exception as exc:\n", " result[\"error\"] = exc\n", "\n", " thread = threading.Thread(target=runner, daemon=True)\n", " thread.start()\n", " thread.join()\n", " if \"error\" in result:\n", " raise result[\"error\"]\n", " return result[\"value\"]\n", "\n", "def endpoint_transition_reward(completions, scenario_id, seed, prefix_actions_json, **kwargs) -> list[float]:\n", " return run_async_blocking(score_completion_batch(completions, scenario_id, seed, prefix_actions_json))\n", "\n", "# Quick reward smoke test with known reference actions from the collected dataset.\n", "sample = train_dataset.select(range(min(2, len(train_dataset))))\n", "endpoint_transition_reward(\n", " completions=sample[\"reference_action_json\"],\n", " scenario_id=sample[\"scenario_id\"],\n", " seed=sample[\"seed\"],\n", " prefix_actions_json=sample[\"prefix_actions_json\"],\n", ")" ] }, { "cell_type": "markdown", "id": "d286bdc7", "metadata": {}, "source": [ "## Train with GRPO\n", "\n", "The endpoint reward is network-bound, so keep `num_generations`, dataset size, and max steps small until the loop is stable. Increase them once you see valid JSON actions and non-negative endpoint rewards." ] }, { "cell_type": "code", "execution_count": null, "id": "a6323589", "metadata": {}, "outputs": [], "source": [ "import inspect\n", "\n", "from peft import LoraConfig\n", "from trl import GRPOConfig, GRPOTrainer\n", "\n", "MODEL_NAME = \"Qwen/Qwen2.5-0.5B-Instruct\"\n", "OUTPUT_DIR = \"flatmate-rl-grpo-policy\"\n", "\n", "peft_config = LoraConfig(\n", " r=16,\n", " lora_alpha=32,\n", " lora_dropout=0.05,\n", " bias=\"none\",\n", " task_type=\"CAUSAL_LM\",\n", " target_modules=[\"q_proj\", \"k_proj\", \"v_proj\", \"o_proj\", \"gate_proj\", \"up_proj\", \"down_proj\"],\n", ")\n", "\n", "def make_grpo_config(**kwargs):\n", " valid = set(inspect.signature(GRPOConfig.__init__).parameters)\n", " filtered = {key: value for key, value in kwargs.items() if key in valid}\n", " skipped = sorted(set(kwargs) - set(filtered))\n", " if skipped:\n", " print(\"Skipping unsupported GRPOConfig fields for this TRL version:\", skipped)\n", " return GRPOConfig(**filtered)\n", "\n", "training_args = make_grpo_config(\n", " output_dir=OUTPUT_DIR,\n", " learning_rate=1e-5,\n", " per_device_train_batch_size=1,\n", " gradient_accumulation_steps=4,\n", " num_generations=4,\n", " max_prompt_length=1536,\n", " max_completion_length=160,\n", " max_steps=30,\n", " logging_steps=1,\n", " save_steps=15,\n", " save_total_limit=2,\n", " report_to=\"none\",\n", ")\n", "\n", "trainer = GRPOTrainer(\n", " model=MODEL_NAME,\n", " reward_funcs=[json_format_reward, endpoint_transition_reward],\n", " args=training_args,\n", " train_dataset=train_dataset,\n", " peft_config=peft_config,\n", ")\n", "\n", "train_result = trainer.train()\n", "train_log_history = trainer.state.log_history\n", "trainer.save_model(OUTPUT_DIR)\n", "train_result" ] }, { "cell_type": "markdown", "id": "da41a231", "metadata": {}, "source": [ "## Training Log\n", "\n", "Plot logged GRPO reward and loss curves over optimizer steps." 
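, "\n", "\n", "Log row keys vary across TRL versions (per-function columns such as `rewards/json_format_reward` may or may not be present), so the next cell peeks at the raw rows before the plotting cell relies on any particular key." ] }, { "cell_type": "code", "execution_count": null, "id": "e6b4d0f3", "metadata": {}, "outputs": [], "source": [ "# Inspect the first few trainer log rows to see which metric keys this\n",
"# TRL version actually emits.\n",
"for row in train_log_history[:3]:\n",
"    print(sorted(row.keys()))"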
] }, { "cell_type": "code", "execution_count": null, "id": "a6c8071d", "metadata": {}, "outputs": [], "source": [ "import json\n",
"from pathlib import Path\n",
"\n",
"import matplotlib.pyplot as plt\n",
"import pandas as pd\n",
"\n",
"log_path = Path(OUTPUT_DIR) / \"train_log_history.json\"\n",
"log_path.parent.mkdir(parents=True, exist_ok=True)\n",
"log_path.write_text(json.dumps(train_log_history, indent=2))\n",
"\n",
"def plot_training_log(log_history, title: str = \"GRPO training log\"):\n",
"    df = pd.DataFrame(log_history)\n",
"    if df.empty:\n",
"        print(\"No trainer log rows found yet.\")\n",
"        return df\n",
"    metric_cols = [col for col in [\"loss\", \"reward\", \"rewards/json_format_reward\", \"rewards/endpoint_transition_reward\", \"kl\"] if col in df.columns]\n",
"    if not metric_cols:\n",
"        metric_cols = [col for col in df.columns if \"reward\" in col or col in {\"loss\", \"kl\"}]\n",
"    if not metric_cols or \"step\" not in df.columns:\n",
"        print(\"No plottable step metrics found. Available columns:\", list(df.columns))\n",
"        return df\n",
"    axes = df.dropna(subset=[\"step\"]).plot(\n",
"        x=\"step\",\n",
"        y=metric_cols,\n",
"        marker=\"o\",\n",
"        figsize=(9, 4),\n",
"        title=title,\n",
"    )\n",
"    axes.set_xlabel(\"optimizer step\")\n",
"    axes.grid(True, alpha=0.3)\n",
"    plt.show()\n",
"    return df\n",
"\n",
"train_log_df = plot_training_log(train_log_history)\n",
"train_log_df.tail()" ] }, { "cell_type": "markdown", "id": "c013682e", "metadata": {}, "source": [ "## Evaluate Base and GRPO Policies" ] }, { "cell_type": "code", "execution_count": null, "id": "d4cc3bc3", "metadata": {}, "outputs": [], "source": [ "import torch\n",
"from peft import AutoPeftModelForCausalLM\n",
"from transformers import AutoModelForCausalLM, AutoTokenizer\n",
"\n",
"tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, trust_remote_code=True)\n",
"if tokenizer.pad_token is None:\n",
"    tokenizer.pad_token = tokenizer.eos_token\n",
"\n",
"base_model_for_eval = AutoModelForCausalLM.from_pretrained(\n",
"    MODEL_NAME,\n",
"    trust_remote_code=True,\n",
"    device_map=\"auto\",\n",
")\n",
"base_model_for_eval.eval()\n",
"base_model_for_eval.config.use_cache = False\n",
"\n",
"loaded_model_for_eval = AutoPeftModelForCausalLM.from_pretrained(OUTPUT_DIR, device_map=\"auto\")\n",
"loaded_model_for_eval.eval()\n",
"loaded_model_for_eval.config.use_cache = False\n",
"active_model = loaded_model_for_eval\n",
"print(f\"Loaded base model from {MODEL_NAME}\")\n",
"print(f\"Loaded saved GRPO model from {OUTPUT_DIR}\")\n",
"\n",
"generation_stats = {\"fallbacks\": 0, \"parse_errors\": []}\n",
"\n",
"\n",
"def safe_model_action(obs: dict[str, Any], log_bad: bool = False) -> dict[str, Any]:\n",
"    # Prefix the completion with \"{\" so the model only has to finish a JSON object.\n",
"    prompt = prompt_from_observation(obs) + \"{\"\n",
"    inputs = tokenizer(prompt, return_tensors=\"pt\", truncation=True, max_length=1536).to(active_model.device)\n",
"    active_model.generation_config.do_sample = False\n",
"    active_model.generation_config.temperature = None\n",
"    active_model.generation_config.top_p = None\n",
"    active_model.generation_config.top_k = None\n",
"    with torch.no_grad():\n",
"        output = active_model.generate(\n",
"            **inputs,\n",
"            max_new_tokens=80,\n",
"            do_sample=False,\n",
"            repetition_penalty=1.15,\n",
"            no_repeat_ngram_size=3,\n",
"            eos_token_id=tokenizer.eos_token_id,\n",
"            pad_token_id=tokenizer.eos_token_id,\n",
"        )\n",
"    completion_tail = tokenizer.decode(output[0][inputs[\"input_ids\"].shape[-1]:], 
skip_special_tokens=True)\n", " completion = \"{\" + completion_tail\n", " try:\n", " return parse_json_action(completion)\n", " except Exception as exc:\n", " generation_stats[\"fallbacks\"] += 1\n", " if len(generation_stats[\"parse_errors\"]) < 5:\n", " generation_stats[\"parse_errors\"].append({\"error\": str(exc), \"completion\": completion[:300]})\n", " if log_bad:\n", " print(f\"bad generation, using fallback: {exc}\")\n", " print(\"raw completion:\", repr(completion[:300]))\n", " fallback = heuristic_action(obs)\n", " if fallback is None:\n", " fallback = {\"action_type\": \"assistant_message\", \"assistant_message\": \"Could you confirm the details needed for scheduling?\"}\n", " return fallback\n", "\n", "def heuristic_policy(obs: dict[str, Any]) -> dict[str, Any]:\n", " action = heuristic_action(obs)\n", " if action is None:\n", " return {\"action_type\": \"assistant_message\", \"assistant_message\": \"Could you confirm the details needed for scheduling?\"}\n", " return action\n", "\n", "async def evaluate_policy(policy_fn, label: str, scenarios=SCENARIOS, seeds=(123, 124), max_steps: int = 20, verbose: bool = False):\n", " rows = []\n", " for scenario_id in scenarios:\n", " for seed in seeds:\n", " before_fallbacks = generation_stats[\"fallbacks\"] if \"generation_stats\" in globals() else 0\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(scenario_id, seed=seed)\n", " total_reward = 0.0\n", " steps = 0\n", " for step_idx in range(max_steps):\n", " action = policy_fn(obs)\n", " if verbose:\n", " print(label, scenario_id, seed, step_idx, action)\n", " obs = await env.step(action)\n", " steps = step_idx + 1\n", " total_reward += float(obs.get(\"reward\") or obs.get(\"step_reward\") or 0.0)\n", " if obs.get(\"done\"):\n", " break\n", " after_fallbacks = generation_stats[\"fallbacks\"] if \"generation_stats\" in globals() else before_fallbacks\n", " rows.append({\n", " \"policy\": label,\n", " \"scenario_id\": scenario_id,\n", " \"seed\": seed,\n", " \"total_reward\": total_reward,\n", " \"done\": bool(obs.get(\"done\")),\n", " \"bookings\": len(obs.get(\"booked_visits\", [])),\n", " \"violations\": len(obs.get(\"violations\", [])),\n", " \"steps\": steps,\n", " \"fallbacks\": after_fallbacks - before_fallbacks,\n", " })\n", " return rows\n", "\n", "def raw_generate_action_text(obs: dict[str, Any]) -> str:\n", " prompt = prompt_from_observation(obs) + \"{\"\n", " inputs = tokenizer(prompt, return_tensors=\"pt\", truncation=True, max_length=1536).to(active_model.device)\n", " active_model.generation_config.do_sample = False\n", " active_model.generation_config.temperature = None\n", " active_model.generation_config.top_p = None\n", " active_model.generation_config.top_k = None\n", " with torch.no_grad():\n", " output = active_model.generate(\n", " **inputs,\n", " max_new_tokens=80,\n", " do_sample=False,\n", " repetition_penalty=1.15,\n", " no_repeat_ngram_size=3,\n", " eos_token_id=tokenizer.eos_token_id,\n", " pad_token_id=tokenizer.eos_token_id,\n", " )\n", " return \"{\" + tokenizer.decode(output[0][inputs[\"input_ids\"].shape[-1]:], skip_special_tokens=True)\n", "\n", "async def sanity_check_generations(limit: int = 4):\n", " rows = []\n", " for scenario_id in SCENARIOS[:limit]:\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(scenario_id, seed=321)\n", " raw = raw_generate_action_text(obs)\n", " try:\n", " parsed = parse_json_action(raw)\n", " ok = True\n", " except Exception as exc:\n", " parsed = str(exc)\n", " ok = False\n", " 
rows.append({\"scenario_id\": scenario_id, \"json_ok\": ok, \"raw\": raw[:240], \"parsed_or_error\": parsed})\n", " return pd.DataFrame(rows)\n", "\n", "async def run_inference_each_task(policy_fn, label: str, seed: int = 321, max_steps: int = 20):\n", " rows = []\n", " for scenario_id in SCENARIOS:\n", " print(f\"\\n=== {label}: {scenario_id} ===\")\n", " async with FlatmateEndpoint() as env:\n", " obs = await env.reset(scenario_id, seed=seed)\n", " total_reward = 0.0\n", " steps = 0\n", " before_fallbacks = generation_stats[\"fallbacks\"]\n", " for step_idx in range(max_steps):\n", " action = policy_fn(obs)\n", " print(f\"step={step_idx:02d} action={action}\")\n", " obs = await env.step(action)\n", " steps = step_idx + 1\n", " total_reward += float(obs.get(\"reward\") or obs.get(\"step_reward\") or 0.0)\n", " if obs.get(\"done\"):\n", " break\n", " result = {\n", " \"policy\": label,\n", " \"scenario_id\": scenario_id,\n", " \"seed\": seed,\n", " \"total_reward\": total_reward,\n", " \"done\": bool(obs.get(\"done\")),\n", " \"bookings\": len(obs.get(\"booked_visits\", [])),\n", " \"violations\": len(obs.get(\"violations\", [])),\n", " \"steps\": steps,\n", " \"fallbacks\": generation_stats[\"fallbacks\"] - before_fallbacks,\n", " }\n", " print(\"result=\", result)\n", " rows.append(result)\n", " return pd.DataFrame(rows)\n", "\n", "generation_stats = {\"fallbacks\": 0, \"parse_errors\": []}\n", "active_model = base_model_for_eval\n", "base_generation_sanity_df = await sanity_check_generations()\n", "base_generation_sanity_df[\"model\"] = \"base\"\n", "base_per_task_inference_df = await run_inference_each_task(safe_model_action, \"base_model\")\n", "base_model_eval = await evaluate_policy(safe_model_action, \"base_model\")\n", "base_stats = dict(generation_stats)\n", "\n", "active_model = loaded_model_for_eval\n", "generation_stats = {\"fallbacks\": 0, \"parse_errors\": []}\n", "loaded_generation_sanity_df = await sanity_check_generations()\n", "loaded_generation_sanity_df[\"model\"] = \"grpo_loaded\"\n", "loaded_per_task_inference_df = await run_inference_each_task(safe_model_action, \"grpo_loaded\")\n", "per_task_inference_df = pd.concat([base_per_task_inference_df, loaded_per_task_inference_df], ignore_index=True)\n", "loaded_stats = dict(generation_stats)\n", "\n", "generation_sanity_df = pd.concat([base_generation_sanity_df, loaded_generation_sanity_df], ignore_index=True)\n", "print(\"base_generation_stats\", base_stats)\n", "print(\"loaded_generation_stats\", loaded_stats)\n", "if loaded_stats[\"fallbacks\"] > 0:\n", " print(\"WARNING: loaded fine-tuned model produced malformed JSON and used fallback.\")\n", "\n", "heuristic_eval = await evaluate_policy(heuristic_policy, \"heuristic\")\n", "active_model = loaded_model_for_eval\n", "grpo_eval = await evaluate_policy(safe_model_action, \"grpo_loaded\")\n", "eval_rows = heuristic_eval + base_model_eval + grpo_eval\n", "eval_df = pd.DataFrame(eval_rows)\n", "eval_df" ] }, { "cell_type": "markdown", "id": "644fd566", "metadata": {}, "source": [ "## Performance Comparison\n", "\n", "Compare heuristic rollout behavior against the trained GRPO policy on the same scenarios and seeds." 
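, " The evaluation rows also include the untrained base model, so the summary covers three policies: `heuristic`, `base_model`, and `grpo_loaded`.\n", "\n", "The next cell adds an optional per-scenario view (a small sketch over the `eval_df` built above) before the aggregate bars." ] }, { "cell_type": "code", "execution_count": null, "id": "f7c5e1a4", "metadata": {}, "outputs": [], "source": [ "# Optional per-scenario view: mean total reward for each policy on each\n",
"# scenario, using the eval_df assembled in the evaluation cell above.\n",
"pivot = eval_df.pivot_table(index=\"scenario_id\", columns=\"policy\", values=\"total_reward\", aggfunc=\"mean\")\n",
"pivot.round(3)"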
] }, { "cell_type": "code", "execution_count": null, "id": "0162e4ad", "metadata": {}, "outputs": [], "source": [ "def plot_policy_comparison(eval_df, title: str = \"Policy comparison: heuristic vs base vs GRPO\"):\n",
"    if eval_df is None or eval_df.empty or \"policy\" not in eval_df.columns:\n",
"        print(\"eval_df is empty or missing a policy column; evaluation may have been skipped.\")\n",
"        if \"per_task_inference_df\" in globals() and not per_task_inference_df.empty:\n",
"            print(\"Plotting per_task_inference_df instead. This is loaded-model rollout only, not a policy comparison.\")\n",
"            display(per_task_inference_df)\n",
"            ax = per_task_inference_df.set_index(\"scenario_id\")[[\"total_reward\", \"bookings\", \"violations\", \"fallbacks\"]].plot(\n",
"                kind=\"bar\",\n",
"                subplots=True,\n",
"                layout=(2, 2),\n",
"                figsize=(10, 7),\n",
"                legend=False,\n",
"                title=\"GRPO loaded-model per-task inference\",\n",
"            )\n",
"            for axis in ax.ravel():\n",
"                axis.grid(axis=\"y\", alpha=0.3)\n",
"                axis.set_xlabel(\"\")\n",
"            plt.tight_layout()\n",
"            plt.show()\n",
"        return pd.DataFrame()\n",
"\n",
"    # Aggregate fallbacks only when the column exists in eval_df, so the\n",
"    # summary never reports a stand-in value under that name.\n",
"    agg_kwargs = dict(\n",
"        avg_reward=(\"total_reward\", \"mean\"),\n",
"        completion_rate=(\"done\", \"mean\"),\n",
"        avg_bookings=(\"bookings\", \"mean\"),\n",
"        avg_violations=(\"violations\", \"mean\"),\n",
"        avg_steps=(\"steps\", \"mean\"),\n",
"    )\n",
"    if \"fallbacks\" in eval_df.columns:\n",
"        agg_kwargs[\"avg_fallbacks\"] = (\"fallbacks\", \"mean\")\n",
"    summary = eval_df.groupby(\"policy\", as_index=True).agg(**agg_kwargs).sort_index()\n",
"    plot_cols = [\"avg_reward\", \"completion_rate\", \"avg_bookings\", \"avg_violations\"]\n",
"    if \"avg_fallbacks\" in summary.columns:\n",
"        plot_cols.append(\"avg_fallbacks\")\n",
"    axes = summary[plot_cols].plot(\n",
"        kind=\"bar\",\n",
"        subplots=True,\n",
"        layout=(3, 2),\n",
"        figsize=(10, 9),\n",
"        legend=False,\n",
"        title=title,\n",
"    )\n",
"    for ax in axes.ravel():\n",
"        ax.grid(axis=\"y\", alpha=0.3)\n",
"        ax.set_xlabel(\"\")\n",
"    plt.tight_layout()\n",
"    plt.show()\n",
"    return summary\n",
"\n",
"comparison_summary = plot_policy_comparison(eval_df)\n",
"comparison_summary" ] }, { "cell_type": "code", "execution_count": null, "id": "ee2e4887", "metadata": {}, "outputs": [], "source": [ "# Optional Hub upload after training.\n",
"# from huggingface_hub import notebook_login\n",
"# notebook_login()\n",
"# trainer.push_to_hub(\"flatmate-rl-grpo-policy\")" ] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11" } }, "nbformat": 4, "nbformat_minor": 5 }