Fix V4.2: GDPO + IWU now active in training reward path (not just monitoring)
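Condensed sketch of the mechanism this commit moves into the training path: per-task z-score normalization (GDPO-style) plus the IWU task weight are applied inside the reward function itself, so TRL's GRPO trainer receives already-normalized, weighted rewards without any trainer changes, while eval keeps reporting raw per-task scores via `commerce_reward_fn_raw`. This is a minimal stand-alone illustration, not the notebook code: `classify`, `score`, and `task_weights` below are stand-ins for the notebook's `_classify_task_type`, the per-task `reward_*` functions, and `_task_weights`.

```python
# Sketch: GDPO per-task normalization + IWU weighting inside the reward call.
from statistics import mean, stdev

task_weights = {"extraction": 0.25, "sql_qa": 0.25, "insights": 0.25, "push": 0.25}

def classify(prompt: str) -> str:
    # stand-in: the notebook keys off task markers in the (Portuguese) prompt text
    return "sql_qa" if "select" in prompt.lower() else "insights"

def score(task: str, completion: str) -> float:
    # stand-in for the per-task reward functions (each returns a value in [0, 1])
    return min(1.0, len(completion) / 100.0)

def reward_fn(completions, prompts):
    """Per-sample rewards, z-scored within each task group (GDPO) and scaled by
    the current task weight (IWU), then shifted so the minimum is non-negative."""
    if not completions:
        return []
    tasks = [classify(p) for p in prompts]
    raw = [score(t, c) for t, c in zip(tasks, completions)]

    # group sample indices by task type
    groups = {}
    for i, t in enumerate(tasks):
        groups.setdefault(t, []).append(i)

    out = [0.0] * len(raw)
    for t, idxs in groups.items():
        vals = [raw[i] for i in idxs]
        mu = mean(vals)
        sd = stdev(vals) if len(vals) > 1 else 0.0
        normed = [(v - mu) / sd if sd > 1e-8 else 0.0 for v in vals]
        w = task_weights.get(t, 0.25)
        for j, i in enumerate(idxs):
            out[i] = normed[j] * w

    # shift to non-negative; within-batch advantage ordering is unchanged
    lo = min(out)
    return [r - lo for r in out] if lo < 0 else out
```

In the notebook this logic lives in `commerce_reward_fn` (training reward path), while the eval callback and the reward audit cell keep using the un-normalized `commerce_reward_fn_raw` so the logged per-task means stay interpretable.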
notebooks/v4_2_instruct_grpo.ipynb
CHANGED
@@ -80,14 +80,14 @@
{
"cell_type": "markdown",
"metadata": {},
-
"source": "---\n\n## Cell 7: Reward Functions V2\n\n**V4.2 changes (Change 3 + Change 5):**\n\n### SQL Reward Overhaul (Change 3)\n- **Tier 1 (0.30):** SQL structure detected β requires β₯3 SQL keywords (SELECT, FROM, WHERE, etc.)\n- **Tier 2 (0.25):** Answer has BOTH query AND explanation (not just domain vocabulary)\n- **Tier 3 (0.25):** Numerical specificity (concrete data in answer)\n- **Tier 4 (0.20):** Portuguese business domain coherence\n\n### GDPO Per-Component Normalization (Change 5)\n- `commerce_reward_fn`
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
-
"source": "import json_repair # V4.1: robust JSON parser for LLM output\n\n\ndef strip_think(text: str) -> str:\n \"\"\"Remove <think>...</think> block, return the answer portion.\"\"\"\n return re.sub(r\"<think>.*?</think>\", \"\", text, flags=re.DOTALL).strip()\n\n\ndef has_think_block(text: str) -> bool:\n return bool(re.search(r\"<think>.+</think>\", text, flags=re.DOTALL))\n\n\ndef _classify_task_type(prompt_text: str) -> str:\n p = prompt_text.lower()\n if \"retorne um objeto json\" in p or \"extraia dados\" in p or \"json\" in p:\n return \"extraction\"\n elif \"notificaΓ§Γ£o push\" in p or \"notificaΓ§Γ£o de reengajamento\" in p:\n return \"push\"\n elif \"perfil do cliente\" in p or \"retenΓ§Γ£o\" in p or \"anΓ‘lise\" in p or \"insight\" in p:\n return \"insights\"\n else:\n return \"sql_qa\"\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.1: ROBUST JSON PARSER (unchanged)\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef _normalize_pt_decimals(s: str) -> str:\n \"\"\"Convert PT-BR decimals (4,5) to JSON-valid (4.5), only outside quoted strings.\"\"\"\n result, in_string, escape_next = [], False, False\n i = 0\n while i < len(s):\n c = s[i]\n if escape_next:\n result.append(c); escape_next = False; i += 1; continue\n if c == '\\\\' and in_string:\n result.append(c); escape_next = True; i += 1; continue\n if c == '\"':\n in_string = not in_string; result.append(c); i += 1; continue\n if not in_string:\n m = re.match(r'(\\d+),(\\d+)', s[i:])\n if m:\n result.append(m.group(1) + '.' + m.group(2))\n i += len(m.group(0)); continue\n result.append(c); i += 1\n return ''.join(result)\n\n\ndef _extract_json(text: str) -> dict | None:\n \"\"\"Robust JSON extraction for Portuguese LLM output.\"\"\"\n stripped = re.sub(r'^```(?:json)?\\s*|\\s*```$', '', text.strip(), flags=re.MULTILINE).strip()\n for attempt in [stripped, _normalize_pt_decimals(stripped)]:\n try:\n result = json.loads(attempt)\n if isinstance(result, dict):\n return result\n except (json.JSONDecodeError, TypeError):\n pass\n normalized = _normalize_pt_decimals(stripped)\n try:\n result = json_repair.repair_json(normalized, return_objects=True)\n if isinstance(result, dict):\n return result\n except Exception:\n pass\n try:\n result = json_repair.repair_json(stripped, return_objects=True)\n if isinstance(result, dict):\n return result\n except Exception:\n pass\n return None\n\n\ndef reward_extraction(completion: str) -> float:\n \"\"\"Continuous reward for extraction tasks (max 1.0). 
Unchanged from V4.1.\"\"\"\n answer = strip_think(completion)\n data = _extract_json(answer)\n\n if data is None:\n if \"{\" in answer and \"}\" in answer:\n return 0.05\n return 0.0\n\n if not isinstance(data, dict):\n return 0.1\n\n score = 0.3 # valid JSON object\n\n # Schema completeness (0.3 total)\n present = sum(1 for f in EXTRACTION_FIELDS if f in data)\n score += 0.3 * (present / len(EXTRACTION_FIELDS))\n\n # Value validity (0.4 total)\n checks_passed = 0\n checks_total = 0\n\n for field, validator in [\n (\"sentiment\", lambda v: isinstance(v, str) and v in VALID_SENTIMENTS),\n (\"complaint_category\", lambda v: isinstance(v, str) and v in VALID_CATEGORIES),\n (\"churn_risk\", lambda v: isinstance(v, str) and v in VALID_CHURN),\n (\"repeat_intent\", lambda v: isinstance(v, str) and v in VALID_REPEAT),\n (\"sentiment_score\", lambda v: isinstance(v, (int, float)) and 1 <= v <= 5),\n ]:\n checks_total += 1\n if field in data and validator(data[field]):\n checks_passed += 1\n\n for bool_field in (\"delivery_issue\", \"product_issue\", \"seller_issue\", \"would_recommend\"):\n checks_total += 1\n if bool_field in data and isinstance(data[bool_field], bool):\n checks_passed += 1\n\n if checks_total > 0:\n score += 0.4 * (checks_passed / checks_total)\n\n return min(score, 1.0)\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: SQL REWARD V2 β Validation-aware (Change 3)\n# Replaces heuristic vocabulary matching with structural analysis.\n# Expected: distinguishes \"mentions SQL keywords\" from \"produces correct answer\"\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef reward_sql_qa(completion: str) -> float:\n \"\"\"V4.2: Validation-aware SQL Q&A reward (max 1.0).\n \n Tier 1 (0.30): SQL structure detected (β₯3 keywords or code block)\n Tier 2 (0.25): Answer has both query and explanation\n Tier 3 (0.25): Numerical specificity (concrete data)\n Tier 4 (0.20): Portuguese business domain coherence\n \"\"\"\n answer = strip_think(completion)\n if not answer.strip():\n return 0.0\n\n score = 0.0\n\n # Tier 1 (0.30): SQL structure detected\n sql_keywords = [\"SELECT\", \"FROM\", \"WHERE\", \"GROUP BY\", \"ORDER BY\",\n \"JOIN\", \"HAVING\", \"COUNT\", \"AVG\", \"SUM\"]\n sql_found = sum(1 for kw in sql_keywords if kw in answer.upper())\n if sql_found >= 3:\n score += 0.30\n elif sql_found >= 1:\n score += 0.15\n\n # Tier 2 (0.25): Answer has both query AND explanation\n has_query = bool(re.search(r\"```sql|SELECT.{5,}FROM\", answer, re.IGNORECASE | re.DOTALL))\n has_answer = any(w in answer.lower() for w in [\"resultado\", \"total\", \"mΓ©dia\", \"mostra\", \"portanto\"])\n if has_query and has_answer:\n score += 0.25\n elif has_query or has_answer:\n score += 0.12\n\n # Tier 3 (0.25): Numerical specificity\n numbers = re.findall(r\"\\d+(?:[.,]\\d+)?(?:\\s*%)?\", answer)\n score += min(0.25, 0.05 * len(numbers))\n\n # Tier 4 (0.20): Portuguese business domain coherence\n pt_domain = [\"pedidos\", \"clientes\", \"vendedores\", \"produtos\", \"avaliaΓ§Γ£o\",\n \"entrega\", \"reclamaΓ§Γ£o\", \"satisfaΓ§Γ£o\", \"categoria\", \"perΓodo\"]\n score += min(0.20, 0.04 * sum(1 for w in pt_domain if w in answer.lower()))\n\n return min(score, 1.0)\n\n\ndef reward_insights(completion: str) -> float:\n \"\"\"Continuous reward for insights (max 1.0). 
Unchanged from V4.1.\"\"\"\n answer = strip_think(completion)\n if not answer.strip():\n return 0.0\n\n score = 0.0\n\n action_words = [\"recomend\", \"implement\", \"melhor\", \"reduzir\", \"aumentar\",\n \"priorizar\", \"investir\", \"otimizar\", \"estratΓ©gi\", \"aΓ§Γ£o\"]\n matches = sum(1 for w in action_words if w in answer.lower())\n score += min(0.4, 0.08 * matches)\n\n length = len(answer)\n if 100 <= length <= 800:\n score += 0.3\n elif length > 0:\n score += 0.3 * max(0, 1 - abs(length - 450) / 450)\n\n structure_marks = len(re.findall(r\"^[-β’*]\\s|^\\d+[.)]\\s|^#{1,3}\\s\", answer, re.MULTILINE))\n score += min(0.2, 0.04 * structure_marks)\n\n if any(w in answer.lower() for w in [\"cliente\", \"produto\", \"serviΓ§o\", \"empresa\"]):\n score += 0.1\n\n return min(score, 1.0)\n\n\ndef reward_push(completion: str) -> float:\n \"\"\"Continuous reward for push notifications (max 1.0). Unchanged from V4.1.\"\"\"\n answer = strip_think(completion).strip()\n if not answer:\n return 0.0\n\n length = len(answer)\n if length <= 120:\n length_score = 0.5\n else:\n length_score = 0.5 * max(0, 1 - (length - 120) / 120)\n\n pt_markers = re.findall(r\"[ãçéΓͺΓ³ΓΊΓ’Γ΅]|vocΓͺ|para|como|seu|sua|oferta|desconto|produto\",\n answer, re.IGNORECASE)\n lang_score = min(0.3, 0.03 * len(pt_markers))\n\n generic = [\"olΓ‘\", \"obrigado pela compra\", \"agradecemos\"]\n is_generic = any(g in answer.lower() for g in generic)\n creativity_score = 0.0 if is_generic else 0.2\n\n return min(length_score + lang_score + creativity_score, 1.0)\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: GDPO PER-COMPONENT NORMALIZATION (Change 5)\n# Normalize each reward component independently before aggregation.\n# GDPO (2601.05242) Β§3.1: preserves ~4Γ more distinct advantage groups.\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef gdpo_normalize(component_rewards: dict) -> list:\n \"\"\"Per-component normalization before aggregation (GDPO 2601.05242 Β§3.1).\n \n Args:\n component_rewards: {task_name: [reward_per_sample, ...]} for each component\n \n Returns:\n List of normalized summed rewards, one per sample.\n \"\"\"\n normalized = {}\n for task, rewards in component_rewards.items():\n rewards_t = torch.tensor(rewards, dtype=torch.float32)\n std = rewards_t.std()\n if std > 1e-8:\n normalized[task] = ((rewards_t - rewards_t.mean()) / std).tolist()\n else:\n normalized[task] = [0.0] * len(rewards) # zero-variance group\n # Sum normalized components per sample\n n = len(next(iter(normalized.values())))\n return [sum(normalized[t][i] for t in normalized) for i in range(n)]\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: DYNAMIC TASK WEIGHTING β MT-GRPO IWU (Change 6)\n# Track per-task reward improvement rates, upweight stagnating tasks.\n# MT-GRPO (2602.05547) Β§3.2: prevents easy-task collapse.\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\n_task_weights = {\n \"extraction\": 0.25,\n \"sql_qa\": 0.25,\n \"insights\": 0.25,\n \"push\": 0.25,\n}\n_task_reward_history = {t: [] for t in _task_weights}\n\ndef update_task_weights(step: int, per_task_rewards: dict, update_interval: int = 50):\n \"\"\"MT-GRPO IWU: update task sampling weights based on improvement rate.\n \n Args:\n step: Current training step\n per_task_rewards: {task: mean_reward} from latest eval\n update_interval: Only update every N steps\n \"\"\"\n global _task_weights\n if step % 
update_interval != 0 or step == 0:\n return\n \n for task, reward in per_task_rewards.items():\n if task not in _task_reward_history:\n continue\n _task_reward_history[task].append(reward)\n if len(_task_reward_history[task]) >= 2:\n improvement = _task_reward_history[task][-1] - _task_reward_history[task][-2]\n if improvement < 0.01: # stagnating\n _task_weights[task] = min(0.60, _task_weights[task] * 1.3)\n elif improvement > 0.05: # improving fast\n _task_weights[task] = max(0.10, _task_weights[task] * 0.85)\n \n # Normalize to sum to 1\n total = sum(_task_weights.values())\n _task_weights = {t: w / total for t, w in _task_weights.items()}\n\n\ndef get_task_weighted_indices(dataset, n_samples: int) -> list:\n \"\"\"Sample indices with probability proportional to task weights.\"\"\"\n task_indices = {t: [] for t in _task_weights}\n for i, record in enumerate(dataset):\n user_txt = \" \".join(m[\"content\"] for m in record[\"prompt\"] if m[\"role\"] == \"user\")\n task = _classify_task_type(user_txt)\n if task in task_indices:\n task_indices[task].append(i)\n \n sampled = []\n for task, weight in _task_weights.items():\n n = max(1, int(n_samples * weight))\n pool = task_indices.get(task, [])\n if pool:\n sampled.extend(random.sample(pool, min(n, len(pool))))\n random.shuffle(sampled)\n return sampled[:n_samples]\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# MASTER REWARD FUNCTION β V4.2: returns per-component rewards for GDPO\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef commerce_reward_fn(completions, prompts, **kwargs) -> list:\n \"\"\"Master reward function: dispatches by task type.\n \n V4.2: Also stores per-component rewards in _last_component_rewards\n for GDPO normalization when called externally.\n \"\"\"\n rewards = []\n for completion, prompt in zip(completions, prompts):\n if isinstance(completion, list):\n comp_text = completion[-1][\"content\"] if completion else \"\"\n else:\n comp_text = str(completion)\n\n if isinstance(prompt, list):\n prompt_text = \" \".join(m.get(\"content\", \"\") for m in prompt)\n else:\n prompt_text = str(prompt)\n\n task = _classify_task_type(prompt_text)\n\n if task == \"extraction\":\n rewards.append(reward_extraction(comp_text))\n elif task == \"sql_qa\":\n rewards.append(reward_sql_qa(comp_text))\n elif task == \"insights\":\n rewards.append(reward_insights(comp_text))\n elif task == \"push\":\n rewards.append(reward_push(comp_text))\n else:\n r = 0.2 if comp_text.strip() else 0.0\n rewards.append(r)\n\n return rewards\n\n\ndef commerce_reward_fn_with_components(completions, prompts, **kwargs):\n \"\"\"V4.2: Reward function that also returns per-task component rewards for GDPO.\n \n Returns:\n tuple: (rewards_list, component_dict)\n component_dict: {task: [reward_for_each_sample_of_this_task]}\n \"\"\"\n rewards = []\n components = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n indices = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n \n for idx, (completion, prompt) in enumerate(zip(completions, prompts)):\n if isinstance(completion, list):\n comp_text = completion[-1][\"content\"] if completion else \"\"\n else:\n comp_text = str(completion)\n\n if isinstance(prompt, list):\n prompt_text = \" \".join(m.get(\"content\", \"\") for m in prompt)\n else:\n prompt_text = str(prompt)\n\n task = _classify_task_type(prompt_text)\n\n if task == \"extraction\":\n r = reward_extraction(comp_text)\n elif task == 
\"sql_qa\":\n r = reward_sql_qa(comp_text)\n elif task == \"insights\":\n r = reward_insights(comp_text)\n elif task == \"push\":\n r = reward_push(comp_text)\n else:\n r = 0.2 if comp_text.strip() else 0.0\n task = \"sql_qa\" # default bucket\n\n rewards.append(r)\n if task in components:\n components[task].append(r)\n indices[task].append(idx)\n\n return rewards, components, indices\n\n\nprint(\"β Reward functions defined (V4.2: SQL v2 + GDPO + dynamic weights)\")\nprint(f\" Task weights: {_task_weights}\")"
},
{
"cell_type": "markdown",
@@ -99,7 +99,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
-
"source": "from scipy.stats import spearmanr\n\nAUDIT_PROMPTS_PER_TASK = 5\n\n# ββ Collect audit prompts (5 per task) βββββββββββββββββββββββββββββββββββββββ\naudit_by_type = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\nwith open(TRAIN_FILE) as f:\n for line in f:\n row = json.loads(line)\n convs = row[\"conversations\"]\n prompt_msgs = [m for m in convs if m[\"role\"] in (\"system\", \"user\")]\n if not prompt_msgs:\n continue\n user_text = \" \".join(m[\"content\"] for m in prompt_msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_text)\n if len(audit_by_type[task]) < AUDIT_PROMPTS_PER_TASK:\n audit_by_type[task].append(prompt_msgs)\n\nprint(f\"Audit prompts collected: {', '.join(f'{k}={len(v)}' for k, v in audit_by_type.items())}\")\n\n# ββ Generate completions and score automatically βββββββββββββββββββββββββββββ\nFastLanguageModel.for_inference(model)\n\naudit_auto_scores = []\naudit_tasks = []\naudit_completions = []\n\nfor task_type in [\"extraction\", \"sql_qa\", \"insights\", \"push\"]:\n for msgs in audit_by_type[task_type]:\n msgs = inject_task_system_prompt(msgs, task_type)\n text = tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=MAX_COMPLETION_LENGTH,\n temperature=0.1, # near-deterministic for audit\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r =
},
{
"cell_type": "markdown",
@@ -159,7 +159,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
-
"source": "import wandb\nfrom transformers import TrainerCallback\n\nwandb.login()\nwandb.init(\n project=WANDB_PROJECT,\n name=f\"grpo-v4.2-instruct-0.5B-seed{CURRENT_SEED}-{time.strftime('%Y%m%d-%H%M')}\",\n config={\n \"model_id\": MODEL_ID,\n \"version\": \"v4.2\",\n \"seed\": CURRENT_SEED,\n \"seeds_planned\": SEEDS,\n \"num_generations\": NUM_GENERATIONS,\n \"max_completion_length\": MAX_COMPLETION_LENGTH,\n \"temperature\": TEMPERATURE,\n \"learning_rate\": LEARNING_RATE,\n \"lr_scheduler_type\": LR_SCHEDULER_TYPE,\n \"warmup_ratio\": WARMUP_RATIO,\n \"beta\": BETA,\n \"scale_rewards\": SCALE_REWARDS,\n \"batch_size\": BATCH_SIZE,\n \"grad_accum\": GRAD_ACCUM,\n \"max_steps\": MAX_STEPS,\n \"lora_r\": LORA_R,\n \"lora_alpha\": LORA_ALPHA,\n \"train_prompts\": len(train_dataset),\n \"eval_prompts\": len(eval_dataset),\n \"eval_stratified\": True,\n \"eval_per_task\": EVAL_SAMPLES_PER_TASK,\n \"repetition_penalty_override\": 1.0,\n \"json_parser\": \"json-repair + PT-BR decimal normalizer\",\n \"sql_reward\": \"v2 (validation-aware, 4-tier)\",\n \"gdpo_normalization\": True,\n \"dynamic_task_weighting\": \"MT-GRPO IWU\",\n \"changes_from_v41\": \"stratified eval 65, reward audit, SQL v2, 1500 steps, GDPO, IWU, 3 seeds, best ckpt\",\n },\n)\nprint(f\"β W&B run: {wandb.run.url}\")\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: EvalRewardCallback v2\n# - Uses 65 stratified eval samples (Change 1)\n# - Reports per-task means with 95% CIs (Change 1)\n# - Runs GDPO normalization and logs component stats (Change 5)\n# - Updates dynamic task weights via IWU (Change 6)\n# - Saves best checkpoint explicitly (Change 8)\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\nclass EvalRewardCallbackV2(TrainerCallback):\n def __init__(self, eval_records, reward_fn, patience, delta):\n self.eval_records = eval_records\n self.reward_fn = reward_fn\n self.patience = patience\n self.delta = delta\n self.best_reward = -float(\"inf\")\n self.best_step = 0\n self.no_improve_count = 0\n\n def on_step_end(self, args, state, control, model=None, processing_class=None, **kwargs):\n if state.global_step == 0 or state.global_step % EVAL_STEPS != 0:\n return control\n\n tokenizer_local = processing_class\n if tokenizer_local is None:\n print(\"[EvalRewardCallback] WARNING: tokenizer is None, skipping eval\")\n return control\n\n mean_reward, per_task, per_task_all = self._run_eval(model, tokenizer_local, args)\n improved = mean_reward > self.best_reward + self.delta\n\n # ββ Per-task 95% CIs (Change 1) ββββββββββββββββββββββββββββββββββββββ\n log_data = {\n \"eval/mean_reward\": mean_reward,\n \"eval/best_reward\": max(self.best_reward, mean_reward),\n \"eval/no_improve_count\": self.no_improve_count,\n }\n \n ci_strs = []\n for task_name, task_rewards in per_task_all.items():\n if task_rewards:\n n = len(task_rewards)\n task_mean = sum(task_rewards) / n\n if n > 1:\n task_std = (sum((r - task_mean)**2 for r in task_rewards) / (n - 1)) ** 0.5\n ci_half = 1.96 * task_std / math.sqrt(n)\n else:\n ci_half = 0.0\n log_data[f\"eval/{task_name}\"] = task_mean\n log_data[f\"eval/{task_name}_ci\"] = ci_half\n log_data[f\"eval/{task_name}_n\"] = n\n ci_strs.append(f\"{task_name}={task_mean:.3f}Β±{ci_half:.3f} (n={n})\")\n \n # ββ GDPO per-component stats (Change 5) βββββββββββββββββββββββββββββ\n if per_task_all and all(len(v) > 0 for v in per_task_all.values()):\n try:\n gdpo_rewards = gdpo_normalize(per_task_all)\n 
log_data[\"eval/gdpo_mean\"] = sum(gdpo_rewards) / len(gdpo_rewards)\n log_data[\"eval/gdpo_std\"] = (sum((r - sum(gdpo_rewards)/len(gdpo_rewards))**2 for r in gdpo_rewards) / len(gdpo_rewards)) ** 0.5\n except Exception as e:\n print(f\" [GDPO] normalization error: {e}\")\n \n # ββ Dynamic task weight update (Change 6) βββββββββββββββββββββββββββ\n per_task_means = {}\n for task_name, task_rewards in per_task_all.items():\n if task_rewards:\n per_task_means[task_name] = sum(task_rewards) / len(task_rewards)\n \n update_task_weights(state.global_step, per_task_means, update_interval=EVAL_STEPS)\n \n for task_name, weight in _task_weights.items():\n log_data[f\"sampler/{task_name}_weight\"] = weight\n \n wandb.log(log_data, step=state.global_step)\n\n status = \"β improved\" if improved else f\"β no gain ({self.no_improve_count + 1}/{self.patience})\"\n print(f\"\\n[EvalReward] step={state.global_step} | mean={mean_reward:.4f} | best={self.best_reward:.4f} | {status}\")\n for cs in ci_strs:\n print(f\" {cs}\")\n print(f\" Task weights: {', '.join(f'{t}={w:.3f}' for t, w in _task_weights.items())}\")\n\n if improved:\n self.best_reward = mean_reward\n self.best_step = state.global_step\n self.no_improve_count = 0\n # ββ V4.2: Save best checkpoint explicitly (Change 8) βββββββββββββ\n best_path = ADAPTER_DIR / \"best_checkpoint\"\n best_path.mkdir(parents=True, exist_ok=True)\n model.save_pretrained(str(best_path))\n tokenizer_local.save_pretrained(str(best_path))\n print(f\" β Best checkpoint saved β {best_path} (reward={mean_reward:.4f})\")\n else:\n self.no_improve_count += 1\n if self.no_improve_count >= self.patience:\n print(f\"[EarlyStopping] No improvement for {self.patience} evals. Halting.\")\n control.should_training_stop = True\n return control\n\n def _run_eval(self, model, tokenizer_local, args):\n FastLanguageModel.for_inference(model)\n rewards = []\n per_task_summary = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n per_task_all = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n \n # V4.2: Use ALL stratified eval samples (65), not just 15\n for record in self.eval_records:\n msgs = record[\"prompt\"]\n text = tokenizer_local.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n user_txt = \" \".join(m.get(\"content\", \"\") for m in msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_txt)\n\n inputs = tokenizer_local(text, return_tensors=\"pt\", truncation=True, max_length=args.max_prompt_length).to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=EVAL_MAX_TOKENS,\n temperature=0.1,\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer_local.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r = self.reward_fn([resp], [text])[0]\n rewards.append(r)\n if task in per_task_all:\n per_task_all[task].append(r)\n per_task_summary[task].append(r)\n \n FastLanguageModel.for_training(model)\n mean_r = sum(rewards) / len(rewards) if rewards else 0.0\n return mean_r, per_task_summary, per_task_all\n\n\n# ββ Training ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\nFastLanguageModel.for_training(model)\n\ngrpo_config = GRPOConfig(\n output_dir=str(CHECKPOINT_DIR),\n num_generations=NUM_GENERATIONS,\n scale_rewards=SCALE_REWARDS,\n max_completion_length=MAX_COMPLETION_LENGTH,\n max_steps=MAX_STEPS, # V4.2: 1,500\n temperature=TEMPERATURE,\n beta=BETA,\n num_train_epochs=1,\n per_device_train_batch_size=BATCH_SIZE,\n 
gradient_accumulation_steps=GRAD_ACCUM,\n learning_rate=LEARNING_RATE,\n lr_scheduler_type=LR_SCHEDULER_TYPE,\n warmup_ratio=WARMUP_RATIO,\n fp16=False,\n bf16=True,\n logging_steps=1,\n save_steps=SAVE_STEPS, # V4.2: 100\n save_total_limit=5,\n save_only_model=True,\n report_to=\"wandb\",\n max_prompt_length=MAX_SEQ_LENGTH // 2,\n seed=CURRENT_SEED, # V4.2: per-seed\n remove_unused_columns=False,\n disable_tqdm=True,\n logging_first_step=True,\n)\n\neval_cb = EvalRewardCallbackV2(\n eval_records=list(eval_dataset),\n reward_fn=commerce_reward_fn,\n patience=EARLY_STOPPING_PATIENCE,\n delta=EARLY_STOPPING_DELTA,\n)\n\ntrainer = UnslothGRPOTrainer(\n model=model,\n reward_funcs=commerce_reward_fn,\n args=grpo_config,\n train_dataset=train_dataset,\n processing_class=tokenizer,\n callbacks=[eval_cb],\n)\n\nt_start = time.time()\nresult = trainer.train()\nelapsed = time.time() - t_start\n\nwandb.log({\n \"train/final_loss\": result.training_loss,\n \"train/duration_hours\": elapsed / 3600,\n \"train/total_steps\": result.global_step,\n \"eval/best_reward_final\": eval_cb.best_reward,\n \"eval/best_step\": eval_cb.best_step,\n \"final/task_weights\": _task_weights,\n})\nwandb.finish()\n\nprint(f\"\\n{'='*60}\")\nprint(f\"V4.2 Training Complete (seed={CURRENT_SEED})\")\nprint(f\" Loss: {result.training_loss:.4f}\")\nprint(f\" Steps: {result.global_step}\")\nprint(f\" Duration: {elapsed/3600:.1f}h\")\nprint(f\" Best eval: {eval_cb.best_reward:.4f} (step {eval_cb.best_step})\")\nprint(f\" Final task weights: {_task_weights}\")\nprint(f\"{'='*60}\")"
},
{
"cell_type": "markdown",
@@ -171,7 +171,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
-
"source": "FastLanguageModel.for_inference(model)\n\nval_samples = list(eval_dataset) # All 65 stratified samples\nval_results = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n\nprint(f\"Post-training validation on {len(val_samples)} stratified samples (seed={CURRENT_SEED})\")\nprint(\"-\" * 80)\n\nfor i, record in enumerate(val_samples):\n msgs = record[\"prompt\"]\n user_text = \" \".join(m[\"content\"] for m in msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_text)\n\n text = tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=MAX_COMPLETION_LENGTH,\n temperature=0.1,\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r =
},
{
"cell_type": "markdown",
@@ -80,14 +80,14 @@
{
"cell_type": "markdown",
"metadata": {},
+
"source": "---\n\n## Cell 7: Reward Functions V2\n\n**V4.2 changes (Change 3 + Change 5):**\n\n### SQL Reward Overhaul (Change 3)\n- **Tier 1 (0.30):** SQL structure detected β requires β₯3 SQL keywords (SELECT, FROM, WHERE, etc.)\n- **Tier 2 (0.25):** Answer has BOTH query AND explanation (not just domain vocabulary)\n- **Tier 3 (0.25):** Numerical specificity (concrete data in answer)\n- **Tier 4 (0.20):** Portuguese business domain coherence\n\n### GDPO Per-Component Normalization (Change 5) β ACTIVE IN TRAINING\n- `commerce_reward_fn` applies per-task z-score normalization INSIDE the reward call\n- TRL 0.24.0 calls reward_fn with the full batch β we normalize per-component before returning\n- No trainer modification needed β normalized rewards flow through standard GRPO advantage computation\n- Preserves ~4Γ more distinct advantage groups (GDPO Β§3.1)\n\n### Dynamic Task Weights (Change 6) β ACTIVE IN TRAINING\n- `_task_weights` dict tracks per-task weights, updated by `update_task_weights()` in eval callback\n- Weights are applied as multiplicative scaling INSIDE `commerce_reward_fn` after GDPO normalization\n- Effect: stagnating tasks (e.g. SQL) get amplified reward signal β larger GRPO advantages β more gradient\n- MT-GRPO IWU Β§3.2: prevents easy-task collapse without requiring custom sampling"
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
+
"source": "import json_repair # V4.1: robust JSON parser for LLM output\n\n\ndef strip_think(text: str) -> str:\n \"\"\"Remove <think>...</think> block, return the answer portion.\"\"\"\n return re.sub(r\"<think>.*?</think>\", \"\", text, flags=re.DOTALL).strip()\n\n\ndef has_think_block(text: str) -> bool:\n return bool(re.search(r\"<think>.+</think>\", text, flags=re.DOTALL))\n\n\ndef _classify_task_type(prompt_text: str) -> str:\n p = prompt_text.lower()\n if \"retorne um objeto json\" in p or \"extraia dados\" in p or \"json\" in p:\n return \"extraction\"\n elif \"notificaΓ§Γ£o push\" in p or \"notificaΓ§Γ£o de reengajamento\" in p:\n return \"push\"\n elif \"perfil do cliente\" in p or \"retenΓ§Γ£o\" in p or \"anΓ‘lise\" in p or \"insight\" in p:\n return \"insights\"\n else:\n return \"sql_qa\"\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.1: ROBUST JSON PARSER (unchanged)\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef _normalize_pt_decimals(s: str) -> str:\n \"\"\"Convert PT-BR decimals (4,5) to JSON-valid (4.5), only outside quoted strings.\"\"\"\n result, in_string, escape_next = [], False, False\n i = 0\n while i < len(s):\n c = s[i]\n if escape_next:\n result.append(c); escape_next = False; i += 1; continue\n if c == '\\\\' and in_string:\n result.append(c); escape_next = True; i += 1; continue\n if c == '\"':\n in_string = not in_string; result.append(c); i += 1; continue\n if not in_string:\n m = re.match(r'(\\d+),(\\d+)', s[i:])\n if m:\n result.append(m.group(1) + '.' + m.group(2))\n i += len(m.group(0)); continue\n result.append(c); i += 1\n return ''.join(result)\n\n\ndef _extract_json(text: str) -> dict | None:\n \"\"\"Robust JSON extraction for Portuguese LLM output.\"\"\"\n stripped = re.sub(r'^```(?:json)?\\s*|\\s*```$', '', text.strip(), flags=re.MULTILINE).strip()\n for attempt in [stripped, _normalize_pt_decimals(stripped)]:\n try:\n result = json.loads(attempt)\n if isinstance(result, dict):\n return result\n except (json.JSONDecodeError, TypeError):\n pass\n normalized = _normalize_pt_decimals(stripped)\n try:\n result = json_repair.repair_json(normalized, return_objects=True)\n if isinstance(result, dict):\n return result\n except Exception:\n pass\n try:\n result = json_repair.repair_json(stripped, return_objects=True)\n if isinstance(result, dict):\n return result\n except Exception:\n pass\n return None\n\n\ndef reward_extraction(completion: str) -> float:\n \"\"\"Continuous reward for extraction tasks (max 1.0). 
Unchanged from V4.1.\"\"\"\n answer = strip_think(completion)\n data = _extract_json(answer)\n\n if data is None:\n if \"{\" in answer and \"}\" in answer:\n return 0.05\n return 0.0\n\n if not isinstance(data, dict):\n return 0.1\n\n score = 0.3 # valid JSON object\n\n # Schema completeness (0.3 total)\n present = sum(1 for f in EXTRACTION_FIELDS if f in data)\n score += 0.3 * (present / len(EXTRACTION_FIELDS))\n\n # Value validity (0.4 total)\n checks_passed = 0\n checks_total = 0\n\n for field, validator in [\n (\"sentiment\", lambda v: isinstance(v, str) and v in VALID_SENTIMENTS),\n (\"complaint_category\", lambda v: isinstance(v, str) and v in VALID_CATEGORIES),\n (\"churn_risk\", lambda v: isinstance(v, str) and v in VALID_CHURN),\n (\"repeat_intent\", lambda v: isinstance(v, str) and v in VALID_REPEAT),\n (\"sentiment_score\", lambda v: isinstance(v, (int, float)) and 1 <= v <= 5),\n ]:\n checks_total += 1\n if field in data and validator(data[field]):\n checks_passed += 1\n\n for bool_field in (\"delivery_issue\", \"product_issue\", \"seller_issue\", \"would_recommend\"):\n checks_total += 1\n if bool_field in data and isinstance(data[bool_field], bool):\n checks_passed += 1\n\n if checks_total > 0:\n score += 0.4 * (checks_passed / checks_total)\n\n return min(score, 1.0)\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: SQL REWARD V2 β Validation-aware (Change 3)\n# Replaces heuristic vocabulary matching with structural analysis.\n# Expected: distinguishes \"mentions SQL keywords\" from \"produces correct answer\"\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef reward_sql_qa(completion: str) -> float:\n \"\"\"V4.2: Validation-aware SQL Q&A reward (max 1.0).\n \n Tier 1 (0.30): SQL structure detected (β₯3 keywords or code block)\n Tier 2 (0.25): Answer has both query and explanation\n Tier 3 (0.25): Numerical specificity (concrete data)\n Tier 4 (0.20): Portuguese business domain coherence\n \"\"\"\n answer = strip_think(completion)\n if not answer.strip():\n return 0.0\n\n score = 0.0\n\n # Tier 1 (0.30): SQL structure detected\n sql_keywords = [\"SELECT\", \"FROM\", \"WHERE\", \"GROUP BY\", \"ORDER BY\",\n \"JOIN\", \"HAVING\", \"COUNT\", \"AVG\", \"SUM\"]\n sql_found = sum(1 for kw in sql_keywords if kw in answer.upper())\n if sql_found >= 3:\n score += 0.30\n elif sql_found >= 1:\n score += 0.15\n\n # Tier 2 (0.25): Answer has both query AND explanation\n has_query = bool(re.search(r\"```sql|SELECT.{5,}FROM\", answer, re.IGNORECASE | re.DOTALL))\n has_answer = any(w in answer.lower() for w in [\"resultado\", \"total\", \"mΓ©dia\", \"mostra\", \"portanto\"])\n if has_query and has_answer:\n score += 0.25\n elif has_query or has_answer:\n score += 0.12\n\n # Tier 3 (0.25): Numerical specificity\n numbers = re.findall(r\"\\d+(?:[.,]\\d+)?(?:\\s*%)?\", answer)\n score += min(0.25, 0.05 * len(numbers))\n\n # Tier 4 (0.20): Portuguese business domain coherence\n pt_domain = [\"pedidos\", \"clientes\", \"vendedores\", \"produtos\", \"avaliaΓ§Γ£o\",\n \"entrega\", \"reclamaΓ§Γ£o\", \"satisfaΓ§Γ£o\", \"categoria\", \"perΓodo\"]\n score += min(0.20, 0.04 * sum(1 for w in pt_domain if w in answer.lower()))\n\n return min(score, 1.0)\n\n\ndef reward_insights(completion: str) -> float:\n \"\"\"Continuous reward for insights (max 1.0). 
Unchanged from V4.1.\"\"\"\n answer = strip_think(completion)\n if not answer.strip():\n return 0.0\n\n score = 0.0\n\n action_words = [\"recomend\", \"implement\", \"melhor\", \"reduzir\", \"aumentar\",\n \"priorizar\", \"investir\", \"otimizar\", \"estratΓ©gi\", \"aΓ§Γ£o\"]\n matches = sum(1 for w in action_words if w in answer.lower())\n score += min(0.4, 0.08 * matches)\n\n length = len(answer)\n if 100 <= length <= 800:\n score += 0.3\n elif length > 0:\n score += 0.3 * max(0, 1 - abs(length - 450) / 450)\n\n structure_marks = len(re.findall(r\"^[-β’*]\\s|^\\d+[.)]\\s|^#{1,3}\\s\", answer, re.MULTILINE))\n score += min(0.2, 0.04 * structure_marks)\n\n if any(w in answer.lower() for w in [\"cliente\", \"produto\", \"serviΓ§o\", \"empresa\"]):\n score += 0.1\n\n return min(score, 1.0)\n\n\ndef reward_push(completion: str) -> float:\n \"\"\"Continuous reward for push notifications (max 1.0). Unchanged from V4.1.\"\"\"\n answer = strip_think(completion).strip()\n if not answer:\n return 0.0\n\n length = len(answer)\n if length <= 120:\n length_score = 0.5\n else:\n length_score = 0.5 * max(0, 1 - (length - 120) / 120)\n\n pt_markers = re.findall(r\"[ãçéΓͺΓ³ΓΊΓ’Γ΅]|vocΓͺ|para|como|seu|sua|oferta|desconto|produto\",\n answer, re.IGNORECASE)\n lang_score = min(0.3, 0.03 * len(pt_markers))\n\n generic = [\"olΓ‘\", \"obrigado pela compra\", \"agradecemos\"]\n is_generic = any(g in answer.lower() for g in generic)\n creativity_score = 0.0 if is_generic else 0.2\n\n return min(length_score + lang_score + creativity_score, 1.0)\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: GDPO PER-COMPONENT NORMALIZATION (Change 5)\n# Normalize each reward component independently before aggregation.\n# GDPO (2601.05242) Β§3.1: preserves ~4Γ more distinct advantage groups.\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef gdpo_normalize(component_rewards: dict) -> list:\n \"\"\"Per-component normalization before aggregation (GDPO 2601.05242 Β§3.1).\n \n Args:\n component_rewards: {task_name: [reward_per_sample, ...]} for each component\n \n Returns:\n List of normalized summed rewards, one per sample.\n \"\"\"\n normalized = {}\n for task, rewards in component_rewards.items():\n rewards_t = torch.tensor(rewards, dtype=torch.float32)\n std = rewards_t.std()\n if std > 1e-8:\n normalized[task] = ((rewards_t - rewards_t.mean()) / std).tolist()\n else:\n normalized[task] = [0.0] * len(rewards) # zero-variance group\n # Sum normalized components per sample\n n = len(next(iter(normalized.values())))\n return [sum(normalized[t][i] for t in normalized) for i in range(n)]\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: DYNAMIC TASK WEIGHTING β MT-GRPO IWU (Change 6)\n# Track per-task reward improvement rates, upweight stagnating tasks.\n# MT-GRPO (2602.05547) Β§3.2: prevents easy-task collapse.\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\n_task_weights = {\n \"extraction\": 0.25,\n \"sql_qa\": 0.25,\n \"insights\": 0.25,\n \"push\": 0.25,\n}\n_task_reward_history = {t: [] for t in _task_weights}\n\ndef update_task_weights(step: int, per_task_rewards: dict, update_interval: int = 50):\n \"\"\"MT-GRPO IWU: update task sampling weights based on improvement rate.\n \n Args:\n step: Current training step\n per_task_rewards: {task: mean_reward} from latest eval\n update_interval: Only update every N steps\n \"\"\"\n global _task_weights\n if step % 
update_interval != 0 or step == 0:\n return\n \n for task, reward in per_task_rewards.items():\n if task not in _task_reward_history:\n continue\n _task_reward_history[task].append(reward)\n if len(_task_reward_history[task]) >= 2:\n improvement = _task_reward_history[task][-1] - _task_reward_history[task][-2]\n if improvement < 0.01: # stagnating\n _task_weights[task] = min(0.60, _task_weights[task] * 1.3)\n elif improvement > 0.05: # improving fast\n _task_weights[task] = max(0.10, _task_weights[task] * 0.85)\n \n # Normalize to sum to 1\n total = sum(_task_weights.values())\n _task_weights = {t: w / total for t, w in _task_weights.items()}\n\n\ndef get_task_weighted_indices(dataset, n_samples: int) -> list:\n \"\"\"Sample indices with probability proportional to task weights.\"\"\"\n task_indices = {t: [] for t in _task_weights}\n for i, record in enumerate(dataset):\n user_txt = \" \".join(m[\"content\"] for m in record[\"prompt\"] if m[\"role\"] == \"user\")\n task = _classify_task_type(user_txt)\n if task in task_indices:\n task_indices[task].append(i)\n \n sampled = []\n for task, weight in _task_weights.items():\n n = max(1, int(n_samples * weight))\n pool = task_indices.get(task, [])\n if pool:\n sampled.extend(random.sample(pool, min(n, len(pool))))\n random.shuffle(sampled)\n return sampled[:n_samples]\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# MASTER REWARD FUNCTION β V4.2: returns per-component rewards for GDPO\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\ndef commerce_reward_fn(completions, prompts, **kwargs) -> list:\n \"\"\"Master reward function with GDPO normalization + dynamic task weighting.\n \n V4.2 integration with TRL 0.24.0:\n TRL calls this once per step with the full batch (batch_size Γ G completions).\n We exploit this to apply batch-level per-component normalization (GDPO Β§3.1)\n and dynamic task weighting (MT-GRPO IWU Β§3.2) INSIDE the reward function,\n so the trainer receives pre-normalized, weighted rewards without modification.\n \n Pipeline:\n 1. Score each completion with its task-specific reward function (raw)\n 2. Group raw rewards by task type\n 3. GDPO: z-score normalize each task group independently\n 4. IWU: multiply normalized rewards by current _task_weights\n 5. Shift back to [0, 1] range (GRPO with scale_rewards=False expects non-negative)\n 6. 
Return flat list in original sample order\n \"\"\"\n n = len(completions)\n raw_rewards = [0.0] * n\n task_labels = [\"\"] * n\n \n # ββ Step 1: Compute raw per-sample rewards ββββββββββββββββββββββββββββββ\n for i, (completion, prompt) in enumerate(zip(completions, prompts)):\n if isinstance(completion, list):\n comp_text = completion[-1][\"content\"] if completion else \"\"\n else:\n comp_text = str(completion)\n\n if isinstance(prompt, list):\n prompt_text = \" \".join(m.get(\"content\", \"\") for m in prompt)\n else:\n prompt_text = str(prompt)\n\n task = _classify_task_type(prompt_text)\n task_labels[i] = task\n\n if task == \"extraction\":\n raw_rewards[i] = reward_extraction(comp_text)\n elif task == \"sql_qa\":\n raw_rewards[i] = reward_sql_qa(comp_text)\n elif task == \"insights\":\n raw_rewards[i] = reward_insights(comp_text)\n elif task == \"push\":\n raw_rewards[i] = reward_push(comp_text)\n else:\n raw_rewards[i] = 0.2 if comp_text.strip() else 0.0\n\n # ββ Step 2-4: GDPO per-component normalization + IWU weighting ββββββββββ\n # Group indices by task\n task_indices = {}\n for i, task in enumerate(task_labels):\n if task not in task_indices:\n task_indices[task] = []\n task_indices[task].append(i)\n \n final_rewards = [0.0] * n\n \n for task, indices in task_indices.items():\n task_raw = [raw_rewards[i] for i in indices]\n \n # GDPO: z-score normalize within this task group\n if len(task_raw) > 1:\n t_mean = sum(task_raw) / len(task_raw)\n t_var = sum((r - t_mean) ** 2 for r in task_raw) / (len(task_raw) - 1)\n t_std = t_var ** 0.5\n if t_std > 1e-8:\n normed = [(r - t_mean) / t_std for r in task_raw]\n else:\n normed = [0.0] * len(task_raw)\n else:\n # Single sample in this task group β can't normalize, use raw\n normed = [0.0]\n \n # IWU: scale by dynamic task weight\n weight = _task_weights.get(task, 0.25)\n weighted = [v * weight for v in normed]\n \n for idx_in_group, global_idx in enumerate(indices):\n final_rewards[global_idx] = weighted[idx_in_group]\n \n # ββ Step 5: Shift to non-negative range βββββββββββββββββββββββββββββββββ\n # GRPO with scale_rewards=False computes advantages as reward - mean(rewards).\n # Normalized rewards are already zero-centered per-task, so the advantage\n # computation will work correctly. 
But TRL may log negative rewards as warnings.\n # Shift so minimum is 0 to keep logging clean, without changing advantage ordering.\n min_r = min(final_rewards) if final_rewards else 0.0\n if min_r < 0:\n final_rewards = [r - min_r for r in final_rewards]\n \n return final_rewards\n\n\ndef commerce_reward_fn_raw(completions, prompts, **kwargs) -> list:\n \"\"\"Raw reward function WITHOUT GDPO/IWU β used for eval metrics.\n \n Eval should report raw task-specific rewards for interpretability.\n The GDPO+IWU normalization is only for shaping the training gradient signal.\n \"\"\"\n rewards = []\n for completion, prompt in zip(completions, prompts):\n if isinstance(completion, list):\n comp_text = completion[-1][\"content\"] if completion else \"\"\n else:\n comp_text = str(completion)\n\n if isinstance(prompt, list):\n prompt_text = \" \".join(m.get(\"content\", \"\") for m in prompt)\n else:\n prompt_text = str(prompt)\n\n task = _classify_task_type(prompt_text)\n\n if task == \"extraction\":\n rewards.append(reward_extraction(comp_text))\n elif task == \"sql_qa\":\n rewards.append(reward_sql_qa(comp_text))\n elif task == \"insights\":\n rewards.append(reward_insights(comp_text))\n elif task == \"push\":\n rewards.append(reward_push(comp_text))\n else:\n r = 0.2 if comp_text.strip() else 0.0\n rewards.append(r)\n return rewards\n\n\nprint(\"β Reward functions defined (V4.2: SQL v2 + GDPO active + IWU active)\")\nprint(f\" Task weights: {_task_weights}\")\nprint(f\" commerce_reward_fn: GDPO+IWU normalized (for training)\")\nprint(f\" commerce_reward_fn_raw: raw scores (for eval/audit)\")\nprint(f\" Task weights: {_task_weights}\")"
},
{
"cell_type": "markdown",
@@ -99,7 +99,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+
"source": "from scipy.stats import spearmanr\n\nAUDIT_PROMPTS_PER_TASK = 5\n\n# ββ Collect audit prompts (5 per task) βββββββββββββββββββββββββββββββββββββββ\naudit_by_type = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\nwith open(TRAIN_FILE) as f:\n for line in f:\n row = json.loads(line)\n convs = row[\"conversations\"]\n prompt_msgs = [m for m in convs if m[\"role\"] in (\"system\", \"user\")]\n if not prompt_msgs:\n continue\n user_text = \" \".join(m[\"content\"] for m in prompt_msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_text)\n if len(audit_by_type[task]) < AUDIT_PROMPTS_PER_TASK:\n audit_by_type[task].append(prompt_msgs)\n\nprint(f\"Audit prompts collected: {', '.join(f'{k}={len(v)}' for k, v in audit_by_type.items())}\")\n\n# ββ Generate completions and score automatically βββββββββββββββββββββββββββββ\nFastLanguageModel.for_inference(model)\n\naudit_auto_scores = []\naudit_tasks = []\naudit_completions = []\n\nfor task_type in [\"extraction\", \"sql_qa\", \"insights\", \"push\"]:\n for msgs in audit_by_type[task_type]:\n msgs = inject_task_system_prompt(msgs, task_type)\n text = tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=MAX_COMPLETION_LENGTH,\n temperature=0.1, # near-deterministic for audit\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r = commerce_reward_fn_raw([resp], [text])[0] # Raw rewards for audit (not GDPO-normalized)\n audit_auto_scores.append(r)\n audit_tasks.append(task_type)\n audit_completions.append(resp)\n\nprint(f\"\\n{'='*80}\")\nprint(\"REWARD FUNCTION AUDIT β 20 Completions\")\nprint(f\"{'='*80}\")\nfor i, (task, auto_r, comp) in enumerate(zip(audit_tasks, audit_auto_scores, audit_completions)):\n answer = strip_think(comp)[:200]\n print(f\"\\n--- Sample {i+1}/{len(audit_auto_scores)} [{task}] auto_reward={auto_r:.3f} ---\")\n print(f\"{answer}\")\n\nprint(f\"\\n{'='*80}\")\nprint(\"INSTRUCTIONS:\")\nprint(\"1. Read each completion above\")\nprint(\"2. Assign a 0-10 quality score for each (0=garbage, 10=perfect)\")\nprint(\"3. Fill in the list below and re-run the assertion\")\nprint(f\"{'='*80}\")\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# HUMAN SCORES β fill this in after reading the completions above\n# Each score is 0-10. Order matches the samples printed above.\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\naudit_human_scores = [\n # extraction (5 scores)\n -1, -1, -1, -1, -1,\n # sql_qa (5 scores)\n -1, -1, -1, -1, -1,\n # insights (5 scores)\n -1, -1, -1, -1, -1,\n # push (5 scores)\n -1, -1, -1, -1, -1,\n]\n\n# ββ Compute correlation ββββββββββββββββββββββββββββββββββββββββββββββββββββββ\nif all(s >= 0 for s in audit_human_scores):\n # Normalize human scores to 0-1 range for comparison\n human_normalized = [s / 10.0 for s in audit_human_scores]\n rho, p_value = spearmanr(human_normalized, audit_auto_scores)\n print(f\"\\nReward function calibration: Ο={rho:.3f} (p={p_value:.4f})\")\n print(f\" Human scores (normalized): {[f'{s:.1f}' for s in human_normalized]}\")\n print(f\" Auto scores: {[f'{s:.3f}' for s in audit_auto_scores]}\")\n \n if rho > 0.70:\n print(f\" β
PASS: Ο={rho:.3f} > 0.70 β reward function is calibrated\")\n else:\n print(f\" β FAIL: Ο={rho:.3f} < 0.70 β reward function is miscalibrated\")\n print(\" β Investigate discrepancies before training. Check:\")\n print(\" 1. Is the JSON parser handling all formats correctly?\")\n print(\" 2. Are SQL reward tiers appropriate for this model's output style?\")\n print(\" 3. Are insights/push length penalties calibrated?\")\n # Show biggest discrepancies\n diffs = [(i, abs(human_normalized[i] - audit_auto_scores[i]), audit_tasks[i])\n for i in range(len(audit_human_scores))]\n diffs.sort(key=lambda x: x[1], reverse=True)\n print(f\"\\n Top 5 discrepancies:\")\n for idx, diff, task in diffs[:5]:\n print(f\" Sample {idx+1} [{task}]: human={human_normalized[idx]:.1f}, auto={audit_auto_scores[idx]:.3f}, Ξ={diff:.3f}\")\n \n assert rho > 0.70, f\"Reward function miscalibrated (Ο={rho:.3f} < 0.70). Fix before training.\"\nelse:\n print(\"\\nβ οΈ Human scores not yet filled in. Fill audit_human_scores and re-run.\")\n print(\" You can proceed to Cell 9 to build the eval set while scoring.\")\n print(\" But DO NOT proceed past Cell 11 (smoke test) without completing the audit.\")"
},
{
"cell_type": "markdown",
@@ -159,7 +159,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+
"source": "import wandb\nfrom transformers import TrainerCallback\n\nwandb.login()\nwandb.init(\n project=WANDB_PROJECT,\n name=f\"grpo-v4.2-instruct-0.5B-seed{CURRENT_SEED}-{time.strftime('%Y%m%d-%H%M')}\",\n config={\n \"model_id\": MODEL_ID,\n \"version\": \"v4.2\",\n \"seed\": CURRENT_SEED,\n \"seeds_planned\": SEEDS,\n \"num_generations\": NUM_GENERATIONS,\n \"max_completion_length\": MAX_COMPLETION_LENGTH,\n \"temperature\": TEMPERATURE,\n \"learning_rate\": LEARNING_RATE,\n \"lr_scheduler_type\": LR_SCHEDULER_TYPE,\n \"warmup_ratio\": WARMUP_RATIO,\n \"beta\": BETA,\n \"scale_rewards\": SCALE_REWARDS,\n \"batch_size\": BATCH_SIZE,\n \"grad_accum\": GRAD_ACCUM,\n \"max_steps\": MAX_STEPS,\n \"lora_r\": LORA_R,\n \"lora_alpha\": LORA_ALPHA,\n \"train_prompts\": len(train_dataset),\n \"eval_prompts\": len(eval_dataset),\n \"eval_stratified\": True,\n \"eval_per_task\": EVAL_SAMPLES_PER_TASK,\n \"repetition_penalty_override\": 1.0,\n \"json_parser\": \"json-repair + PT-BR decimal normalizer\",\n \"sql_reward\": \"v2 (validation-aware, 4-tier)\",\n \"gdpo_normalization\": True,\n \"dynamic_task_weighting\": \"MT-GRPO IWU\",\n \"changes_from_v41\": \"stratified eval 65, reward audit, SQL v2, 1500 steps, GDPO, IWU, 3 seeds, best ckpt\",\n },\n)\nprint(f\"β W&B run: {wandb.run.url}\")\n\n\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n# V4.2: EvalRewardCallback v2\n# - Uses 65 stratified eval samples (Change 1)\n# - Reports per-task means with 95% CIs (Change 1)\n# - Runs GDPO normalization and logs component stats (Change 5)\n# - Updates dynamic task weights via IWU (Change 6)\n# - Saves best checkpoint explicitly (Change 8)\n# ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\n\nclass EvalRewardCallbackV2(TrainerCallback):\n def __init__(self, eval_records, reward_fn, patience, delta):\n self.eval_records = eval_records\n self.reward_fn = reward_fn\n self.patience = patience\n self.delta = delta\n self.best_reward = -float(\"inf\")\n self.best_step = 0\n self.no_improve_count = 0\n\n def on_step_end(self, args, state, control, model=None, processing_class=None, **kwargs):\n if state.global_step == 0 or state.global_step % EVAL_STEPS != 0:\n return control\n\n tokenizer_local = processing_class\n if tokenizer_local is None:\n print(\"[EvalRewardCallback] WARNING: tokenizer is None, skipping eval\")\n return control\n\n mean_reward, per_task, per_task_all = self._run_eval(model, tokenizer_local, args)\n improved = mean_reward > self.best_reward + self.delta\n\n # ββ Per-task 95% CIs (Change 1) ββββββββββββββββββββββββββββββββββββββ\n log_data = {\n \"eval/mean_reward\": mean_reward,\n \"eval/best_reward\": max(self.best_reward, mean_reward),\n \"eval/no_improve_count\": self.no_improve_count,\n }\n \n ci_strs = []\n for task_name, task_rewards in per_task_all.items():\n if task_rewards:\n n = len(task_rewards)\n task_mean = sum(task_rewards) / n\n if n > 1:\n task_std = (sum((r - task_mean)**2 for r in task_rewards) / (n - 1)) ** 0.5\n ci_half = 1.96 * task_std / math.sqrt(n)\n else:\n ci_half = 0.0\n log_data[f\"eval/{task_name}\"] = task_mean\n log_data[f\"eval/{task_name}_ci\"] = ci_half\n log_data[f\"eval/{task_name}_n\"] = n\n ci_strs.append(f\"{task_name}={task_mean:.3f}Β±{ci_half:.3f} (n={n})\")\n \n # ββ GDPO per-component stats (Change 5) βββββββββββββββββββββββββββββ\n if per_task_all and all(len(v) > 0 for v in per_task_all.values()):\n try:\n gdpo_rewards = gdpo_normalize(per_task_all)\n 
log_data[\"eval/gdpo_mean\"] = sum(gdpo_rewards) / len(gdpo_rewards)\n log_data[\"eval/gdpo_std\"] = (sum((r - sum(gdpo_rewards)/len(gdpo_rewards))**2 for r in gdpo_rewards) / len(gdpo_rewards)) ** 0.5\n except Exception as e:\n print(f\" [GDPO] normalization error: {e}\")\n \n # ββ Dynamic task weight update (Change 6) βββββββββββββββββββββββββββ\n per_task_means = {}\n for task_name, task_rewards in per_task_all.items():\n if task_rewards:\n per_task_means[task_name] = sum(task_rewards) / len(task_rewards)\n \n update_task_weights(state.global_step, per_task_means, update_interval=EVAL_STEPS)\n \n for task_name, weight in _task_weights.items():\n log_data[f\"sampler/{task_name}_weight\"] = weight\n \n wandb.log(log_data, step=state.global_step)\n\n status = \"β improved\" if improved else f\"β no gain ({self.no_improve_count + 1}/{self.patience})\"\n print(f\"\\n[EvalReward] step={state.global_step} | mean={mean_reward:.4f} | best={self.best_reward:.4f} | {status}\")\n for cs in ci_strs:\n print(f\" {cs}\")\n print(f\" Task weights: {', '.join(f'{t}={w:.3f}' for t, w in _task_weights.items())}\")\n\n if improved:\n self.best_reward = mean_reward\n self.best_step = state.global_step\n self.no_improve_count = 0\n # ββ V4.2: Save best checkpoint explicitly (Change 8) βββββββββββββ\n best_path = ADAPTER_DIR / \"best_checkpoint\"\n best_path.mkdir(parents=True, exist_ok=True)\n model.save_pretrained(str(best_path))\n tokenizer_local.save_pretrained(str(best_path))\n print(f\" β Best checkpoint saved β {best_path} (reward={mean_reward:.4f})\")\n else:\n self.no_improve_count += 1\n if self.no_improve_count >= self.patience:\n print(f\"[EarlyStopping] No improvement for {self.patience} evals. Halting.\")\n control.should_training_stop = True\n return control\n\n def _run_eval(self, model, tokenizer_local, args):\n FastLanguageModel.for_inference(model)\n rewards = []\n per_task_summary = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n per_task_all = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n \n # V4.2: Use ALL stratified eval samples (65), not just 15\n for record in self.eval_records:\n msgs = record[\"prompt\"]\n text = tokenizer_local.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n user_txt = \" \".join(m.get(\"content\", \"\") for m in msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_txt)\n\n inputs = tokenizer_local(text, return_tensors=\"pt\", truncation=True, max_length=args.max_prompt_length).to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=EVAL_MAX_TOKENS,\n temperature=0.1,\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer_local.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r = self.reward_fn([resp], [text])[0]\n rewards.append(r)\n if task in per_task_all:\n per_task_all[task].append(r)\n per_task_summary[task].append(r)\n \n FastLanguageModel.for_training(model)\n mean_r = sum(rewards) / len(rewards) if rewards else 0.0\n return mean_r, per_task_summary, per_task_all\n\n\n# ββ Training ββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\nFastLanguageModel.for_training(model)\n\ngrpo_config = GRPOConfig(\n output_dir=str(CHECKPOINT_DIR),\n num_generations=NUM_GENERATIONS,\n scale_rewards=SCALE_REWARDS,\n max_completion_length=MAX_COMPLETION_LENGTH,\n max_steps=MAX_STEPS, # V4.2: 1,500\n temperature=TEMPERATURE,\n beta=BETA,\n num_train_epochs=1,\n per_device_train_batch_size=BATCH_SIZE,\n 
gradient_accumulation_steps=GRAD_ACCUM,\n learning_rate=LEARNING_RATE,\n lr_scheduler_type=LR_SCHEDULER_TYPE,\n warmup_ratio=WARMUP_RATIO,\n fp16=False,\n bf16=True,\n logging_steps=1,\n save_steps=SAVE_STEPS, # V4.2: 100\n save_total_limit=5,\n save_only_model=True,\n report_to=\"wandb\",\n max_prompt_length=MAX_SEQ_LENGTH // 2,\n seed=CURRENT_SEED, # V4.2: per-seed\n remove_unused_columns=False,\n disable_tqdm=True,\n logging_first_step=True,\n)\n\neval_cb = EvalRewardCallbackV2(\n eval_records=list(eval_dataset),\n reward_fn=commerce_reward_fn_raw, # V4.2: raw rewards for eval (no GDPO/IWU distortion)\n patience=EARLY_STOPPING_PATIENCE,\n delta=EARLY_STOPPING_DELTA,\n)\n\ntrainer = UnslothGRPOTrainer(\n model=model,\n reward_funcs=commerce_reward_fn,\n args=grpo_config,\n train_dataset=train_dataset,\n processing_class=tokenizer,\n callbacks=[eval_cb],\n)\n\nt_start = time.time()\nresult = trainer.train()\nelapsed = time.time() - t_start\n\nwandb.log({\n \"train/final_loss\": result.training_loss,\n \"train/duration_hours\": elapsed / 3600,\n \"train/total_steps\": result.global_step,\n \"eval/best_reward_final\": eval_cb.best_reward,\n \"eval/best_step\": eval_cb.best_step,\n \"final/task_weights\": _task_weights,\n})\nwandb.finish()\n\nprint(f\"\\n{'='*60}\")\nprint(f\"V4.2 Training Complete (seed={CURRENT_SEED})\")\nprint(f\" Loss: {result.training_loss:.4f}\")\nprint(f\" Steps: {result.global_step}\")\nprint(f\" Duration: {elapsed/3600:.1f}h\")\nprint(f\" Best eval: {eval_cb.best_reward:.4f} (step {eval_cb.best_step})\")\nprint(f\" Final task weights: {_task_weights}\")\nprint(f\"{'='*60}\")"
},
{
"cell_type": "markdown",
@@ -171,7 +171,7 @@
"execution_count": null,
"metadata": {},
"outputs": [],
+
"source": "FastLanguageModel.for_inference(model)\n\nval_samples = list(eval_dataset) # All 65 stratified samples\nval_results = {\"extraction\": [], \"sql_qa\": [], \"insights\": [], \"push\": []}\n\nprint(f\"Post-training validation on {len(val_samples)} stratified samples (seed={CURRENT_SEED})\")\nprint(\"-\" * 80)\n\nfor i, record in enumerate(val_samples):\n msgs = record[\"prompt\"]\n user_text = \" \".join(m[\"content\"] for m in msgs if m[\"role\"] == \"user\")\n task = _classify_task_type(user_text)\n\n text = tokenizer.apply_chat_template(msgs, tokenize=False, add_generation_prompt=True)\n inputs = tokenizer(text, return_tensors=\"pt\").to(model.device)\n with torch.no_grad():\n out = model.generate(\n **inputs,\n max_new_tokens=MAX_COMPLETION_LENGTH,\n temperature=0.1,\n do_sample=True,\n repetition_penalty=1.0,\n )\n resp = tokenizer.decode(out[0][inputs[\"input_ids\"].shape[1]:], skip_special_tokens=True)\n r = commerce_reward_fn_raw([resp], [text])[0] # Raw rewards for reporting\n val_results[task].append(r)\n if i < 10 or r < 0.2: # Print first 10 and all low-scoring\n print(f\" [{task:12s}] reward={r:.3f} | {strip_think(resp)[:80]}\")\n\n# ββ Results with 95% CIs ββββββββββββββββββββββββββββββββββββββββββββββββββββ\nprint(f\"\\n{'='*80}\")\nprint(f\"VALIDATION RESULTS β V4.2 Seed {CURRENT_SEED}\")\nprint(f\"{'='*80}\")\nprint(f\"{'Task':15s} {'Mean':>8s} {'Β± 95% CI':>10s} {'Min':>6s} {'Max':>6s} {'N':>4s}\")\nprint(\"-\" * 55)\n\noverall = []\nresults_by_seed = {} # Store for cross-seed comparison\n\nfor task in [\"extraction\", \"sql_qa\", \"insights\", \"push\"]:\n rewards = val_results[task]\n overall.extend(rewards)\n if rewards:\n n = len(rewards)\n mean_r = sum(rewards) / n\n if n > 1:\n std_r = (sum((r - mean_r)**2 for r in rewards) / (n - 1)) ** 0.5\n ci_half = 1.96 * std_r / math.sqrt(n)\n else:\n std_r = 0.0\n ci_half = 0.0\n print(f\"{task:15s} {mean_r:8.3f} {'Β±':>2s}{ci_half:7.3f} {min(rewards):6.3f} {max(rewards):6.3f} {n:4d}\")\n results_by_seed[task] = {\"mean\": mean_r, \"ci\": ci_half, \"n\": n, \"std\": std_r}\n\nif overall:\n n_total = len(overall)\n mean_total = sum(overall) / n_total\n std_total = (sum((r - mean_total)**2 for r in overall) / (n_total - 1)) ** 0.5\n ci_total = 1.96 * std_total / math.sqrt(n_total)\n print(\"-\" * 55)\n print(f\"{'OVERALL':15s} {mean_total:8.3f} {'Β±':>2s}{ci_total:7.3f} {min(overall):6.3f} {max(overall):6.3f} {n_total:4d}\")\n results_by_seed[\"overall\"] = {\"mean\": mean_total, \"ci\": ci_total, \"n\": n_total, \"std\": std_total}\n\n# ββ Save results for cross-seed comparison ββββββββββββββββββββββββββββββββββ\nresults_file = ADAPTER_DIR / f\"eval_results_seed{CURRENT_SEED}.json\"\nresults_file.parent.mkdir(parents=True, exist_ok=True)\nwith open(results_file, \"w\") as f:\n json.dump(results_by_seed, f, indent=2)\nprint(f\"\\nβ Results saved to {results_file}\")\n\n# ββ V4.2 Decision βββββββββββββββββββββββββββββββββββββββββββββββββββββββββββ\nprint(f\"\\n--- V4.2 Questions ---\")\nsql_mean = results_by_seed.get(\"sql_qa\", {}).get(\"mean\", 0)\ninsights_mean = results_by_seed.get(\"insights\", {}).get(\"mean\", 0)\noverall_mean = results_by_seed.get(\"overall\", {}).get(\"mean\", 0)\n\nprint(f\"Q1 SQL reward: {sql_mean:.3f} ({'improved' if sql_mean > 0.60 else 'still stagnant' if sql_mean < 0.56 else 'modest gain'})\")\nprint(f\"Q2 Insights: {insights_mean:.3f} ({'stable' if insights_mean > 0.70 else 'regressed' if insights_mean < 0.60 else 'mixed'})\")\nprint(f\"Q3 Overall: {overall_mean:.3f} ({'above 0.70 
target' if overall_mean > 0.70 else 'below target'})\")\nprint(f\"Q4 Seeds: Seed {CURRENT_SEED} done. Run seeds {[s for s in SEEDS if s != CURRENT_SEED]} next.\")"
},
{
"cell_type": "markdown",