refactor: modularity fixes + plugin registry + compiled research
Browse files
purpose_agent/orchestrator.py
CHANGED
|
@@ -277,7 +277,7 @@ class Orchestrator:
|
|
| 277 |
)
|
| 278 |
|
| 279 |
# Load existing heuristics into Actor memory
|
| 280 |
-
self.
|
| 281 |
|
| 282 |
# ------------------------------------------------------------------
|
| 283 |
# Main Task Loop
|
|
@@ -427,7 +427,7 @@ class Orchestrator:
|
|
| 427 |
|
| 428 |
# Post-task processing
|
| 429 |
result = TaskResult(trajectory=trajectory, final_state=current_state)
|
| 430 |
-
self.
|
| 431 |
|
| 432 |
logger.info(f"——— Task complete ———\n{result.summary()}")
|
| 433 |
return result
|
|
@@ -436,12 +436,17 @@ class Orchestrator:
|
|
| 436 |
# Post-Task: Experience Storage + Optimization
|
| 437 |
# ------------------------------------------------------------------
|
| 438 |
|
| 439 |
-
def
|
| 440 |
self,
|
| 441 |
trajectory: Trajectory,
|
| 442 |
-
used_experiences: list[Any],
|
| 443 |
) -> None:
|
| 444 |
-
"""Post-task processing: store trajectory, maybe optimize, sync memory.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 445 |
|
| 446 |
# Store in experience replay
|
| 447 |
record = self.experience_replay.add(trajectory)
|
|
@@ -480,10 +485,14 @@ class Orchestrator:
|
|
| 480 |
self.optimizer.optimize(top_trajectories)
|
| 481 |
|
| 482 |
# Sync updated heuristics to Actor memory
|
| 483 |
-
self.
|
| 484 |
|
| 485 |
-
def
|
| 486 |
-
"""Push current heuristic library to Actor's memory tiers.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 487 |
self.actor.update_strategic_memory(
|
| 488 |
self.optimizer.get_heuristics_by_tier(MemoryTier.STRATEGIC)
|
| 489 |
)
|
|
|
|
| 277 |
)
|
| 278 |
|
| 279 |
# Load existing heuristics into Actor memory
|
| 280 |
+
self.sync_memory()
|
| 281 |
|
| 282 |
# ------------------------------------------------------------------
|
| 283 |
# Main Task Loop
|
|
|
|
| 427 |
|
| 428 |
# Post-task processing
|
| 429 |
result = TaskResult(trajectory=trajectory, final_state=current_state)
|
| 430 |
+
self.post_task(trajectory, relevant_experiences)
|
| 431 |
|
| 432 |
logger.info(f"——— Task complete ———\n{result.summary()}")
|
| 433 |
return result
|
|
|
|
| 436 |
# Post-Task: Experience Storage + Optimization
|
| 437 |
# ------------------------------------------------------------------
|
| 438 |
|
| 439 |
+
def post_task(
|
| 440 |
self,
|
| 441 |
trajectory: Trajectory,
|
| 442 |
+
used_experiences: list[Any] | None = None,
|
| 443 |
) -> None:
|
| 444 |
+
"""Post-task processing: store trajectory, maybe optimize, sync memory.
|
| 445 |
+
|
| 446 |
+
Public API — called by HITLOrchestrator, AsyncOrchestrator, and
|
| 447 |
+
any custom orchestration wrapper after a task completes.
|
| 448 |
+
"""
|
| 449 |
+
used_experiences = used_experiences or []
|
| 450 |
|
| 451 |
# Store in experience replay
|
| 452 |
record = self.experience_replay.add(trajectory)
|
|
|
|
| 485 |
self.optimizer.optimize(top_trajectories)
|
| 486 |
|
| 487 |
# Sync updated heuristics to Actor memory
|
| 488 |
+
self.sync_memory()
|
| 489 |
|
| 490 |
def sync_memory(self) -> None:
    """Push the current heuristic library into the Actor's memory tiers.

    Public API — call after manually modifying the heuristic library
    (e.g., human-injected heuristics via HITL).
    """
    # Pull the STRATEGIC-tier heuristics from the optimizer, then hand
    # them to the Actor so its strategic memory reflects the latest library.
    strategic_heuristics = self.optimizer.get_heuristics_by_tier(MemoryTier.STRATEGIC)
    self.actor.update_strategic_memory(strategic_heuristics)
|