File size: 4,778 Bytes
12c2cae
8f2700b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12c2cae
 
8f2700b
12c2cae
8f2700b
12c2cae
8f2700b
 
 
 
 
 
12c2cae
 
 
 
 
8f2700b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
12c2cae
c110410
 
 
 
 
 
358e009
 
 
 
 
 
2404239
 
 
12c2cae
8f2700b
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
c110410
 
 
358e009
 
 
2404239
 
12c2cae
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
"""
purpose_agent — The World's First SLM-Native Self-Improving Agentic Framework

Works with both Small Language Models (SLMs, 0.6B-3B params, local) and
Large Language Models (LLMs, cloud APIs) with equal efficiency.

Architecture based on 8 published papers:
  - MUSE (arxiv:2510.08002): 3-tier hierarchical memory
  - LATS (arxiv:2310.04406): LLM-as-value-function
  - REMEMBERER (arxiv:2306.07929): Q-value experience replay
  - Reflexion (arxiv:2303.11366): Verbal reinforcement
  - SPC (arxiv:2504.19162): Anti-reward-hacking
  - CER (arxiv:2506.06698): Contextual experience distillation
  - MemRL (arxiv:2601.03192): Two-phase retrieval
  - TinyAgent (arxiv:2409.00608): SLM-native agent patterns

Modules:
  Core:       types, llm_backend, actor, purpose_function, experience_replay, optimizer, orchestrator
  SLM:        slm_backends (Ollama, llama-cpp, prompt compression)
  Streaming:  streaming (async generators, event streaming)
  Tools:      tools (Tool base class, built-in tools, Tool RAG)
  Observe:    observability (cost tracking, callbacks, metrics)
  Multi:      multi_agent (shared memory, agent delegation, teams)
  HITL:       hitl (checkpoint, interrupt, resume, Φ overrides)
  Eval:       evaluation (benchmark runner, improvement curves)
"""

# Package version string (PEP 440 style); intentionally module-level so
# callers can do `purpose_agent.__version__`.
__version__ = "0.2.0"

# Core
from purpose_agent.types import (
    State, Action, Trajectory, TrajectoryStep,
    Heuristic, PurposeScore, MemoryRecord, MemoryTier,
)
from purpose_agent.llm_backend import (
    LLMBackend, MockLLMBackend, HFInferenceBackend,
    OpenAICompatibleBackend, ChatMessage,
)
from purpose_agent.actor import Actor
from purpose_agent.purpose_function import PurposeFunction
from purpose_agent.experience_replay import ExperienceReplay
from purpose_agent.optimizer import HeuristicOptimizer
from purpose_agent.orchestrator import Orchestrator, Environment, SimpleEnvironment, TaskResult

# SLM-Native Backends
from purpose_agent.slm_backends import (
    OllamaBackend, LlamaCppBackend, SLMPromptCompressor,
    create_slm_backend, SLM_REGISTRY,
)

# Streaming & Async
from purpose_agent.streaming import StreamingMixin, StreamEvent, AsyncOrchestrator

# Tools
from purpose_agent.tools import (
    Tool, FunctionTool, ToolResult, ToolRegistry,
    CalculatorTool, PythonExecTool, ReadFileTool, WriteFileTool,
)

# Observability
from purpose_agent.observability import (
    CostTracker, TokenUsage, CallbackManager,
    AgentEvent, EventType, LoggingCallback, MetricsCollector,
)

# Multi-Agent
from purpose_agent.multi_agent import AgentSpec, AgentTeam

# Human-in-the-Loop
from purpose_agent.hitl import (
    HITLOrchestrator, Checkpoint, HumanInputHandler,
    CLIInputHandler, AutoApproveHandler, InterruptType,
)

# Evaluation
from purpose_agent.evaluation import BenchmarkTask, BenchmarkRunner, BenchmarkResult

# Plugin Registry
from purpose_agent.registry import (
    PluginRegistry, backend_registry, callback_registry, model_registry,
    EmbeddingBackend, default_embedding,
)

# Unified Capabilities (LangGraph + CrewAI + AutoGen + OpenAI SDK + LlamaIndex)
from purpose_agent.unified import (
    Agent, Graph, parallel, Conversation, KnowledgeStore,
    START, END, Message,
)

# Easy API (the only thing beginners need)
from purpose_agent.easy import purpose, Team, quickstart, TEAM_TEMPLATES

# Explicit public API for `from purpose_agent import *`.
# The section comments below mirror the import groups above; every name
# imported in this module is re-exported here (and nothing else).
# NOTE(review): `__version__` is deliberately absent from this list, so it
# is reachable as an attribute but not pulled in by star-imports — confirm
# that matches the project's convention.
__all__ = [
    # Core
    "State", "Action", "Trajectory", "TrajectoryStep", "Heuristic",
    "PurposeScore", "MemoryRecord", "MemoryTier",
    "LLMBackend", "MockLLMBackend", "HFInferenceBackend",
    "OpenAICompatibleBackend", "ChatMessage",
    "Actor", "PurposeFunction", "ExperienceReplay", "HeuristicOptimizer",
    "Orchestrator", "Environment", "SimpleEnvironment", "TaskResult",
    # SLM
    "OllamaBackend", "LlamaCppBackend", "SLMPromptCompressor",
    "create_slm_backend", "SLM_REGISTRY",
    # Streaming
    "StreamingMixin", "StreamEvent", "AsyncOrchestrator",
    # Tools
    "Tool", "FunctionTool", "ToolResult", "ToolRegistry",
    "CalculatorTool", "PythonExecTool", "ReadFileTool", "WriteFileTool",
    # Observability
    "CostTracker", "TokenUsage", "CallbackManager",
    "AgentEvent", "EventType", "LoggingCallback", "MetricsCollector",
    # Multi-Agent
    "AgentSpec", "AgentTeam",
    # HITL
    "HITLOrchestrator", "Checkpoint", "HumanInputHandler",
    "CLIInputHandler", "AutoApproveHandler", "InterruptType",
    # Evaluation
    "BenchmarkTask", "BenchmarkRunner", "BenchmarkResult",
    # Plugin Registry
    "PluginRegistry", "backend_registry", "callback_registry", "model_registry",
    "EmbeddingBackend", "default_embedding",
    # Unified Capabilities
    "Agent", "Graph", "parallel", "Conversation", "KnowledgeStore",
    "START", "END", "Message",
    # Easy API
    "purpose", "Team", "quickstart", "TEAM_TEMPLATES",
]