Upload agent/core/model_switcher.py
agent/core/model_switcher.py +279 -0
agent/core/model_switcher.py
ADDED
"""Model-switching logic for the interactive CLI's ``/model`` command.

Split out of ``agent.main`` so the REPL dispatcher stays focused on input
parsing. Exposes:

* ``SUGGESTED_MODELS`` – the short list shown by ``/model`` with no arg.
* ``is_valid_model_id`` – loose format check on user input.
* ``probe_and_switch_model`` – async: checks routing, fires a 1-token
  probe to resolve the effort cascade, then commits the switch (or
  rejects it on hard error).

The probe's cascade lives in ``agent.core.effort_probe``; this module
glues it to CLI output + session state.
"""

from __future__ import annotations

from agent.core.effort_probe import ProbeInconclusive, probe_effort


# Suggested models shown by `/model` (not a gate). Users can paste any HF
# model id (e.g. "MiniMaxAI/MiniMax-M2.7") or an `anthropic/` / `openai/`
# prefix for direct API access. For HF ids, append ":fastest" /
# ":cheapest" / ":preferred" / ":<provider>" to override the default
# routing policy (auto = fastest with failover); see the tag examples
# after this list.
SUGGESTED_MODELS = [
    {"id": "openai/gpt-5.5", "label": "GPT-5.5"},
    {"id": "openai/gpt-5.4", "label": "GPT-5.4"},
    {"id": "anthropic/claude-opus-4-7", "label": "Claude Opus 4.7"},
    {"id": "anthropic/claude-opus-4-6", "label": "Claude Opus 4.6"},
    {
        "id": "bedrock/us.anthropic.claude-opus-4-6-v1",
        "label": "Claude Opus 4.6 via Bedrock",
    },
    {"id": "MiniMaxAI/MiniMax-M2.7", "label": "MiniMax M2.7"},
    {"id": "moonshotai/Kimi-K2.6", "label": "Kimi K2.6"},
    {"id": "zai-org/GLM-5.1", "label": "GLM 5.1"},
    {"id": "deepseek-ai/DeepSeek-V4-Pro:deepinfra", "label": "DeepSeek V4 Pro"},
    # Local / self-hosted providers (OpenAI-compatible endpoints)
    {"id": "llamacpp/llama-3-8b", "label": "llama.cpp (local)"},
    {"id": "lmstudio/llama-3-8b", "label": "LM Studio (local)"},
    {"id": "mlx/llama-3-8b", "label": "MLX (Apple Silicon, local)"},
    {"id": "nim/llama-3-8b", "label": "NVIDIA NIM (local)"},
    {"id": "local/llama-3-8b", "label": "Custom local server"},
    {"id": "ollama/llama3.1", "label": "Ollama (local)"},
    {"id": "vllm/llama-3-8b", "label": "vLLM (local)"},
    {"id": "tgi/llama-3-8b", "label": "TGI (local)"},
]
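
# Tag examples (illustrative ids built from the suggestions above; only the
# tag grammar matters here):
#   "zai-org/GLM-5.1"            -> auto routing (fastest, with failover)
#   "zai-org/GLM-5.1:cheapest"   -> cheapest live provider wins
#   "zai-org/GLM-5.1:deepinfra"  -> pinned to that provider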


_ROUTING_POLICIES = {"fastest", "cheapest", "preferred"}


# Local / self-hosted provider prefixes that route to OpenAI-compatible
# endpoints on the user's machine.
_LOCAL_PREFIXES = {
    "llamacpp",
    "lmstudio",
    "mlx",
    "nim",
    "local",
    "ollama",
    "vllm",
    "tgi",
}


def is_valid_model_id(model_id: str) -> bool:
    """Loose format check – lets users pick any model id.

    Accepts:
      • anthropic/<model>
      • openai/<model>
      • <org>/<model>[:<tag>]              (HF router; tag = provider or policy)
      • huggingface/<org>/<model>[:<tag>]  (same, accepts legacy prefix)
      • <local-prefix>/<model>             (local OpenAI-compatible server)

    Actual availability is verified against the HF router catalog on
    switch, and by the provider on the probe's ping call.
    """
    if not model_id or "/" not in model_id:
        return False
    head = model_id.split(":", 1)[0]
    parts = head.split("/")
    if len(parts) >= 2 and all(parts):
        return True
    # Local prefixes only need one part after the prefix (e.g. "ollama/llama3")
    if parts[0] in _LOCAL_PREFIXES and len(parts) == 2 and parts[1]:
        return True
    return False
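
# Illustrative format checks (the tag is stripped before the shape test):
#   is_valid_model_id("zai-org/GLM-5.1")              -> True
#   is_valid_model_id("zai-org/GLM-5.1:cheapest")     -> True
#   is_valid_model_id("huggingface/zai-org/GLM-5.1")  -> True   (legacy prefix)
#   is_valid_model_id("ollama/llama3.1")              -> True
#   is_valid_model_id("llama3.1")                     -> False  (no slash)
#   is_valid_model_id("openai/")                      -> False  (empty model part)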


def _print_hf_routing_info(model_id: str, console) -> bool:
    """Show HF router catalog info (providers, price, context, tool support)
    for an HF-router model id. Returns ``True`` to signal the caller can
    proceed with the switch, ``False`` to indicate a hard problem the user
    should notice before we fire the effort probe. (Today every code path
    returns ``True``; the warnings are advisory.)

    Anthropic / OpenAI ids return ``True`` without printing anything –
    the probe below covers "does this model exist".
    """
    if model_id.startswith(("anthropic/", "openai/")):
        return True

    # Local providers bypass the HF router catalog entirely.
    if model_id.split("/", 1)[0] in _LOCAL_PREFIXES:
        return True

    from agent.core import hf_router_catalog as cat

    bare, _, tag = model_id.partition(":")
    info = cat.lookup(bare)
    if info is None:
        console.print(
            f"[bold red]Warning:[/bold red] '{bare}' isn't in the HF router "
            "catalog. Checking anyway – first call may fail."
        )
        suggestions = cat.fuzzy_suggest(bare)
        if suggestions:
            console.print(f"[dim]Did you mean: {', '.join(suggestions)}[/dim]")
        return True

    live = info.live_providers
    if not live:
        console.print(
            f"[bold red]Warning:[/bold red] '{bare}' has no live providers "
            "right now. First call will likely fail."
        )
        return True

    if tag and tag not in _ROUTING_POLICIES:
        matched = [p for p in live if p.provider == tag]
        if not matched:
            names = ", ".join(p.provider for p in live)
            console.print(
                f"[bold red]Warning:[/bold red] provider '{tag}' doesn't serve "
                f"'{bare}'. Live providers: {names}. Checking anyway."
            )

    if not info.any_supports_tools:
        console.print(
            f"[bold red]Warning:[/bold red] no provider for '{bare}' advertises "
            "tool-call support. This agent relies on tool calls – expect errors."
        )

    if tag in _ROUTING_POLICIES:
        policy = tag
    elif tag:
        policy = f"pinned to {tag}"
    else:
        policy = "auto (fastest)"
    console.print(f" [dim]routing: {policy}[/dim]")
    for p in live:
        price = (
            f"${p.input_price:g}/${p.output_price:g} per M tok"
            if p.input_price is not None and p.output_price is not None
            else "price n/a"
        )
        ctx = f"{p.context_length:,} ctx" if p.context_length else "ctx n/a"
        tools = "tools" if p.supports_tools else "no tools"
        console.print(f" [dim]{p.provider}: {price}, {ctx}, {tools}[/dim]")
    return True
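
# Example of the rendered routing block (provider names and numbers are
# illustrative, not live catalog data):
#   routing: pinned to deepinfra
#   deepinfra: $0.27/$1.1 per M tok, 163,840 ctx, tools
#   novita: price n/a, ctx n/a, no tools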


def print_model_listing(config, console) -> None:
    """Render the default ``/model`` (no-arg) view: current + suggested."""
    current = config.model_name if config else ""
    console.print("[bold]Current model:[/bold]")
    console.print(f" {current}")
    console.print("\n[bold]Suggested:[/bold]")
    for m in SUGGESTED_MODELS:
        marker = " [dim]<-- current[/dim]" if m["id"] == current else ""
        console.print(f" {m['id']} [dim]({m['label']})[/dim]{marker}")
    console.print(
        "\n[dim]Paste any HF model id (e.g. 'MiniMaxAI/MiniMax-M2.7').\n"
        "Add ':fastest', ':cheapest', ':preferred', or ':<provider>' to override routing.\n"
        "Use 'anthropic/<model>' or 'openai/<model>' for direct API access.[/dim]"
    )


def print_invalid_id(arg: str, console) -> None:
    """Explain the accepted id formats after a failed format check."""
    console.print(f"[bold red]Invalid model id format:[/bold red] {arg}")
    console.print(
        "[dim]Expected:\n"
        " • <org>/<model>[:tag]  (HF router – paste from huggingface.co)\n"
        " • anthropic/<model>\n"
        " • openai/<model>\n"
        " • llamacpp/<model>  (llama.cpp server, http://localhost:8080)\n"
        " • lmstudio/<model>  (LM Studio, http://localhost:1234)\n"
        " • mlx/<model>  (MLX OpenAI-compatible server)\n"
        " • nim/<model>  (NVIDIA NIM, http://localhost:8000)\n"
        " • ollama/<model>  (Ollama, http://localhost:11434)\n"
        " • vllm/<model>  (vLLM, http://localhost:8000)\n"
        " • tgi/<model>  (TGI, http://localhost:8080)\n"
        " • local/<model>  (custom LOCAL_API_BASE)[/dim]"
    )


async def probe_and_switch_model(
    model_id: str,
    config,
    session,
    console,
    hf_token: str | None,
) -> None:
    """Validate model+effort with a 1-token ping, cache the effective effort,
    then commit the switch.

    Three visible outcomes:

    * ✓ ``effort: <level>`` – model accepted the preferred effort (or a
      fallback from the cascade; the note explains if so)
    * ✓ ``effort: off`` – model doesn't support thinking; we'll strip it
    * ✗ hard error (auth, model-not-found, quota) – we reject the switch
      and keep the current model so the user isn't stranded

    Transient errors (5xx, timeout) complete the switch with a yellow
    warning; the next real call re-surfaces the error if it's persistent.
    """
    preference = config.reasoning_effort
    if not _print_hf_routing_info(model_id, console):
        return

    if not preference:
        # Nothing to validate with a ping that we couldn't validate on the
        # first real call just as cheaply. Skip the probe entirely.
        _commit_switch(model_id, config, session, effective=None, cache=False)
        console.print(
            f"[green]Model switched to {model_id}[/green] [dim](effort: off)[/dim]"
        )
        return

    console.print(f"[dim]checking {model_id} (effort: {preference})...[/dim]")
    try:
        outcome = await probe_effort(model_id, preference, hf_token, session=session)
    except ProbeInconclusive as e:
        _commit_switch(model_id, config, session, effective=None, cache=False)
        console.print(
            f"[yellow]Model switched to {model_id}[/yellow] "
            f"[dim](couldn't validate: {e}; will verify on first message)[/dim]"
        )
        return
    except Exception as e:
        # Hard persistent error – auth, unknown model, quota. Don't switch.
        console.print(f"[bold red]Switch failed:[/bold red] {e}")
        console.print(f"[dim]Keeping current model: {config.model_name}[/dim]")
        return

    _commit_switch(
        model_id,
        config,
        session,
        effective=outcome.effective_effort,
        cache=True,
    )
    effort_label = outcome.effective_effort or "off"
    suffix = f" – {outcome.note}" if outcome.note else ""
    console.print(
        f"[green]Model switched to {model_id}[/green] "
        f"[dim](effort: {effort_label}{suffix}, {outcome.elapsed_ms}ms)[/dim]"
    )
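
# Example transcripts for the outcomes above (ids, note text, and timings
# are illustrative):
#   checking zai-org/GLM-5.1 (effort: high)...
#   Model switched to zai-org/GLM-5.1 (effort: high, 412ms)
#   Model switched to zai-org/GLM-5.1 (couldn't validate: timeout; will verify on first message)
#   Switch failed: 401 Unauthorized
#   Keeping current model: openai/gpt-5.5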


def _commit_switch(model_id, config, session, effective, cache: bool) -> None:
    """Apply the switch to the session (or bare config if no session yet).

    ``effective`` is the probe's resolved effort; ``cache=True`` stores it
    in the session's per-model cache so real calls use the resolved level
    instead of re-probing. ``cache=False`` (inconclusive probe / effort
    off) drops any stale cached entry for the model – the next call falls
    back to the preference.
    """
    if session is not None:
        session.update_model(model_id)
        if cache:
            session.model_effective_effort[model_id] = effective
        else:
            session.model_effective_effort.pop(model_id, None)
    else:
        config.model_name = model_id