import asyncio
import json
import logging
import subprocess
import sys
import uuid
from dataclasses import dataclass
from datetime import datetime
from enum import Enum
from pathlib import Path
from typing import Any, Optional
from agent.config import Config
from agent.context_manager.manager import ContextManager
logger = logging.getLogger(__name__)
_DEFAULT_MAX_TOKENS = 200_000
def _get_max_tokens_safe(model_name: str) -> int:
"""Return the max input-context tokens for a model.
    Primary source: ``litellm.get_model_info(model)['max_input_tokens']``.
    LiteLLM maintains an upstream catalog that knows Claude Opus 4.6 is
    1M, GPT-5 is 272k, Sonnet 4.5 is 200k, and so on. Strips the
    ``huggingface/`` prefix and any ``:tag`` routing suffix so tagged ids
    ('moonshotai/Kimi-K2.6:cheapest') look up the bare model entry. Falls
    back to a conservative 200k default for models not in the catalog
    (typically HF-router-only models).
    """
from litellm import get_model_info
candidates = [model_name]
stripped = model_name.removeprefix("huggingface/").split(":", 1)[0]
if stripped != model_name:
candidates.append(stripped)
for candidate in candidates:
try:
info = get_model_info(candidate)
max_input = info.get("max_input_tokens") if info else None
if isinstance(max_input, int) and max_input > 0:
return max_input
except Exception:
continue
logger.info(
"No litellm.get_model_info entry for %s, falling back to %d",
model_name, _DEFAULT_MAX_TOKENS,
)
return _DEFAULT_MAX_TOKENS
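
# Illustrative lookups (not asserted against the live litellm catalog; the
# model ids below are examples, not guaranteed entries):
#   _get_max_tokens_safe("huggingface/moonshotai/Kimi-K2.6:cheapest")
#     tries the raw id first, then the stripped "moonshotai/Kimi-K2.6"
#   _get_max_tokens_safe("some-hf-router-only-model")
#     finds no catalog entry and returns _DEFAULT_MAX_TOKENS (200_000)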
class OpType(Enum):
USER_INPUT = "user_input"
EXEC_APPROVAL = "exec_approval"
INTERRUPT = "interrupt"
UNDO = "undo"
COMPACT = "compact"
SHUTDOWN = "shutdown"
@dataclass
class Event:
event_type: str
data: Optional[dict[str, Any]] = None
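
# Illustrative construction; the actual event_type vocabulary is defined by
# whatever consumes ``event_queue``:
#   Event(event_type="agent_message", data={"text": "hello"})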
class Session:
"""
Maintains agent session state
Similar to Session in codex-rs/core/src/codex.rs
"""
def __init__(
self,
event_queue: asyncio.Queue,
config: Config | None = None,
tool_router=None,
context_manager: ContextManager | None = None,
hf_token: str | None = None,
local_mode: bool = False,
stream: bool = True,
user_id: str | None = None,
):
self.hf_token: Optional[str] = hf_token
self.user_id: Optional[str] = user_id
self.tool_router = tool_router
self.stream = stream
tool_specs = tool_router.get_tool_specs_for_llm() if tool_router else []
        # Resolve the config first: the context manager's token budget below
        # is derived from config.model_name, so a None config must be replaced
        # with the default before it is read.
        self.config = config or Config(
            model_name="bedrock/us.anthropic.claude-sonnet-4-5-20250929-v1:0",
        )
        self.context_manager = context_manager or ContextManager(
            model_max_tokens=_get_max_tokens_safe(self.config.model_name),
            compact_size=0.1,
            untouched_messages=5,
            tool_specs=tool_specs,
            hf_token=hf_token,
            local_mode=local_mode,
        )
        self.event_queue = event_queue
        self.session_id = str(uuid.uuid4())
self.is_running = True
self._cancelled = asyncio.Event()
self.pending_approval: Optional[dict[str, Any]] = None
self.sandbox = None
self._running_job_ids: set[str] = set() # HF job IDs currently executing
# Session trajectory logging
self.logged_events: list[dict] = []
self.session_start_time = datetime.now().isoformat()
self.turn_count: int = 0
self.last_auto_save_turn: int = 0
# Stable local save path so heartbeat saves overwrite one file instead
# of spamming session_logs/. ``_last_heartbeat_ts`` is owned by
# ``agent.core.telemetry.HeartbeatSaver`` and lazily initialised there.
self._local_save_path: Optional[str] = None
self._last_heartbeat_ts: Optional[float] = None
# Per-model probed reasoning-effort cache. Populated by the probe
# on /model switch, read by ``effective_effort_for`` below. Keys are
# raw model ids (including any ``:tag``). Values:
        #   str → the effort level to send (may be a downgrade from the
        #       preference, e.g. "high" when the user asked for "max")
# None → model rejected all efforts in the cascade; send no
# thinking params at all
# Key absent → not probed yet; fall back to the raw preference.
self.model_effective_effort: dict[str, str | None] = {}
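        # Illustrative cache state after two probes (model ids and values
        # hypothetical):
        #   {"openai/gpt-5:max": "high",   # preference "max" downgraded
        #    "moonshotai/Kimi-K2.6": None} # thinking params stripped entirely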
async def send_event(self, event: Event) -> None:
"""Send event back to client and log to trajectory"""
await self.event_queue.put(event)
# Log event to trajectory
self.logged_events.append(
{
"timestamp": datetime.now().isoformat(),
"event_type": event.event_type,
"data": event.data,
}
)
# Mid-turn heartbeat flush (owned by telemetry module).
from agent.core.telemetry import HeartbeatSaver
HeartbeatSaver.maybe_fire(self)
def cancel(self) -> None:
"""Signal cancellation to the running agent loop."""
self._cancelled.set()
def reset_cancel(self) -> None:
"""Clear the cancellation flag before a new run."""
self._cancelled.clear()
@property
def is_cancelled(self) -> bool:
return self._cancelled.is_set()
def update_model(self, model_name: str) -> None:
"""Switch the active model and update the context window limit."""
self.config.model_name = model_name
self.context_manager.model_max_tokens = _get_max_tokens_safe(model_name)
def effective_effort_for(self, model_name: str) -> str | None:
"""Resolve the effort level to actually send for ``model_name``.
Returns the probed result when we have one (may be ``None`` meaning
"model doesn't do thinking, strip it"), else the raw preference.
Unknown-model case falls back to the preference so a stale cache
from a prior ``/model`` can't poison research sub-calls that use a
different model id.
"""
if model_name in self.model_effective_effort:
return self.model_effective_effort[model_name]
return self.config.reasoning_effort
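
    # Illustrative resolution, assuming config.reasoning_effort == "medium"
    # and a probed cache of {"m1": "high", "m2": None} (ids hypothetical):
    #   effective_effort_for("m1") -> "high"    (probed downgrade)
    #   effective_effort_for("m2") -> None      (strip thinking params)
    #   effective_effort_for("m3") -> "medium"  (unprobed: raw preference)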
def increment_turn(self) -> None:
"""Increment turn counter (called after each user interaction)"""
self.turn_count += 1
async def auto_save_if_needed(self) -> None:
"""Check if auto-save should trigger and save if so (completely non-blocking)"""
if not self.config.save_sessions:
return
interval = self.config.auto_save_interval
if interval <= 0:
return
turns_since_last_save = self.turn_count - self.last_auto_save_turn
if turns_since_last_save >= interval:
logger.info(f"Auto-saving session (turn {self.turn_count})...")
# Fire-and-forget save - returns immediately
self.save_and_upload_detached(self.config.session_dataset_repo)
self.last_auto_save_turn = self.turn_count
def get_trajectory(self) -> dict:
"""Serialize complete session trajectory for logging"""
tools: list = []
if self.tool_router is not None:
try:
tools = self.tool_router.get_tool_specs_for_llm() or []
except Exception:
tools = []
# Sum per-call cost from llm_call events so analyzers don't have to
# walk the events array themselves. Each `llm_call` event already
# carries cost_usd from `agent.core.telemetry.record_llm_call`.
total_cost_usd = sum(
float((e.get("data") or {}).get("cost_usd") or 0.0)
for e in self.logged_events
if e.get("event_type") == "llm_call"
)
return {
"session_id": self.session_id,
"user_id": self.user_id,
"session_start_time": self.session_start_time,
"session_end_time": datetime.now().isoformat(),
"model_name": self.config.model_name,
"total_cost_usd": total_cost_usd,
"messages": [msg.model_dump() for msg in self.context_manager.items],
"events": self.logged_events,
"tools": tools,
}
def save_trajectory_local(
self,
directory: str = "session_logs",
upload_status: str = "pending",
dataset_url: Optional[str] = None,
) -> Optional[str]:
"""
        Save the trajectory to a local JSON file as a backup, recording upload status
Args:
directory: Directory to save logs (default: "session_logs")
upload_status: Status of upload attempt ("pending", "success", "failed")
dataset_url: URL of dataset if upload succeeded
Returns:
Path to saved file if successful, None otherwise
"""
try:
log_dir = Path(directory)
log_dir.mkdir(parents=True, exist_ok=True)
trajectory = self.get_trajectory()
# Scrub secrets at save time so session_logs/ never holds raw
# tokens on disk — a log aggregator, crash dump, or filesystem
# snapshot between heartbeats would otherwise leak them.
try:
from agent.core.redact import scrub
for key in ("messages", "events", "tools"):
if key in trajectory:
trajectory[key] = scrub(trajectory[key])
except Exception as _e:
logger.debug("Redact-on-save failed (non-fatal): %s", _e)
# Add upload metadata
trajectory["upload_status"] = upload_status
trajectory["upload_url"] = dataset_url
trajectory["last_save_time"] = datetime.now().isoformat()
# Reuse one stable path per session so heartbeat saves overwrite
# the same file instead of creating a new timestamped file every
# minute. The timestamp in the filename is kept for first-save
# ordering; subsequent saves just rewrite that file.
if self._local_save_path and Path(self._local_save_path).parent == log_dir:
filepath = Path(self._local_save_path)
else:
filename = (
f"session_{self.session_id}_"
f"{datetime.now().strftime('%Y%m%d_%H%M%S')}.json"
)
filepath = log_dir / filename
self._local_save_path = str(filepath)
# Atomic-ish write: stage to .tmp then rename so a crash mid-write
# doesn't leave a truncated JSON that breaks the retry scanner.
tmp_path = filepath.with_suffix(filepath.suffix + ".tmp")
with open(tmp_path, "w") as f:
json.dump(trajectory, f, indent=2)
tmp_path.replace(filepath)
return str(filepath)
except Exception as e:
logger.error(f"Failed to save session locally: {e}")
return None
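
    # Resulting on-disk layout (illustrative): one stable file per session,
    # rewritten in place by every heartbeat save:
    #   session_logs/session_<uuid4>_<YYYYmmdd_HHMMSS>.json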
def update_local_save_status(
self, filepath: str, upload_status: str, dataset_url: Optional[str] = None
) -> bool:
"""Update the upload status of an existing local save file"""
try:
with open(filepath, "r") as f:
data = json.load(f)
data["upload_status"] = upload_status
data["upload_url"] = dataset_url
data["last_save_time"] = datetime.now().isoformat()
with open(filepath, "w") as f:
json.dump(data, f, indent=2)
return True
except Exception as e:
logger.error(f"Failed to update local save status: {e}")
return False
def save_and_upload_detached(self, repo_id: str) -> Optional[str]:
"""
Save session locally and spawn detached subprocess for upload (fire-and-forget)
Args:
repo_id: HuggingFace dataset repo ID
Returns:
Path to local save file
"""
# Save locally first (fast, synchronous)
local_path = self.save_trajectory_local(upload_status="pending")
if not local_path:
return None
# Spawn detached subprocess for upload (fire-and-forget)
try:
uploader_script = Path(__file__).parent / "session_uploader.py"
# Use Popen with detached process
subprocess.Popen(
[sys.executable, str(uploader_script), "upload", local_path, repo_id],
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
start_new_session=True, # Detach from parent
)
except Exception as e:
logger.warning(f"Failed to spawn upload subprocess: {e}")
return local_path
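
    # Spawned command line (paths illustrative):
    #   <python> <this_dir>/session_uploader.py upload \
    #       session_logs/session_<id>_<ts>.json <user>/<dataset>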
@staticmethod
def retry_failed_uploads_detached(
directory: str = "session_logs", repo_id: Optional[str] = None
) -> None:
"""
Spawn detached subprocess to retry failed/pending uploads (fire-and-forget)
Args:
directory: Directory containing session logs
repo_id: Target dataset repo ID
"""
if not repo_id:
return
try:
uploader_script = Path(__file__).parent / "session_uploader.py"
# Spawn detached subprocess for retry
subprocess.Popen(
[sys.executable, str(uploader_script), "retry", directory, repo_id],
stdin=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL,
start_new_session=True, # Detach from parent
)
except Exception as e:
logger.warning(f"Failed to spawn retry subprocess: {e}")
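

# Minimal smoke-test sketch, not part of the module's API. It assumes the
# sibling agent.* modules import cleanly and that the default Config and
# ContextManager constructions above work without extra arguments.
if __name__ == "__main__":

    async def _demo() -> None:
        queue: asyncio.Queue = asyncio.Queue()
        session = Session(event_queue=queue)  # default bedrock Sonnet config
        await session.send_event(Event(event_type="demo", data={"turn": 0}))
        # The event comes back on the queue and is also mirrored into the
        # trajectory log that get_trajectory() serializes.
        print(await queue.get())
        print(session.get_trajectory()["session_id"])

    asyncio.run(_demo())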