File size: 12,051 Bytes
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7184
 
 
cf0a8ed
234574a
cf0a8ed
234574a
 
 
 
 
 
 
 
bfb7184
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7184
234574a
 
 
bfb7184
234574a
 
 
 
 
 
 
 
 
 
bfb7184
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7184
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7184
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
cf0a8ed
234574a
cf0a8ed
 
 
 
 
 
234574a
 
 
 
 
cf0a8ed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234574a
cf0a8ed
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
234574a
 
cf0a8ed
234574a
 
 
 
 
 
 
 
 
 
cf0a8ed
234574a
 
 
 
 
 
 
 
 
 
 
 
cf0a8ed
 
 
bfb7184
 
cf0a8ed
bfb7184
 
 
 
 
 
 
 
234574a
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
bfb7184
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
"""VRAM-pressure-aware eviction cache - IMPROVEMENT-002.

Replaces static TTL-based eviction with adaptive LRU/LFU hybrid that responds
to actual GPU memory pressure. Monitors MI300X VRAM via PyRSMI and adjusts
eviction policy dynamically.

Eviction modes:
- RELAXED (VRAM < 70%): No eviction, TTL = 10 minutes
- NORMAL (70-85%): LRU eviction of entries idle > 2 min
- PRESSURE (85-92%): LFU by token_count, evict heaviest first
- CRITICAL (92-96%): Offload inactive KV tensors to CPU RAM
- EMERGENCY (VRAM >= 96%): Hard evict all idle > 30s, block new registrations
- WORKFLOW_AWARE: Evict by workflow step-graph priority (requires step_graph)
"""
import asyncio
import heapq
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import TYPE_CHECKING, Any, Optional

if TYPE_CHECKING:
    from apohara_context_forge.scheduling.step_graph import AgentStepGraph

from apohara_context_forge.metrics.vram_monitor import VRAMMonitor


class EvictionMode(Enum):
    """Eviction aggressiveness tier, derived from VRAM pressure.

    Pressure thresholds live in ``VRAMAwareCache._pressure_to_mode``:
    RELAXED < 0.70 <= NORMAL < 0.85 <= PRESSURE < 0.92 <= CRITICAL
    < 0.96 <= EMERGENCY. WORKFLOW_AWARE is never produced by the
    pressure mapping; it evicts by step-graph priority instead.
    """
    RELAXED = "relaxed"        # no eviction
    NORMAL = "normal"          # LRU: evict entries idle > 120s
    PRESSURE = "pressure"      # LFU-ish: evict heaviest 25% by tokens/access
    CRITICAL = "critical"      # flag idle entries for CPU offload
    EMERGENCY = "emergency"    # hard-evict idle > 30s and block new set()s
    WORKFLOW_AWARE = "workflow_aware"  # evict by workflow dependency order


@dataclass(order=True)
class CacheEntry:
    """One cache record, heap-orderable by ``priority`` alone.

    Priority formula (recomputed on every ``get``):
    ``last_accessed - access_count * 10`` — lower sorts first, i.e. is
    evicted first, so frequent+recent entries survive longer (LRU/LFU
    hybrid).
    """
    # Sole comparison key for the heap; lower = evict first.
    priority: float = field(compare=True)
    # time.monotonic() timestamp of the most recent access.
    last_accessed: float = field(compare=False, default_factory=time.monotonic)
    # Total accesses; weights the LFU half of the priority formula.
    access_count: int = field(compare=False, default=0)
    # Token footprint, used for the cache's VRAM budget accounting.
    token_count: int = field(compare=False, default=0)
    key: str = field(compare=False, default="")
    value: Any = field(compare=False, default=None)
    # Set by CRITICAL mode to flag this entry for CPU offload.
    offloaded_to_cpu: bool = field(compare=False, default=False)


class VRAMAwareCache:
    """
    LRU/LFU hybrid cache with VRAM pressure-responsive eviction.
    Monitors AMD MI300X memory in real-time via PyRSMI.
    
    Usage:
        cache = VRAMAwareCache(max_token_budget=50_000_000)  # 50M tokens = ~3GB
        await cache.start()
        await cache.set("agent1", context_entry, token_count=500)
        entry = await cache.get("agent1")
        await cache.stop()
    """
    
    VRAM_CHECK_INTERVAL = 2.0  # seconds between VRAM pressure checks
    
    def __init__(self, max_token_budget: int = 50_000_000, step_graph: Optional["AgentStepGraph"] = None):
        """
        Args:
            max_token_budget: Maximum tokens to hold in cache (~3GB for 64-layer model)
            step_graph: Optional workflow dependency graph for WORKFLOW_AWARE eviction
        """
        self._store: dict[str, CacheEntry] = {}
        self._heap: list[CacheEntry] = []
        self._total_tokens: int = 0
        self._max_token_budget = max_token_budget
        self._vram = VRAMMonitor()
        self._mode = EvictionMode.RELAXED
        self._lock = asyncio.Lock()
        self._monitor_task: Optional[asyncio.Task] = None
        self._blocked = False
        self._step_graph = step_graph
    
    async def start(self) -> None:
        """Start background VRAM monitor."""
        if self._monitor_task is not None:
            return
        self._monitor_task = asyncio.create_task(self._vram_monitor_loop())
    
    async def stop(self) -> None:
        """Stop background monitoring."""
        if self._monitor_task:
            self._monitor_task.cancel()
            try:
                await self._monitor_task
            except asyncio.CancelledError:
                pass
            self._monitor_task = None
    
    async def _vram_monitor_loop(self) -> None:
        """Background loop: check VRAM pressure every interval."""
        while True:
            try:
                pressure = self._vram.get_pressure()
                new_mode = self._pressure_to_mode(pressure, self._step_graph)
                if new_mode != self._mode:
                    self._mode = new_mode
                    if new_mode == EvictionMode.EMERGENCY:
                        self._blocked = True
                    elif self._mode == EvictionMode.EMERGENCY:
                        self._blocked = False
                    await self._apply_eviction_policy()
                await asyncio.sleep(self.VRAM_CHECK_INTERVAL)
            except asyncio.CancelledError:
                break
            except Exception as e:
                await asyncio.sleep(1)  # Brief backoff on error
    
    @staticmethod
    def _pressure_to_mode(pressure: float, step_graph=None) -> EvictionMode:
        """Convert VRAM pressure to eviction mode."""
        if pressure < 0.70:   return EvictionMode.RELAXED
        if pressure < 0.85:   return EvictionMode.NORMAL
        if pressure < 0.92:   return EvictionMode.PRESSURE
        if pressure < 0.96:   return EvictionMode.CRITICAL
        return EvictionMode.EMERGENCY
    
    async def set(self, key: str, value: Any, token_count: int) -> bool:
        """
        Store value in cache.
        
        Args:
            key: Cache key (e.g., "context:agent1")
            value: Value to store
            token_count: Token count for VRAM tracking
        
        Returns:
            True if stored, False if blocked in EMERGENCY mode
        """
        if self._blocked:
            return False
        
        entry = CacheEntry(
            priority=time.monotonic(),  # Will be updated by LRU/LFU formula
            last_accessed=time.monotonic(),
            access_count=1,
            token_count=token_count,
            key=key,
            value=value,
        )
        
        async with self._lock:
            # Evict old entry if key exists
            if key in self._store:
                old_entry = self._store[key]
                self._total_tokens -= old_entry.token_count
            
            self._store[key] = entry
            heapq.heappush(self._heap, entry)
            self._total_tokens += token_count
        
        # Trigger eviction check if needed
        if self._mode in (EvictionMode.PRESSURE, EvictionMode.CRITICAL, EvictionMode.EMERGENCY):
            await self._apply_eviction_policy()
        
        return True
    
    async def get(self, key: str) -> Any | None:
        """Retrieve value, updating access metadata."""
        async with self._lock:
            entry = self._store.get(key)
            if entry is None:
                return None
            
            # Update access metadata
            entry.last_accessed = time.monotonic()
            entry.access_count += 1
            # Recalculate priority: lower = evict first
            entry.priority = entry.last_accessed - (entry.access_count * 10)
            
            return entry.value
    
    async def delete(self, key: str) -> bool:
        """Delete entry from cache."""
        async with self._lock:
            entry = self._store.pop(key, None)
            if entry:
                self._total_tokens -= entry.token_count
                return True
            return False
    
    async def _apply_eviction_policy(self, pressure: Optional[float] = None) -> int:
        """
        Apply eviction policy based on current mode (or pressure override for testing).

        Args:
            pressure: Optional pressure value to use for mode determination (for testing).
                     If None, uses the actual VRAM pressure reading.

        Returns:
            Number of entries evicted
        """
        evicted = 0
        now = time.monotonic()

        # Determine mode: use pressure override if provided (for testing),
        # else respect pre-set _mode (tests set it directly),
        # else read from VRAM monitor (production)
        if pressure is not None:
            mode = self._pressure_to_mode(pressure, self._step_graph)
        elif hasattr(self, '_mode') and self._mode is not None:
            mode = self._mode
        else:
            mode = self._pressure_to_mode(self._vram.get_pressure(), self._step_graph)

        # Update internal mode if pressure override was provided (for testing)
        if pressure is not None:
            self._mode = mode

        async with self._lock:
            match mode:
                case EvictionMode.EMERGENCY:
                    self._blocked = True
                    # Hard evict everything idle > 30s
                    to_evict = [
                        k for k, e in self._store.items()
                        if now - e.last_accessed > 30
                    ]
                    for k in to_evict:
                        self._evict(k)
                        evicted += 1
                
                case EvictionMode.CRITICAL:
                    self._blocked = False
                    # Mark inactive for CPU offload instead of destroying
                    for entry in self._store.values():
                        if now - entry.last_accessed > 30 and not entry.offloaded_to_cpu:
                            entry.offloaded_to_cpu = True
                
                case EvictionMode.NORMAL:
                    self._blocked = False
                    # LRU: evict entries idle > 120s
                    to_evict = [
                        k for k, e in self._store.items()
                        if now - e.last_accessed > 120
                    ]
                    for k in to_evict:
                        self._evict(k)
                        evicted += 1
                
                case EvictionMode.PRESSURE:
                    self._blocked = False
                    # LFU by token_count: evict heaviest, least used first
                    candidates = sorted(
                        self._store.values(),
                        key=lambda e: e.token_count / max(e.access_count, 1),
                        reverse=True
                    )
                    # Evict top 25%
                    target = max(1, int(len(candidates) * 0.25))
                    for entry in candidates[:target]:
                        self._evict(entry.key)
                        evicted += 1
                
                case EvictionMode.RELAXED:
                    self._blocked = False
                    # No eviction needed
                
                case EvictionMode.WORKFLOW_AWARE:
                    self._blocked = False
                    if self._step_graph is not None:
                        priority_order = self._step_graph.get_eviction_priority_order()
                        # Evict in reverse priority order (lowest priority first)
                        for agent_id in reversed(priority_order):
                            key = f"context:{agent_id}"
                            if key in self._store:
                                self._evict(key)
                                evicted += 1
        
        if evicted > 0:
            await self._reheap()
        
        return evicted
    
    def _evict(self, key: str) -> None:
        """Remove entry. Must be called under lock."""
        entry = self._store.pop(key, None)
        if entry:
            self._total_tokens -= entry.token_count
    
    async def _reheap(self) -> None:
        """Rebuild heap after evictions."""
        self._heap = list(self._store.values())
        heapq.heapify(self._heap)
    
    async def clear(self) -> None:
        """Clear all entries."""
        async with self._lock:
            self._store.clear()
            self._heap.clear()
            self._total_tokens = 0
    
    @property
    def size(self) -> int:
        """Number of entries."""
        return len(self._store)
    
    @property
    def total_tokens(self) -> int:
        """Total token count in cache."""
        return self._total_tokens
    
    @property
    def mode(self) -> EvictionMode:
        """Current eviction mode."""
        return self._mode
    
    @property
    def is_blocked(self) -> bool:
        """True if new registrations are blocked (EMERGENCY mode)."""
        return self._blocked
    
    @property
    def step_graph(self) -> Optional["AgentStepGraph"]:
        """The workflow dependency graph for WORKFLOW_AWARE eviction."""
        return self._step_graph