| export declare const PERF_CONSTANTS: { |
| readonly DEFAULT_CACHE_SIZE: 1000; |
| readonly DEFAULT_BUFFER_POOL_SIZE: 64; |
| readonly DEFAULT_BATCH_SIZE: 32; |
| readonly MIN_PARALLEL_BATCH_SIZE: 8; |
| readonly UNROLL_THRESHOLD: 32; |
| }; |
| export declare class LRUCache<K, V> { |
| private capacity; |
| private map; |
| private head; |
| private tail; |
| private hits; |
| private misses; |
| constructor(capacity?: number); |
| |
| |
| |
| get(key: K): V | undefined; |
| |
| |
| |
| set(key: K, value: V): void; |
| |
| |
| |
| has(key: K): boolean; |
| |
| |
| |
| delete(key: K): boolean; |
| |
| |
| |
| clear(): void; |
| |
| |
| |
| get size(): number; |
| |
| |
| |
| getStats(): { |
| size: number; |
| capacity: number; |
| hits: number; |
| misses: number; |
| hitRate: number; |
| }; |
| |
| |
| |
| resetStats(): void; |
| private moveToHead; |
| private addToHead; |
| private removeNode; |
| private evictLRU; |
| |
| |
| |
| entries(): Generator<[K, V]>; |
| } |
| export declare class Float32BufferPool { |
| private pools; |
| private maxPoolSize; |
| private allocations; |
| private reuses; |
| constructor(maxPoolSize?: number); |
| |
| |
| |
| acquire(size: number): Float32Array; |
| |
| |
| |
| release(buffer: Float32Array): void; |
| |
| |
| |
| prewarm(sizes: number[], count?: number): void; |
| |
| |
| |
| clear(): void; |
| |
| |
| |
| getStats(): { |
| allocations: number; |
| reuses: number; |
| reuseRate: number; |
| pooledBuffers: number; |
| }; |
| } |
| export declare class TensorBufferManager { |
| private bufferPool; |
| private workingBuffers; |
| constructor(pool?: Float32BufferPool); |
| |
| |
| |
| getWorking(name: string, size: number): Float32Array; |
| |
| |
| |
| getTemp(size: number): Float32Array; |
| |
| |
| |
| releaseTemp(buffer: Float32Array): void; |
| |
| |
| |
| releaseAll(): void; |
| |
| |
| |
| getPool(): Float32BufferPool; |
| } |
| export declare const VectorOps: { |
| |
| |
| |
| dot(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| normSq(a: Float32Array): number; |
| |
| |
| |
| norm(a: Float32Array): number; |
| |
| |
| |
| |
| cosine(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| distanceSq(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| distance(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| add(a: Float32Array, b: Float32Array, out: Float32Array): Float32Array; |
| |
| |
| |
| sub(a: Float32Array, b: Float32Array, out: Float32Array): Float32Array; |
| |
| |
| |
| scale(a: Float32Array, scalar: number, out: Float32Array): Float32Array; |
| |
| |
| |
| normalize(a: Float32Array): Float32Array; |
| |
| |
| |
| mean(vectors: Float32Array[], out: Float32Array): Float32Array; |
| }; |
| export interface BatchResult<T> { |
| results: T[]; |
| timing: { |
| totalMs: number; |
| perItemMs: number; |
| }; |
| } |
| export declare class ParallelBatchProcessor { |
| private batchSize; |
| private maxConcurrency; |
| constructor(options?: { |
| batchSize?: number; |
| maxConcurrency?: number; |
| }); |
| |
| |
| |
| processBatch<T, R>(items: T[], processor: (item: T, index: number) => Promise<R> | R): Promise<BatchResult<R>>; |
| |
| |
| |
| processSync<T, R>(items: T[], processor: (item: T, index: number) => R): BatchResult<R>; |
| |
| |
| |
| batchSimilarity(queries: Float32Array[], corpus: Float32Array[], k?: number): Array<Array<{ |
| index: number; |
| score: number; |
| }>>; |
| private chunkArray; |
| } |
| export interface CachedMemoryEntry { |
| id: string; |
| embedding: Float32Array; |
| content: string; |
| score: number; |
| } |
| export declare class OptimizedMemoryStore { |
| private cache; |
| private bufferPool; |
| private dimension; |
| constructor(options?: { |
| cacheSize?: number; |
| dimension?: number; |
| }); |
| |
| |
| |
| store(id: string, embedding: Float32Array | number[], content: string): void; |
| |
| |
| |
| get(id: string): CachedMemoryEntry | undefined; |
| |
| |
| |
| search(query: Float32Array, k?: number): CachedMemoryEntry[]; |
| |
| |
| |
| delete(id: string): boolean; |
| |
| |
| |
| getStats(): { |
| cache: ReturnType<LRUCache<string, CachedMemoryEntry>['getStats']>; |
| buffers: ReturnType<Float32BufferPool['getStats']>; |
| }; |
| } |
| declare const _default: { |
| LRUCache: typeof LRUCache; |
| Float32BufferPool: typeof Float32BufferPool; |
| TensorBufferManager: typeof TensorBufferManager; |
| VectorOps: { |
| |
| |
| |
| dot(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| normSq(a: Float32Array): number; |
| |
| |
| |
| norm(a: Float32Array): number; |
| |
| |
| |
| |
| cosine(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| distanceSq(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| distance(a: Float32Array, b: Float32Array): number; |
| |
| |
| |
| add(a: Float32Array, b: Float32Array, out: Float32Array): Float32Array; |
| |
| |
| |
| sub(a: Float32Array, b: Float32Array, out: Float32Array): Float32Array; |
| |
| |
| |
| scale(a: Float32Array, scalar: number, out: Float32Array): Float32Array; |
| |
| |
| |
| normalize(a: Float32Array): Float32Array; |
| |
| |
| |
| mean(vectors: Float32Array[], out: Float32Array): Float32Array; |
| }; |
| ParallelBatchProcessor: typeof ParallelBatchProcessor; |
| OptimizedMemoryStore: typeof OptimizedMemoryStore; |
| PERF_CONSTANTS: { |
| readonly DEFAULT_CACHE_SIZE: 1000; |
| readonly DEFAULT_BUFFER_POOL_SIZE: 64; |
| readonly DEFAULT_BATCH_SIZE: 32; |
| readonly MIN_PARALLEL_BATCH_SIZE: 8; |
| readonly UNROLL_THRESHOLD: 32; |
| }; |
| }; |
| export default _default; |