File size: 3,124 Bytes
bcce530
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
import crypto from 'crypto'
import { redisGet, redisSet } from './redis'
import { getRedis } from './redis'

// Time-to-live for cached responses, in seconds (redisSet expects seconds).
const CACHE_TTL = 1 * 60 * 60 // 1 hour in seconds (reduced from 7 days)
// Namespace prefix for all prompt-cache keys stored in Redis.
const CACHE_PREFIX = 'cache:prompt:'

// Shape of a cache entry as serialized into Redis by cacheResponse.
interface CachedResponse {
    // The AI-generated text being cached.
    response: string
    // Model that produced the response; checked on read to guard hash collisions.
    model: string
    // Write time (ms since epoch); compared against the prompt's invalidation marker.
    timestamp: number
    // Prompt the response belongs to; checked on read to guard hash collisions.
    promptId: string
}

/**
 * Build the deterministic Redis key for a (prompt, inputs, model) triple.
 *
 * The key is CACHE_PREFIX plus the SHA-256 hex digest of a JSON payload
 * containing the promptId, the inputs (top-level keys sorted so property
 * order never changes the hash), and the model name.
 */
export function generateCacheKey(
    promptId: string,
    inputs: Record<string, unknown>,
    model: string
): string {
    // Re-assemble inputs with top-level keys in code-unit order so two
    // calls with the same data always serialize — and hash — identically.
    const canonicalInputs = Object.fromEntries(
        Object.entries(inputs).sort(([a], [b]) => (a < b ? -1 : a > b ? 1 : 0))
    )

    const payload = JSON.stringify({
        promptId,
        inputs: canonicalInputs,
        model,
    })

    const digest = crypto.createHash('sha256').update(payload).digest('hex')

    return `${CACHE_PREFIX}${digest}`
}

/**
 * Get a cached AI response if one exists and is still valid.
 *
 * Returns the cached response text, or null when there is no entry, the
 * entry belongs to a different prompt/model (hash collision guard), or the
 * entry predates the prompt's last invalidation marker.
 *
 * Fix: the previous version fetched the cache entry twice from Redis
 * (once inside the version check and again afterwards); we now fetch once
 * and run the version check against that single result. Also pass an
 * explicit radix to parseInt.
 */
export async function getCachedResponse(
    promptId: string,
    inputs: Record<string, unknown>,
    model: string
): Promise<string | null> {
    const key = generateCacheKey(promptId, inputs, model)

    const cached = await redisGet<CachedResponse>(key)

    if (!cached) {
        return null
    }

    // Verify the cached data is for the correct prompt/model — two different
    // (prompt, inputs, model) triples could in principle collide on the hash.
    if (cached.promptId !== promptId || cached.model !== model) {
        return null
    }

    // Check the prompt's invalidation marker: entries written before the
    // marker's timestamp are considered stale and skipped.
    const client = getRedis()
    if (client) {
        const versionKey = `cache:version:${promptId}`
        const invalidatedAt = await client.get<string>(versionKey)
        if (invalidatedAt && cached.timestamp < parseInt(invalidatedAt, 10)) {
            return null
        }
    }

    return cached.response
}

/**
 * Store an AI response in the cache under its deterministic key.
 *
 * The entry records the response text plus the prompt/model it belongs to
 * (validated again on read) and a write timestamp (compared against the
 * prompt's invalidation marker). Expires after CACHE_TTL seconds.
 */
export async function cacheResponse(
    promptId: string,
    inputs: Record<string, unknown>,
    model: string,
    response: string
): Promise<boolean> {
    const entry: CachedResponse = {
        response,
        model,
        timestamp: Date.now(),
        promptId,
    }

    const key = generateCacheKey(promptId, inputs, model)
    return await redisSet(key, entry, CACHE_TTL)
}

/**
 * Invalidate all cached responses for a prompt.
 * Uses a prompt-specific version key to effectively invalidate all entries.
 */
export async function invalidatePromptCache(promptId: string): Promise<void> {
    const client = getRedis()
    if (!client) return

    try {
        // Set a version key for this prompt — any cached entries with an older
        // version will be considered stale
        const versionKey = `cache:version:${promptId}`
        await client.set(versionKey, Date.now().toString())
    } catch (error) {
        console.error('Cache invalidation error:', error)
    }
}