// Next.js route handler: prompt search API (keyword + AI-expanded semantic search).
import { NextRequest, NextResponse } from "next/server"
import prisma from "@/lib/prisma"
import { generateText } from "ai"
import { openai } from "@ai-sdk/openai"
import { getAuthUser } from "@/lib/auth"
import { checkRateLimit, getClientIdentifier } from "@/lib/rate-limit"
import { redisGet, redisSet } from "@/lib/redis"
| // GET - Search for prompts (rate limited, semantic search cached) | |
| export async function GET(request: NextRequest) { | |
| try { | |
| const { searchParams } = new URL(request.url) | |
| const query = searchParams.get("q") | |
| const category = searchParams.get("category") | |
| const model = searchParams.get("model") | |
| const semantic = searchParams.get("semantic") === "true" | |
| const limit = Math.min(parseInt(searchParams.get("limit") || "20"), 100) | |
| const offset = Math.max(parseInt(searchParams.get("offset") || "0"), 0) | |
| if (!query || query.trim().length < 2) { | |
| return NextResponse.json( | |
| { error: "Query must be at least 2 characters" }, | |
| { status: 400 } | |
| ) | |
| } | |
| // Rate limit — semantic search is expensive, use a stricter key prefix | |
| const authUser = await getAuthUser() | |
| const identifier = getClientIdentifier(request, authUser?.id ?? undefined) | |
| const rateLimitKey = semantic ? `search-semantic:${identifier}` : `search:${identifier}` | |
| const rateLimit = await checkRateLimit(rateLimitKey, !!authUser, false) | |
| if (!rateLimit.success) { | |
| return NextResponse.json({ error: rateLimit.error }, { status: 429 }) | |
| } | |
| // For semantic search, apply a secondary per-minute burst limit | |
| if (semantic) { | |
| const burstKey = `search-semantic-burst:${identifier}` | |
| const burstLimit = await checkRateLimit(burstKey, !!authUser, false) | |
| if (!burstLimit.success) { | |
| return NextResponse.json( | |
| { error: "Semantic search rate limit exceeded. Please wait a moment." }, | |
| { status: 429 } | |
| ) | |
| } | |
| } | |
| // Build base query | |
| const whereClause: Record<string, unknown> = { | |
| visibility: "public", | |
| } | |
| if (category && category !== "all") { | |
| whereClause.category = category | |
| } | |
| if (model) { | |
| whereClause.modelAllowed = { has: model } | |
| } | |
| // If semantic search is enabled, use AI to expand the query (with caching) | |
| let searchTerms = [query.toLowerCase()] | |
| if (semantic) { | |
| const cacheKey = `search:semantic:${query.toLowerCase()}` | |
| const cached = await redisGet<string[]>(cacheKey) | |
| if (cached) { | |
| searchTerms = cached | |
| } else { | |
| try { | |
| const { text } = await generateText({ | |
| model: openai("gpt-4o-mini"), | |
| prompt: `Given this search query for AI prompts: "${query}" | |
| Generate 5 related search terms that would help find relevant prompts. Include: | |
| - Synonyms and related concepts | |
| - Common use cases | |
| - Related tasks | |
| Return ONLY a comma-separated list of terms, nothing else.`, | |
| }) | |
| const expandedTerms = text.split(",").map(t => t.trim().toLowerCase()) | |
| searchTerms = [...searchTerms, ...expandedTerms] | |
| // Cache for 1 hour | |
| await redisSet(cacheKey, searchTerms, 3600) | |
| } catch (error) { | |
| console.error("Semantic expansion failed, falling back to basic search:", error) | |
| } | |
| } | |
| } | |
| // Build OR clause for search | |
| const orClause = searchTerms.flatMap(term => [ | |
| { title: { contains: term, mode: "insensitive" as const } }, | |
| { description: { contains: term, mode: "insensitive" as const } }, | |
| { tags: { has: term } }, | |
| ]) | |
| // Search prompts with expanded terms | |
| const [prompts, total] = await Promise.all([ | |
| prisma.prompt.findMany({ | |
| where: { | |
| ...whereClause, | |
| OR: orClause, | |
| }, | |
| select: { | |
| id: true, | |
| slug: true, | |
| title: true, | |
| description: true, | |
| category: true, | |
| tags: true, | |
| totalRuns: true, | |
| starsCount: true, | |
| remixesCount: true, | |
| modelDefault: true, | |
| badges: true, | |
| framework: true, | |
| creator: { | |
| select: { | |
| id: true, | |
| name: true, | |
| username: true, | |
| image: true, | |
| } | |
| } | |
| }, | |
| orderBy: [ | |
| { starsCount: "desc" }, | |
| { totalRuns: "desc" }, | |
| ], | |
| take: limit, | |
| skip: offset, | |
| }), | |
| prisma.prompt.count({ | |
| where: { | |
| ...whereClause, | |
| OR: orClause, | |
| }, | |
| }), | |
| ]) | |
| // Calculate relevance scores for ranking | |
| const scoredPrompts = prompts.map(prompt => { | |
| let score = 0 | |
| const lowerQuery = query.toLowerCase() | |
| // Title match (highest weight) | |
| if (prompt.title.toLowerCase().includes(lowerQuery)) { | |
| score += 100 | |
| } | |
| // Description match | |
| if (prompt.description?.toLowerCase().includes(lowerQuery)) { | |
| score += 50 | |
| } | |
| // Tag match | |
| if (prompt.tags.some(tag => tag.toLowerCase().includes(lowerQuery))) { | |
| score += 30 | |
| } | |
| // Engagement boost | |
| score += Math.log(prompt.totalRuns + 1) * 5 | |
| score += prompt.starsCount * 2 | |
| score += prompt.remixesCount * 3 | |
| // Badge boost | |
| if (prompt.badges && prompt.badges.length > 0) { | |
| score += prompt.badges.length * 10 | |
| } | |
| return { ...prompt, relevanceScore: score } | |
| }) | |
| // Sort by relevance score | |
| scoredPrompts.sort((a, b) => b.relevanceScore - a.relevanceScore) | |
| return NextResponse.json({ | |
| prompts: scoredPrompts, | |
| searchTerms: semantic ? searchTerms : [query], | |
| total, | |
| hasMore: offset + prompts.length < total, | |
| }, { | |
| headers: { | |
| 'Cache-Control': 'public, s-maxage=60, stale-while-revalidate=300', | |
| }, | |
| }) | |
| } catch (error) { | |
| console.error("Search error:", error) | |
| return NextResponse.json( | |
| { error: "Search failed" }, | |
| { status: 500 } | |
| ) | |
| } | |
| } | |