import { Prompt } from "@prisma/client"

export interface VariableSchema {
  name: string
  type: "text" | "textarea" | "dropdown" | "number" | "boolean"
  label: string
  placeholder?: string
  description?: string
  required?: boolean
  default?: string | number | boolean
  options?: string[] // For dropdown type
  min?: number // For number type
  max?: number // For number type
}

export interface PromptSchema {
  variables: VariableSchema[]
  output?: {
    format: "markdown" | "text" | "code"
    streaming: boolean
  }
}
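
// Illustrative only: a schema literal showing how the variable types compose.
// The variable names ("topic", "tone", "length") are hypothetical examples,
// not part of this module's API.
export const exampleSchema: PromptSchema = {
  variables: [
    { name: "topic", type: "text", label: "Topic", required: true },
    { name: "tone", type: "dropdown", label: "Tone", options: ["formal", "casual"], default: "formal" },
    { name: "length", type: "number", label: "Word count", min: 100, max: 2000, default: 500 },
  ],
  output: { format: "markdown", streaming: true },
}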

export interface PromptWithCreator extends Prompt {
  creator: {
    id: string
    name: string | null
    username: string | null
    image: string | null
  } | null
  parent?: {
    id: string
    slug: string
    title: string
  } | null
  _count?: {
    remixes: number
    stars: number
  }
}

export interface RunConfig {
  promptId: string
  template: string
  variables: Record<string, string | number | boolean>
  model: string
  maxTokens?: number
}
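
// Illustrative only: a RunConfig built against the example schema above. The
// promptId value and the {{variable}} placeholder syntax are assumptions;
// this module does not define the interpolation format.
export const exampleRun: RunConfig = {
  promptId: "prompt_abc123", // hypothetical id
  template: "Write a {{tone}} article about {{topic}} in roughly {{length}} words.",
  variables: { topic: "type-safe prompt templates", tone: "casual", length: 500 },
  model: "gemini-2.5-flash",
  maxTokens: 2048,
}

// Minimal interpolation sketch under the same {{variable}} assumption. Unknown
// placeholders are left untouched rather than replaced with "undefined".
export function renderTemplate(config: RunConfig): string {
  return config.template.replace(/\{\{(\w+)\}\}/g, (match, key) =>
    key in config.variables ? String(config.variables[key]) : match
  )
}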

// Cloud AI model configurations
export const AI_MODELS = {
  "gemini-2.5-flash": {
    name: "Gemini 2.5 Flash",
    provider: "google",
    description: "Latest & fastest Gemini model",
    maxTokens: 8192,
  },
  "gemini-2.0-flash": {
    name: "Gemini 2.0 Flash",
    provider: "google",
    description: "Fast multimodal",
    maxTokens: 4096,
  },
  "gpt-4o-mini": {
    name: "GPT-4o Mini",
    provider: "openai",
    description: "Fast and affordable",
    maxTokens: 4096,
  },
  "gpt-4o": {
    name: "GPT-4o",
    provider: "openai",
    description: "Most capable GPT",
    maxTokens: 4096,
  },
  "claude-3-5-sonnet": {
    name: "Claude 3.5 Sonnet",
    provider: "anthropic",
    description: "Great for long content",
    maxTokens: 4096,
  },
} as const

// Ollama model configurations (local models)
export const OLLAMA_MODELS = {
  // Vision-language models
  "qwen3-vl:2b": { name: "Qwen3-VL 2B", description: "Smallest vision-language model", maxTokens: 4096 },
  "qwen3-vl:4b": { name: "Qwen3-VL 4B", description: "Compact vision-language model", maxTokens: 4096 },
  "qwen3-vl:8b": { name: "Qwen3-VL 8B", description: "Balanced vision-language model", maxTokens: 4096 },
  "qwen3-vl:30b": { name: "Qwen3-VL 30B", description: "Powerful vision-language model", maxTokens: 8192 },
  // Coding models
  "devstral-small-2:24b": { name: "Devstral Small 2", description: "24B coding agent model", maxTokens: 8192 },
  "qwen3-coder:30b": { name: "Qwen3 Coder 30B", description: "Coding & agentic tasks", maxTokens: 8192 },
  "rnj-1:8b": { name: "RNJ-1 8B", description: "Code & STEM optimized", maxTokens: 4096 },
  // General models
  "gemma3:12b": { name: "Gemma 3 12B", description: "Capable single GPU model", maxTokens: 4096 },
  "gemma3:27b": { name: "Gemma 3 27B", description: "Largest Gemma model", maxTokens: 4096 },
  "ministral-3:3b": { name: "Ministral 3 3B", description: "Edge deployment model", maxTokens: 4096 },
  "ministral-3:8b": { name: "Ministral 3 8B", description: "Balanced edge model", maxTokens: 4096 },
  "glm-4.6": { name: "GLM 4.6", description: "Advanced agentic & reasoning", maxTokens: 4096 },
  // Thinking/reasoning models
  "gpt-oss:20b": { name: "GPT-OSS 20B", description: "OpenAI open-weight model", maxTokens: 4096 },
  "gpt-oss:120b": { name: "GPT-OSS 120B", description: "Large reasoning model", maxTokens: 8192 },
  "deepseek-v3.1:671b": { name: "DeepSeek V3.1", description: "Hybrid thinking model", maxTokens: 8192 },
  "deepseek-v3.2": { name: "DeepSeek V3.2", description: "Efficient reasoning & agents", maxTokens: 8192 },
  "cogito-2.1:671b": { name: "Cogito 2.1", description: "MIT licensed, commercial use", maxTokens: 8192 },
  "kimi-k2": { name: "Kimi K2", description: "MoE model for coding agents", maxTokens: 8192 },
  "kimi-k2-thinking": { name: "Kimi K2 Thinking", description: "Best open thinking model", maxTokens: 8192 },
  "qwen3-next:80b": { name: "Qwen3 Next 80B", description: "Efficient inference thinking", maxTokens: 8192 },
  // Generic fallbacks
  "llama3.2:3b": { name: "Llama 3.2 3B", description: "Compact Llama model", maxTokens: 4096 },
  "llama3.2:8b": { name: "Llama 3.2 8B", description: "Balanced Llama model", maxTokens: 4096 },
  "mistral:7b": { name: "Mistral 7B", description: "Fast general model", maxTokens: 4096 },
  "mixtral:8x7b": { name: "Mixtral 8x7B", description: "MoE general model", maxTokens: 8192 },
} as const

export type ModelId = keyof typeof AI_MODELS
export type OllamaModelId = keyof typeof OLLAMA_MODELS
export type AnyModelId = ModelId | OllamaModelId | string

// Gemini 2.5 Flash is the default; it matches the API key configured for this deployment
export const DEFAULT_MODEL: ModelId = "gemini-2.5-flash"
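
// Lookup sketch (not part of the original API): resolve a model id to its
// config entry from either map, or undefined for ids unknown to both.
export function getModelConfig(modelId: AnyModelId) {
  if (modelId in AI_MODELS) return AI_MODELS[modelId as ModelId]
  if (modelId in OLLAMA_MODELS) return OLLAMA_MODELS[modelId as OllamaModelId]
  return undefined
}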

// Helper to check whether a model runs on Ollama. Heuristic: an id counts as
// local if it appears in OLLAMA_MODELS or uses Ollama's "name:tag" form
// (no cloud id in AI_MODELS contains a colon).
export function isOllamaModel(modelId: string): boolean {
  return modelId in OLLAMA_MODELS || modelId.includes(":")
}
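
// Routing sketch (illustrative): how a caller might pick an endpoint from the
// heuristic. Ollama's default local API listens on port 11434; the cloud
// route here is a hypothetical placeholder, not defined by this module.
export function resolveEndpoint(modelId: string): string {
  return isOllamaModel(modelId)
    ? "http://localhost:11434/api/generate" // Ollama default port
    : "/api/run" // hypothetical server-side proxy for cloud providers
}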

// Get all models for selection
export function getAllModels() {
  const cloudModels = Object.entries(AI_MODELS).map(([id, config]) => ({
    id,
    ...config, // spreads name, provider, description, maxTokens
    isOllama: false,
  }))
  const ollamaModels = Object.entries(OLLAMA_MODELS).map(([id, config]) => ({
    id,
    ...config,
    provider: "ollama",
    isOllama: true,
  }))
  return { cloudModels, ollamaModels }
}
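
// Usage sketch: flatten both groups into options for a single model picker,
// labeling local models. Purely illustrative; no UI code lives in this file.
export function getModelOptions() {
  const { cloudModels, ollamaModels } = getAllModels()
  return [...cloudModels, ...ollamaModels].map((m) => ({
    value: m.id,
    label: m.isOllama ? `${m.name} (local)` : m.name,
  }))
}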