muthuk1 committed
Commit 641deaa · verified · 1 Parent(s): ceb4fb2

Add universal LLM provider layer supporting 12 providers including Ollama

Files changed (1)
  1. web/src/lib/llm-providers.ts +462 -0
web/src/lib/llm-providers.ts ADDED
@@ -0,0 +1,462 @@
+ /**
+  * Universal LLM Provider Layer
+  * =============================
+  * Single interface for 12+ LLM providers using the OpenAI SDK with a dynamic baseURL.
+  * All providers except Anthropic speak an OpenAI-compatible API, so the OpenAI SDK
+  * covers them with no extra dependencies; Anthropic is routed through its native SDK.
+  *
+  * Supported: OpenAI, Anthropic Claude, Google Gemini, Mistral, Cohere,
+  * Ollama (local), OpenRouter, Groq, xAI Grok, Together AI,
+  * HuggingFace Inference, DeepSeek
+  */
+
+ export type ProviderId =
+   | "openai"
+   | "anthropic"
+   | "gemini"
+   | "mistral"
+   | "cohere"
+   | "ollama"
+   | "openrouter"
+   | "groq"
+   | "xai"
+   | "together"
+   | "huggingface"
+   | "deepseek";
+
+ export interface ProviderConfig {
+   id: ProviderId;
+   name: string;
+   baseURL: string;
+   apiKeyEnv: string; // env var name
+   defaultModel: string;
+   models: ModelInfo[];
+   isLocal?: boolean; // Ollama etc.
+   requiresApiKey?: boolean;
+   costPer1kInput: number; // USD
+   costPer1kOutput: number;
+   supportsStreaming: boolean;
+   supportsJSON: boolean;
+   maxContextWindow: number;
+   notes?: string;
+ }
+
+ export interface ModelInfo {
+   id: string;
+   name: string;
+   contextWindow: number;
+   costPer1kInput: number;
+   costPer1kOutput: number;
+   speed: "fast" | "medium" | "slow";
+   quality: "high" | "medium" | "low";
+ }
+
+ export interface LLMRequest {
+   provider: ProviderId;
+   model?: string;
+   messages: { role: "system" | "user" | "assistant"; content: string }[];
+   temperature?: number;
+   maxTokens?: number;
+   jsonMode?: boolean;
+   stream?: boolean;
+ }
+
+ export interface LLMResponse {
+   content: string;
+   inputTokens: number;
+   outputTokens: number;
+   totalTokens: number;
+   latencyMs: number;
+   costUsd: number;
+   model: string;
+   provider: ProviderId;
+ }
+
+ // ── Provider Registry ──────────────────────────────────
+
+ export const PROVIDERS: Record<ProviderId, ProviderConfig> = {
+   openai: {
+     id: "openai",
+     name: "OpenAI",
+     baseURL: "https://api.openai.com/v1",
+     apiKeyEnv: "OPENAI_API_KEY",
+     defaultModel: "gpt-4o-mini",
+     costPer1kInput: 0.00015,
+     costPer1kOutput: 0.0006,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 128000,
+     requiresApiKey: true,
+     models: [
+       { id: "gpt-4o", name: "GPT-4o", contextWindow: 128000, costPer1kInput: 0.0025, costPer1kOutput: 0.01, speed: "medium", quality: "high" },
+       { id: "gpt-4o-mini", name: "GPT-4o Mini", contextWindow: 128000, costPer1kInput: 0.00015, costPer1kOutput: 0.0006, speed: "fast", quality: "medium" },
+       { id: "gpt-4.1", name: "GPT-4.1", contextWindow: 1047576, costPer1kInput: 0.002, costPer1kOutput: 0.008, speed: "medium", quality: "high" },
+       { id: "gpt-4.1-mini", name: "GPT-4.1 Mini", contextWindow: 1047576, costPer1kInput: 0.0004, costPer1kOutput: 0.0016, speed: "fast", quality: "medium" },
+       { id: "o3-mini", name: "o3-mini", contextWindow: 200000, costPer1kInput: 0.0011, costPer1kOutput: 0.0044, speed: "slow", quality: "high" },
+     ],
+   },
+
+   anthropic: {
+     id: "anthropic",
+     name: "Anthropic Claude",
+     baseURL: "https://api.anthropic.com/v1",
+     apiKeyEnv: "ANTHROPIC_API_KEY",
+     defaultModel: "claude-sonnet-4-20250514",
+     costPer1kInput: 0.003,
+     costPer1kOutput: 0.015,
+     supportsStreaming: true,
+     supportsJSON: false, // uses tool_use pattern
+     maxContextWindow: 200000,
+     requiresApiKey: true,
+     notes: "Uses native Anthropic SDK, not OpenAI-compat",
+     models: [
+       { id: "claude-sonnet-4-20250514", name: "Claude Sonnet 4", contextWindow: 200000, costPer1kInput: 0.003, costPer1kOutput: 0.015, speed: "medium", quality: "high" },
+       { id: "claude-opus-4-20250514", name: "Claude Opus 4", contextWindow: 200000, costPer1kInput: 0.015, costPer1kOutput: 0.075, speed: "slow", quality: "high" },
+       { id: "claude-haiku-4-20250514", name: "Claude Haiku 4", contextWindow: 200000, costPer1kInput: 0.0008, costPer1kOutput: 0.004, speed: "fast", quality: "medium" },
+       { id: "claude-3-5-sonnet-20241022", name: "Claude 3.5 Sonnet", contextWindow: 200000, costPer1kInput: 0.003, costPer1kOutput: 0.015, speed: "medium", quality: "high" },
+     ],
+   },
+
+   gemini: {
+     id: "gemini",
+     name: "Google Gemini",
+     baseURL: "https://generativelanguage.googleapis.com/v1beta/openai/",
+     apiKeyEnv: "GEMINI_API_KEY",
+     defaultModel: "gemini-2.0-flash",
+     costPer1kInput: 0.0001,
+     costPer1kOutput: 0.0004,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 1048576,
+     requiresApiKey: true,
+     models: [
+       { id: "gemini-2.5-flash", name: "Gemini 2.5 Flash", contextWindow: 1048576, costPer1kInput: 0.00015, costPer1kOutput: 0.0006, speed: "fast", quality: "high" },
+       { id: "gemini-2.0-flash", name: "Gemini 2.0 Flash", contextWindow: 1048576, costPer1kInput: 0.0001, costPer1kOutput: 0.0004, speed: "fast", quality: "medium" },
+       { id: "gemini-2.5-pro", name: "Gemini 2.5 Pro", contextWindow: 1048576, costPer1kInput: 0.00125, costPer1kOutput: 0.005, speed: "medium", quality: "high" },
+     ],
+   },
+
+   mistral: {
+     id: "mistral",
+     name: "Mistral AI",
+     baseURL: "https://api.mistral.ai/v1",
+     apiKeyEnv: "MISTRAL_API_KEY",
+     defaultModel: "mistral-large-latest",
+     costPer1kInput: 0.002,
+     costPer1kOutput: 0.006,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 128000,
+     requiresApiKey: true,
+     models: [
+       { id: "mistral-large-latest", name: "Mistral Large", contextWindow: 128000, costPer1kInput: 0.002, costPer1kOutput: 0.006, speed: "medium", quality: "high" },
+       { id: "mistral-small-latest", name: "Mistral Small", contextWindow: 128000, costPer1kInput: 0.0002, costPer1kOutput: 0.0006, speed: "fast", quality: "medium" },
+       { id: "codestral-latest", name: "Codestral", contextWindow: 256000, costPer1kInput: 0.0003, costPer1kOutput: 0.0009, speed: "fast", quality: "high" },
+     ],
+   },
+
+   cohere: {
+     id: "cohere",
+     name: "Cohere",
+     baseURL: "https://api.cohere.ai/compatibility/v1",
+     apiKeyEnv: "COHERE_API_KEY",
+     defaultModel: "command-r-plus",
+     costPer1kInput: 0.0025,
+     costPer1kOutput: 0.01,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 128000,
+     requiresApiKey: true,
+     models: [
+       { id: "command-r-plus", name: "Command R+", contextWindow: 128000, costPer1kInput: 0.0025, costPer1kOutput: 0.01, speed: "medium", quality: "high" },
+       { id: "command-r", name: "Command R", contextWindow: 128000, costPer1kInput: 0.00015, costPer1kOutput: 0.0006, speed: "fast", quality: "medium" },
+     ],
+   },
+
+   ollama: {
+     id: "ollama",
+     name: "Ollama (Local)",
+     baseURL: "http://localhost:11434/v1",
+     apiKeyEnv: "",
+     defaultModel: "llama3.2",
+     costPer1kInput: 0,
+     costPer1kOutput: 0,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 131072,
+     isLocal: true,
+     requiresApiKey: false,
+     notes: "Free, runs locally. Install Ollama then: ollama pull llama3.2",
+     models: [
+       { id: "llama3.2", name: "Llama 3.2 3B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "fast", quality: "medium" },
+       { id: "llama3.2:1b", name: "Llama 3.2 1B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "fast", quality: "low" },
+       { id: "qwen2.5:7b", name: "Qwen 2.5 7B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "medium" },
+       { id: "qwen2.5:14b", name: "Qwen 2.5 14B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "slow", quality: "high" },
+       { id: "deepseek-r1:7b", name: "DeepSeek R1 7B", contextWindow: 65536, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "high" },
+       { id: "mistral:7b", name: "Mistral 7B", contextWindow: 32768, costPer1kInput: 0, costPer1kOutput: 0, speed: "fast", quality: "medium" },
+       { id: "gemma2:9b", name: "Gemma 2 9B", contextWindow: 8192, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "medium" },
+       { id: "phi3:14b", name: "Phi-3 14B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "high" },
+     ],
+   },
+
+   openrouter: {
+     id: "openrouter",
+     name: "OpenRouter",
+     baseURL: "https://openrouter.ai/api/v1",
+     apiKeyEnv: "OPENROUTER_API_KEY",
+     defaultModel: "meta-llama/llama-3.3-70b-instruct",
+     costPer1kInput: 0.0004,
+     costPer1kOutput: 0.0004,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 131072,
+     requiresApiKey: true,
+     notes: "Access 200+ models from all providers via single API key",
+     models: [
+       { id: "meta-llama/llama-3.3-70b-instruct", name: "Llama 3.3 70B", contextWindow: 131072, costPer1kInput: 0.0004, costPer1kOutput: 0.0004, speed: "medium", quality: "high" },
+       { id: "google/gemini-2.0-flash-exp:free", name: "Gemini Flash (Free)", contextWindow: 1048576, costPer1kInput: 0, costPer1kOutput: 0, speed: "fast", quality: "medium" },
+       { id: "deepseek/deepseek-r1", name: "DeepSeek R1", contextWindow: 65536, costPer1kInput: 0.0005, costPer1kOutput: 0.002, speed: "slow", quality: "high" },
+       { id: "qwen/qwen-2.5-72b-instruct", name: "Qwen 2.5 72B", contextWindow: 131072, costPer1kInput: 0.0004, costPer1kOutput: 0.0004, speed: "medium", quality: "high" },
+     ],
+   },
+
+   groq: {
+     id: "groq",
+     name: "Groq",
+     baseURL: "https://api.groq.com/openai/v1",
+     apiKeyEnv: "GROQ_API_KEY",
+     defaultModel: "llama-3.3-70b-versatile",
+     costPer1kInput: 0.00059,
+     costPer1kOutput: 0.00079,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 131072,
+     requiresApiKey: true,
+     notes: "Ultra-fast inference on custom LPU hardware",
+     models: [
+       { id: "llama-3.3-70b-versatile", name: "Llama 3.3 70B", contextWindow: 131072, costPer1kInput: 0.00059, costPer1kOutput: 0.00079, speed: "fast", quality: "high" },
+       { id: "llama-3.1-8b-instant", name: "Llama 3.1 8B", contextWindow: 131072, costPer1kInput: 0.00005, costPer1kOutput: 0.00008, speed: "fast", quality: "medium" },
+       { id: "mixtral-8x7b-32768", name: "Mixtral 8x7B", contextWindow: 32768, costPer1kInput: 0.00024, costPer1kOutput: 0.00024, speed: "fast", quality: "medium" },
+     ],
+   },
+
+   xai: {
+     id: "xai",
+     name: "xAI Grok",
+     baseURL: "https://api.x.ai/v1",
+     apiKeyEnv: "XAI_API_KEY",
+     defaultModel: "grok-3-mini",
+     costPer1kInput: 0.003,
+     costPer1kOutput: 0.015,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 131072,
+     requiresApiKey: true,
+     models: [
+       { id: "grok-3-mini", name: "Grok 3 Mini", contextWindow: 131072, costPer1kInput: 0.0003, costPer1kOutput: 0.0005, speed: "fast", quality: "medium" },
+       { id: "grok-3", name: "Grok 3", contextWindow: 131072, costPer1kInput: 0.003, costPer1kOutput: 0.015, speed: "medium", quality: "high" },
+     ],
+   },
+
+   together: {
+     id: "together",
+     name: "Together AI",
+     baseURL: "https://api.together.xyz/v1",
+     apiKeyEnv: "TOGETHER_API_KEY",
+     defaultModel: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
+     costPer1kInput: 0.00088,
+     costPer1kOutput: 0.00088,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 131072,
+     requiresApiKey: true,
+     models: [
+       { id: "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo", name: "Llama 3.1 70B Turbo", contextWindow: 131072, costPer1kInput: 0.00088, costPer1kOutput: 0.00088, speed: "fast", quality: "high" },
+       { id: "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo", name: "Llama 3.1 8B Turbo", contextWindow: 131072, costPer1kInput: 0.00018, costPer1kOutput: 0.00018, speed: "fast", quality: "medium" },
+       { id: "Qwen/Qwen2.5-72B-Instruct-Turbo", name: "Qwen 2.5 72B Turbo", contextWindow: 131072, costPer1kInput: 0.0012, costPer1kOutput: 0.0012, speed: "fast", quality: "high" },
+     ],
+   },
+
+   huggingface: {
+     id: "huggingface",
+     name: "HuggingFace Inference",
+     baseURL: "https://api-inference.huggingface.co/v1",
+     apiKeyEnv: "HF_TOKEN",
+     defaultModel: "meta-llama/Llama-3.3-70B-Instruct",
+     costPer1kInput: 0,
+     costPer1kOutput: 0,
+     supportsStreaming: true,
+     supportsJSON: false,
+     maxContextWindow: 131072,
+     requiresApiKey: true,
+     notes: "Free tier available, PRO for higher limits",
+     models: [
+       { id: "meta-llama/Llama-3.3-70B-Instruct", name: "Llama 3.3 70B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "high" },
+       { id: "Qwen/Qwen2.5-72B-Instruct", name: "Qwen 2.5 72B", contextWindow: 131072, costPer1kInput: 0, costPer1kOutput: 0, speed: "medium", quality: "high" },
+       { id: "mistralai/Mistral-7B-Instruct-v0.3", name: "Mistral 7B", contextWindow: 32768, costPer1kInput: 0, costPer1kOutput: 0, speed: "fast", quality: "medium" },
+     ],
+   },
+
+   deepseek: {
+     id: "deepseek",
+     name: "DeepSeek",
+     baseURL: "https://api.deepseek.com/v1",
+     apiKeyEnv: "DEEPSEEK_API_KEY",
+     defaultModel: "deepseek-chat",
+     costPer1kInput: 0.00014,
+     costPer1kOutput: 0.00028,
+     supportsStreaming: true,
+     supportsJSON: true,
+     maxContextWindow: 65536,
+     requiresApiKey: true,
+     models: [
+       { id: "deepseek-chat", name: "DeepSeek V3", contextWindow: 65536, costPer1kInput: 0.00014, costPer1kOutput: 0.00028, speed: "fast", quality: "high" },
+       { id: "deepseek-reasoner", name: "DeepSeek R1", contextWindow: 65536, costPer1kInput: 0.00055, costPer1kOutput: 0.00219, speed: "slow", quality: "high" },
+     ],
+   },
+ };
+
+ // ── Universal LLM Client ─────────────────────────────────
+
+ export async function callLLM(request: LLMRequest): Promise<LLMResponse> {
+   const provider = PROVIDERS[request.provider];
+   if (!provider) throw new Error(`Unknown provider: ${request.provider}`);
+
+   const model = request.model || provider.defaultModel;
+   const modelInfo = provider.models.find((m) => m.id === model) || provider.models[0];
+   const startTime = Date.now();
+
+   // ── Anthropic uses its own SDK ───────────────────────
+   if (request.provider === "anthropic") {
+     return callAnthropic(request, provider, modelInfo, startTime);
+   }
+
+   // ── All other providers use the OpenAI SDK ───────────
+   const OpenAI = (await import("openai")).default;
+
+   const apiKey = provider.isLocal
+     ? "ollama" // SDK requires a non-empty key; local servers ignore it
+     : process.env[provider.apiKeyEnv] || "";
+
+   if (!apiKey && provider.requiresApiKey) {
+     throw new Error(`Missing API key: set ${provider.apiKeyEnv} environment variable`);
+   }
+
+   const client = new OpenAI({
+     baseURL: provider.baseURL,
+     apiKey,
+   });
+
+   const params: Record<string, unknown> = {
+     model,
+     messages: request.messages,
+     temperature: request.temperature ?? 0,
+     max_tokens: request.maxTokens ?? 1024,
+   };
+
+   if (request.jsonMode && provider.supportsJSON) {
+     params.response_format = { type: "json_object" };
+   }
+
+   const response = await client.chat.completions.create(params as never);
+   const latencyMs = Date.now() - startTime;
+
+   const content = response.choices?.[0]?.message?.content || "";
+   const usage = response.usage || { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 };
+
+   return {
+     content,
+     inputTokens: usage.prompt_tokens,
+     outputTokens: usage.completion_tokens,
+     totalTokens: usage.total_tokens,
+     latencyMs,
+     costUsd:
+       (usage.prompt_tokens / 1000) * modelInfo.costPer1kInput +
+       (usage.completion_tokens / 1000) * modelInfo.costPer1kOutput,
+     model,
+     provider: request.provider,
+   };
+ }
+
+ // ── Anthropic-specific handler ────────────────────────
+
+ async function callAnthropic(
+   request: LLMRequest,
+   provider: ProviderConfig,
+   modelInfo: ModelInfo,
+   startTime: number
+ ): Promise<LLMResponse> {
+   const Anthropic = (await import("@anthropic-ai/sdk")).default;
+   const client = new Anthropic({ apiKey: process.env[provider.apiKeyEnv] });
+
+   const systemMsg = request.messages.find((m) => m.role === "system");
+   const userMsgs = request.messages.filter((m) => m.role !== "system");
+
+   const model = request.model || provider.defaultModel;
+
+   const msg = await client.messages.create({
+     model,
+     max_tokens: request.maxTokens ?? 1024,
+     temperature: request.temperature ?? 0,
+     system: systemMsg?.content || undefined,
+     messages: userMsgs.map((m) => ({ role: m.role as "user" | "assistant", content: m.content })),
+   });
+
+   const latencyMs = Date.now() - startTime;
+   const content = msg.content[0]?.type === "text" ? msg.content[0].text : "";
+
+   return {
+     content,
+     inputTokens: msg.usage.input_tokens,
+     outputTokens: msg.usage.output_tokens,
+     totalTokens: msg.usage.input_tokens + msg.usage.output_tokens,
+     latencyMs,
+     costUsd:
+       (msg.usage.input_tokens / 1000) * modelInfo.costPer1kInput +
+       (msg.usage.output_tokens / 1000) * modelInfo.costPer1kOutput,
+     model,
+     provider: "anthropic",
+   };
+ }
+
+ // ── Helper: Check if Ollama is running ───────────────
+
+ export async function checkOllamaHealth(): Promise<{ ok: boolean; models: string[] }> {
+   try {
+     const res = await fetch("http://localhost:11434/api/tags", { signal: AbortSignal.timeout(2000) });
+     if (!res.ok) return { ok: false, models: [] };
+     const data = await res.json();
+     return {
+       ok: true,
+       models: (data.models || []).map((m: { name: string }) => m.name),
+     };
+   } catch {
+     return { ok: false, models: [] };
+   }
+ }
+
+ // ── Helper: List available providers (those with API keys set) ──
+
+ export function getAvailableProviders(): ProviderId[] {
+   return (Object.keys(PROVIDERS) as ProviderId[]).filter((id) => {
+     const p = PROVIDERS[id];
+     if (p.isLocal) return true; // Ollama always listed
+     if (!p.requiresApiKey) return true;
+     return !!process.env[p.apiKeyEnv];
+   });
+ }
+
+ // ── Provider display info for UI ─────────────────────
+
+ export function getProviderDisplayInfo() {
+   return Object.values(PROVIDERS).map((p) => ({
+     id: p.id,
+     name: p.name,
+     isLocal: p.isLocal ?? false,
+     hasApiKey: p.isLocal || !p.requiresApiKey || !!process.env[p.apiKeyEnv],
+     defaultModel: p.defaultModel,
+     models: p.models,
+     costPer1kInput: p.costPer1kInput,
+     costPer1kOutput: p.costPer1kOutput,
+     notes: p.notes,
+   }));
+ }
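
For reviewers, a minimal usage sketch of the new layer (not part of the committed file): the `@/lib/llm-providers` import alias, the `summarize` helper name, and the choice of Groq are illustrative assumptions; any registered provider works the same way.

```ts
// Hypothetical caller, assuming GROQ_API_KEY is set in the environment.
import { callLLM } from "@/lib/llm-providers";

export async function summarize(text: string): Promise<string> {
  const res = await callLLM({
    provider: "groq", // any ProviderId from the registry
    messages: [
      { role: "system", content: "Summarize the user's text in one sentence." },
      { role: "user", content: text },
    ],
    temperature: 0.2,
    maxTokens: 256,
  });
  // LLMResponse carries usage and cost alongside the text.
  console.log(`${res.provider}/${res.model} in ${res.latencyMs}ms, ~$${res.costUsd.toFixed(6)}`);
  return res.content;
}
```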
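And a sketch of how the two availability helpers could combine into a cloud-first, local-fallback chooser; `pickProvider` is a hypothetical name, not something this commit exports.

```ts
// Prefer the first configured cloud provider; fall back to local Ollama if running.
import { checkOllamaHealth, getAvailableProviders, type ProviderId } from "@/lib/llm-providers";

export async function pickProvider(): Promise<ProviderId> {
  const cloud = getAvailableProviders().filter((id) => id !== "ollama");
  if (cloud.length > 0) return cloud[0];
  const ollama = await checkOllamaHealth(); // 2s timeout against localhost:11434
  if (ollama.ok) return "ollama";
  throw new Error("No LLM provider available: set a provider API key or start Ollama");
}
```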