| import express from 'express';
|
| import fetch from 'node-fetch';
|
| import { getConfig, getModelById, getEndpointByType, getSystemPrompt, getModelReasoning } from './config.js';
|
| import { logInfo, logDebug, logError, logRequest, logResponse } from './logger.js';
|
| import { transformToAnthropic, getAnthropicHeaders } from './transformers/request-anthropic.js';
|
| import { transformToOpenAI, getOpenAIHeaders } from './transformers/request-openai.js';
|
| import { transformToCommon, getCommonHeaders } from './transformers/request-common.js';
|
| import { AnthropicResponseTransformer } from './transformers/response-anthropic.js';
|
| import { OpenAIResponseTransformer } from './transformers/response-openai.js';
|
| import { getApiKey } from './auth.js';
|
|
|
// Express router exposing the OpenAI/Anthropic-compatible proxy endpoints.
const router = express.Router();
|
|
|
| |
| |
| |
|
|
/**
 * Convert an OpenAI Responses API result object into the Chat Completions
 * response shape so /v1/chat/completions callers get the format they expect.
 *
 * @param {object} resp - Parsed JSON body from the Responses API.
 * @returns {object} Object shaped like an OpenAI `chat.completion` response.
 * @throws {Error} If `resp` is missing or not an object.
 */
function convertResponseToChatCompletion(resp) {
  if (!resp || typeof resp !== 'object') {
    throw new Error('Invalid response object');
  }

  // The Responses API returns an `output` array; the assistant text lives in
  // the first item of type 'message', inside its 'output_text' content blocks.
  const outputMsg = (resp.output || []).find(o => o.type === 'message');
  const textBlocks = outputMsg?.content?.filter(c => c.type === 'output_text') || [];
  const content = textBlocks.map(c => c.text).join('');

  // FIX: total_tokens previously fell back to 0 even when input/output token
  // counts were present, yielding inconsistent usage; sum them as a fallback.
  const promptTokens = resp.usage?.input_tokens ?? 0;
  const completionTokens = resp.usage?.output_tokens ?? 0;

  return {
    // Responses ids look like 'resp_…'; re-brand as a chat-completion id.
    id: resp.id ? resp.id.replace(/^resp_/, 'chatcmpl-') : `chatcmpl-${Date.now()}`,
    object: 'chat.completion',
    created: resp.created_at || Math.floor(Date.now() / 1000),
    model: resp.model || 'unknown-model',
    choices: [
      {
        index: 0,
        message: {
          role: outputMsg?.role || 'assistant',
          content: content || ''
        },
        finish_reason: resp.status === 'completed' ? 'stop' : 'unknown'
      }
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: resp.usage?.total_tokens ?? (promptTokens + completionTokens)
    }
  };
}
|
|
|
// GET /v1/models — list the configured models in OpenAI's `list` format.
router.get('/v1/models', (req, res) => {
  logInfo('GET /v1/models');

  try {
    const config = getConfig();
    const models = config.models.map(model => ({
      id: model.id,
      object: 'model',
      // FIX: OpenAI `created` timestamps are Unix seconds, not milliseconds
      // (matches the `created` handling elsewhere in this file).
      created: Math.floor(Date.now() / 1000),
      owned_by: model.type,
      permission: [],
      root: model.id,
      parent: null
    }));

    const response = {
      object: 'list',
      data: models
    };

    logResponse(200, null, response);
    res.json(response);
  } catch (error) {
    logError('Error in GET /v1/models', error);
    res.status(500).json({ error: 'Internal server error' });
  }
});
|
|
|
|
|
/**
 * POST /v1/chat/completions — OpenAI-compatible chat endpoint.
 *
 * Looks up the requested model, transforms the OpenAI-style request into the
 * upstream endpoint's native format (anthropic / openai / common), forwards
 * it with fetch, and converts the response — or SSE stream — back into the
 * Chat Completions shape the client expects.
 *
 * @param {import('express').Request} req - Body is an OpenAI chat request.
 * @param {import('express').Response} res
 */
async function handleChatCompletions(req, res) {
  logInfo('POST /v1/chat/completions');

  try {
    const openaiRequest = req.body;
    const modelId = openaiRequest.model;

    // Validate model selection before doing any work.
    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // Each model type maps to one configured upstream endpoint.
    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Routing to ${model.type} endpoint: ${endpoint.base_url}`);

    // Resolve the upstream API key (may refresh); failure is a server error.
    let authHeader;
    try {
      authHeader = await getApiKey(req.headers.authorization);
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    let transformedRequest;
    let headers;
    const clientHeaders = req.headers;

    // Trace client-identifying headers that the header builders may forward.
    logDebug('Client headers received', {
      'x-factory-client': clientHeaders['x-factory-client'],
      'x-session-id': clientHeaders['x-session-id'],
      'x-assistant-message-id': clientHeaders['x-assistant-message-id'],
      'user-agent': clientHeaders['user-agent']
    });

    // Pick the request transformer + header builder for the endpoint type.
    if (model.type === 'anthropic') {
      transformedRequest = transformToAnthropic(openaiRequest);
      const isStreaming = openaiRequest.stream === true;
      headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);
    } else if (model.type === 'openai') {
      transformedRequest = transformToOpenAI(openaiRequest);
      headers = getOpenAIHeaders(authHeader, clientHeaders);
    } else if (model.type === 'common') {
      transformedRequest = transformToCommon(openaiRequest);
      headers = getCommonHeaders(authHeader, clientHeaders);
    } else {
      return res.status(500).json({ error: `Unknown endpoint type: ${model.type}` });
    }

    logRequest('POST', endpoint.base_url, headers, transformedRequest);

    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(transformedRequest)
    });

    logInfo(`Response status: ${response.status}`);

    // Relay upstream failures with their original status and body text.
    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText
      });
    }

    // NOTE(review): streaming is decided from the TRANSFORMED request here,
    // while the anthropic header branch above used openaiRequest.stream —
    // assumed the transformers preserve the `stream` flag; confirm.
    const isStreaming = transformedRequest.stream === true;

    if (isStreaming) {
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      if (model.type === 'common') {
        // 'common' endpoints already speak OpenAI SSE: pipe bytes through.
        try {
          for await (const chunk of response.body) {
            res.write(chunk);
          }
          res.end();
          logInfo('Stream forwarded (common type)');
        } catch (streamError) {
          logError('Stream error', streamError);
          res.end();
        }
      } else {
        // anthropic/openai streams are re-shaped into Chat Completions SSE.
        // (transformer is always assigned: unknown types returned 500 above.)
        let transformer;
        if (model.type === 'anthropic') {
          transformer = new AnthropicResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
        } else if (model.type === 'openai') {
          transformer = new OpenAIResponseTransformer(modelId, `chatcmpl-${Date.now()}`);
        }

        try {
          for await (const chunk of transformer.transformStream(response.body)) {
            res.write(chunk);
          }
          res.end();
          logInfo('Stream completed');
        } catch (streamError) {
          logError('Stream error', streamError);
          res.end();
        }
      }
    } else {
      const data = await response.json();
      if (model.type === 'openai') {
        // Convert the Responses-API payload; on conversion failure fall back
        // to returning the raw upstream body (best-effort, not a crash).
        try {
          const converted = convertResponseToChatCompletion(data);
          logResponse(200, null, converted);
          res.json(converted);
        } catch (e) {
          logResponse(200, null, data);
          res.json(data);
        }
      } else {
        logResponse(200, null, data);
        res.json(data);
      }
    }

  } catch (error) {
    logError('Error in /v1/chat/completions', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
|
|
|
|
/**
 * POST /v1/responses — direct passthrough to an OpenAI Responses endpoint.
 *
 * No request-shape transformation is done: the body is forwarded as-is after
 * injecting the configured system prompt into `instructions` and applying the
 * per-model reasoning effort. Only models of type 'openai' are accepted.
 * Streaming responses are piped through verbatim as SSE.
 *
 * @param {import('express').Request} req - Body is a Responses API request.
 * @param {import('express').Response} res
 */
async function handleDirectResponses(req, res) {
  logInfo('POST /v1/responses');

  try {
    const openaiRequest = req.body;
    const modelId = openaiRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // This route only supports openai-type endpoints (message is user-facing).
    if (model.type !== 'openai') {
      return res.status(400).json({
        error: 'Invalid endpoint type',
        message: `/v1/responses 接口只支持 openai 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);

    // Prefer the client's Authorization header; fall back to x-api-key
    // re-wrapped as a Bearer token, then to the server-managed key.
    let authHeader;
    try {
      const clientAuthFromXApiKey = req.headers['x-api-key']
        ? `Bearer ${req.headers['x-api-key']}`
        : null;
      authHeader = await getApiKey(req.headers.authorization || clientAuthFromXApiKey);
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    const clientHeaders = req.headers;

    const headers = getOpenAIHeaders(authHeader, clientHeaders);

    // Inject the configured system prompt ahead of any client instructions.
    // NOTE(review): no separator is inserted between the two strings —
    // assumed the configured prompt ends with whitespace/newline; confirm.
    const systemPrompt = getSystemPrompt();
    const modifiedRequest = { ...openaiRequest };
    if (systemPrompt) {
      if (modifiedRequest.instructions) {
        modifiedRequest.instructions = systemPrompt + modifiedRequest.instructions;
      } else {
        modifiedRequest.instructions = systemPrompt;
      }
    }

    // Apply the per-model reasoning effort from config.
    const reasoningLevel = getModelReasoning(modelId);
    if (reasoningLevel === 'auto') {
      // 'auto': leave any client-supplied `reasoning` config untouched.
    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
      modifiedRequest.reasoning = {
        effort: reasoningLevel,
        summary: 'auto'
      };
    } else {
      // Disabled/unknown level: strip any client-supplied reasoning config.
      delete modifiedRequest.reasoning;
    }

    logRequest('POST', endpoint.base_url, headers, modifiedRequest);

    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    });

    logInfo(`Response status: ${response.status}`);

    // Relay upstream failures with their original status and body text.
    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText
      });
    }

    const isStreaming = openaiRequest.stream === true;

    if (isStreaming) {
      // Pipe the upstream SSE stream through byte-for-byte.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        for await (const chunk of response.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('Stream forwarded successfully');
      } catch (streamError) {
        logError('Stream error', streamError);
        res.end();
      }
    } else {
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in /v1/responses', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
|
|
|
|
/**
 * POST /v1/messages — direct passthrough to an Anthropic Messages endpoint.
 *
 * No request-shape transformation is done: the body is forwarded as-is after
 * injecting the configured system prompt ahead of the client's `system`
 * content and applying the per-model extended-thinking budget. Only models of
 * type 'anthropic' are accepted. Streaming responses are piped through
 * verbatim as SSE.
 *
 * @param {import('express').Request} req - Body is an Anthropic Messages request.
 * @param {import('express').Response} res
 */
async function handleDirectMessages(req, res) {
  logInfo('POST /v1/messages');

  try {
    const anthropicRequest = req.body;
    const modelId = anthropicRequest.model;

    if (!modelId) {
      return res.status(400).json({ error: 'model is required' });
    }

    const model = getModelById(modelId);
    if (!model) {
      return res.status(404).json({ error: `Model ${modelId} not found` });
    }

    // This route only supports anthropic-type endpoints (message is user-facing).
    if (model.type !== 'anthropic') {
      return res.status(400).json({
        error: 'Invalid endpoint type',
        message: `/v1/messages 接口只支持 anthropic 类型端点,当前模型 ${modelId} 是 ${model.type} 类型`
      });
    }

    const endpoint = getEndpointByType(model.type);
    if (!endpoint) {
      return res.status(500).json({ error: `Endpoint type ${model.type} not found` });
    }

    logInfo(`Direct forwarding to ${model.type} endpoint: ${endpoint.base_url}`);

    // Prefer the client's Authorization header; fall back to x-api-key
    // (Anthropic SDKs send the key there) re-wrapped as a Bearer token,
    // then to the server-managed key.
    let authHeader;
    try {
      const clientAuthFromXApiKey = req.headers['x-api-key']
        ? `Bearer ${req.headers['x-api-key']}`
        : null;
      authHeader = await getApiKey(req.headers.authorization || clientAuthFromXApiKey);
    } catch (error) {
      logError('Failed to get API key', error);
      return res.status(500).json({
        error: 'API key not available',
        message: 'Failed to get or refresh API key. Please check server logs.'
      });
    }

    const clientHeaders = req.headers;

    const isStreaming = anthropicRequest.stream === true;
    const headers = getAnthropicHeaders(authHeader, clientHeaders, isStreaming, modelId);

    // Inject the configured system prompt ahead of whatever the client sent.
    const systemPrompt = getSystemPrompt();
    const modifiedRequest = { ...anthropicRequest };
    if (systemPrompt) {
      if (Array.isArray(modifiedRequest.system)) {
        // Client sent an array of content blocks: prepend ours.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt },
          ...modifiedRequest.system
        ];
      } else if (typeof modifiedRequest.system === 'string' && modifiedRequest.system !== '') {
        // FIX: the Anthropic API also accepts `system` as a plain string;
        // previously a string system prompt was silently discarded here.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt },
          { type: 'text', text: modifiedRequest.system }
        ];
      } else {
        // No usable client system prompt: use only the configured one.
        modifiedRequest.system = [
          { type: 'text', text: systemPrompt }
        ];
      }
    }

    // Map the configured reasoning level to an extended-thinking token budget.
    const reasoningLevel = getModelReasoning(modelId);
    if (reasoningLevel === 'auto') {
      // 'auto': leave any client-supplied `thinking` config untouched.
    } else if (reasoningLevel && ['low', 'medium', 'high'].includes(reasoningLevel)) {
      const budgetTokens = {
        'low': 4096,
        'medium': 12288,
        'high': 24576
      };

      modifiedRequest.thinking = {
        type: 'enabled',
        budget_tokens: budgetTokens[reasoningLevel]
      };
    } else {
      // Disabled/unknown level: strip any client-supplied thinking config.
      delete modifiedRequest.thinking;
    }

    logRequest('POST', endpoint.base_url, headers, modifiedRequest);

    const response = await fetch(endpoint.base_url, {
      method: 'POST',
      headers,
      body: JSON.stringify(modifiedRequest)
    });

    logInfo(`Response status: ${response.status}`);

    // Relay upstream failures with their original status and body text.
    if (!response.ok) {
      const errorText = await response.text();
      logError(`Endpoint error: ${response.status}`, new Error(errorText));
      return res.status(response.status).json({
        error: `Endpoint returned ${response.status}`,
        details: errorText
      });
    }

    if (isStreaming) {
      // Pipe the upstream SSE stream through byte-for-byte.
      res.setHeader('Content-Type', 'text/event-stream');
      res.setHeader('Cache-Control', 'no-cache');
      res.setHeader('Connection', 'keep-alive');

      try {
        for await (const chunk of response.body) {
          res.write(chunk);
        }
        res.end();
        logInfo('Stream forwarded successfully');
      } catch (streamError) {
        logError('Stream error', streamError);
        res.end();
      }
    } else {
      const data = await response.json();
      logResponse(200, null, data);
      res.json(data);
    }

  } catch (error) {
    logError('Error in /v1/messages', error);
    res.status(500).json({
      error: 'Internal server error',
      message: error.message
    });
  }
}
|
|
|
|
|
// Route registrations.
router.post('/v1/chat/completions', handleChatCompletions); // OpenAI-compatible chat API
router.post('/v1/responses', handleDirectResponses); // direct OpenAI Responses passthrough
router.post('/v1/messages', handleDirectMessages); // direct Anthropic Messages passthrough

export default router;
|
|
|