| import { Run, Providers } from '@librechat/agents'; |
| import { providerEndpointMap, KnownEndpoints } from 'librechat-data-provider'; |
| import type { |
| MultiAgentGraphConfig, |
| OpenAIClientOptions, |
| StandardGraphConfig, |
| AgentInputs, |
| GenericTool, |
| RunConfig, |
| IState, |
| } from '@librechat/agents'; |
| import type { IUser } from '@librechat/data-schemas'; |
| import type { Agent } from 'librechat-data-provider'; |
| import type * as t from '~/types'; |
| import { resolveHeaders, createSafeUser } from '~/utils/env'; |
|
|
/**
 * Providers for which per-chunk stream usage is disabled in favor of a final
 * aggregate `usage` payload (see the `streamUsage = false; usage = true`
 * override in `createRun`). Mixes `Providers` enum values with the
 * `KnownEndpoints.ollama` endpoint key, since ollama is matched by endpoint name.
 */
const customProviders = new Set([
  Providers.XAI,
  Providers.DEEPSEEK,
  Providers.OPENROUTER,
  KnownEndpoints.ollama,
]);
|
|
| export function getReasoningKey( |
| provider: Providers, |
| llmConfig: t.RunLLMConfig, |
| agentEndpoint?: string | null, |
| ): 'reasoning_content' | 'reasoning' { |
| let reasoningKey: 'reasoning_content' | 'reasoning' = 'reasoning_content'; |
| if (provider === Providers.GOOGLE) { |
| reasoningKey = 'reasoning'; |
| } else if ( |
| llmConfig.configuration?.baseURL?.includes(KnownEndpoints.openrouter) || |
| (agentEndpoint && agentEndpoint.toLowerCase().includes(KnownEndpoints.openrouter)) |
| ) { |
| reasoningKey = 'reasoning'; |
| } else if ( |
| (llmConfig as OpenAIClientOptions).useResponsesApi === true && |
| (provider === Providers.OPENAI || provider === Providers.AZURE) |
| ) { |
| reasoningKey = 'reasoning'; |
| } |
| return reasoningKey; |
| } |
|
|
/**
 * Agent shape accepted by `createRun`: a data-provider `Agent` whose `tools`
 * field is replaced with instantiated `GenericTool`s, plus run-time extras.
 */
type RunAgent = Omit<Agent, 'tools'> & {
  /** Instantiated tool implementations for this run. */
  tools?: GenericTool[];
  /** Maximum context window (tokens) for this agent's model. */
  maxContextTokens?: number;
  /** Use legacy (string) content blocks instead of structured content. */
  useLegacyContent?: boolean;
  /** Per-tool context strings, joined into the system instructions. */
  toolContextMap?: Record<string, string>;
};
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| export async function createRun({ |
| runId, |
| signal, |
| agents, |
| requestBody, |
| user, |
| tokenCounter, |
| customHandlers, |
| indexTokenCountMap, |
| streaming = true, |
| streamUsage = true, |
| }: { |
| agents: RunAgent[]; |
| signal: AbortSignal; |
| runId?: string; |
| streaming?: boolean; |
| streamUsage?: boolean; |
| requestBody?: t.RequestBody; |
| user?: IUser; |
| } & Pick<RunConfig, 'tokenCounter' | 'customHandlers' | 'indexTokenCountMap'>): Promise< |
| Run<IState> |
| > { |
| const agentInputs: AgentInputs[] = []; |
| const buildAgentContext = (agent: RunAgent) => { |
| const provider = |
| (providerEndpointMap[ |
| agent.provider as keyof typeof providerEndpointMap |
| ] as unknown as Providers) ?? agent.provider; |
|
|
| const llmConfig: t.RunLLMConfig = Object.assign( |
| { |
| provider, |
| streaming, |
| streamUsage, |
| }, |
| agent.model_parameters, |
| ); |
|
|
| const systemMessage = Object.values(agent.toolContextMap ?? {}) |
| .join('\n') |
| .trim(); |
|
|
| const systemContent = [ |
| systemMessage, |
| agent.instructions ?? '', |
| agent.additional_instructions ?? '', |
| ] |
| .join('\n') |
| .trim(); |
|
|
| |
| |
| |
| |
| |
| |
| if (llmConfig?.configuration?.defaultHeaders != null) { |
| llmConfig.configuration.defaultHeaders = resolveHeaders({ |
| headers: llmConfig.configuration.defaultHeaders as Record<string, string>, |
| user: createSafeUser(user), |
| body: requestBody, |
| }); |
| } |
|
|
| |
| if ( |
| customProviders.has(agent.provider) || |
| (agent.provider === Providers.OPENAI && agent.endpoint !== agent.provider) |
| ) { |
| llmConfig.streamUsage = false; |
| llmConfig.usage = true; |
| } |
|
|
| const reasoningKey = getReasoningKey(provider, llmConfig, agent.endpoint); |
| const agentInput: AgentInputs = { |
| provider, |
| reasoningKey, |
| agentId: agent.id, |
| tools: agent.tools, |
| clientOptions: llmConfig, |
| instructions: systemContent, |
| maxContextTokens: agent.maxContextTokens, |
| useLegacyContent: agent.useLegacyContent ?? false, |
| }; |
| agentInputs.push(agentInput); |
| }; |
|
|
| for (const agent of agents) { |
| buildAgentContext(agent); |
| } |
|
|
| const graphConfig: RunConfig['graphConfig'] = { |
| signal, |
| agents: agentInputs, |
| edges: agents[0].edges, |
| }; |
|
|
| if (agentInputs.length > 1 || ((graphConfig as MultiAgentGraphConfig).edges?.length ?? 0) > 0) { |
| (graphConfig as unknown as MultiAgentGraphConfig).type = 'multi-agent'; |
| } else { |
| (graphConfig as StandardGraphConfig).type = 'standard'; |
| } |
|
|
| return Run.create({ |
| runId, |
| graphConfig, |
| tokenCounter, |
| customHandlers, |
| indexTokenCountMap, |
| }); |
| } |
|
|