Spaces:
Paused
feat: dynamic model list fetching from Codex backend (#9)
* feat: dynamic model list fetching from Codex backend
Replace static-only model loading with a hybrid approach:
- Static YAML models as fallback baseline
- Background fetcher probes /codex/models and /models endpoints
- Backend models merge into catalog (backend wins, YAML fills gaps)
- Aliases always from YAML (user-customizable)
- New /debug/models endpoint for diagnostics
- All fetch errors non-fatal — gracefully falls back to static models
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: PR #9 review issues + complete Codex model catalog
- Remove dead _backendModelCount variable from model-store
- Fix hot-reload wiping backend models: add triggerImmediateRefresh()
in model-fetcher, chain it in reloadAllConfigs()
- Fix premature "hot-reloaded" log message ordering in config.ts
- Gate raw model sample logging to first fetch only in codex-api
- Remove dead re-exports from routes/models.ts
- Expand getModels() to probe sentinel/chat-requirements + flatten
nested category responses
- Rename ClaudeCodeSetup → AnthropicSetup with dynamic model dropdown
- Update translations (claudeCodeSetup → anthropicSetup)
- Complete Codex model catalog: 5.3/5.2/5.1 families with
base/high/mid/low/max/mini variants (23 static Codex models)
- Default model changed to gpt-5.2-codex, alias codex → gpt-5.2-codex
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
* fix: address PR #9 review issues
- Fix dead code in getModels(): scope flattened array per endpoint probe
- Replace async lazy imports in reloadAllConfigs() with direct sync imports
- Fix reasoning efforts merge using explicit flag instead of length > 1
- Add try/finally in scheduleNext() to prevent refresh loop from stopping
- Suppress noisy warn when unauthenticated by checking isAuthenticated()
- Return shallow copies from getModelCatalog()/getModelAliases()
- Rename ClaudeCodeSetup.tsx → AnthropicSetup.tsx to match export
- Use 'codex' alias instead of hardcoded 'gpt-5.2-codex' in use-status
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
---------
Co-authored-by: icebear0828 <icebear0828@users.noreply.github.com>
Co-authored-by: Claude Opus 4.6 <noreply@anthropic.com>
- config/default.yaml +1 -1
- config/models.yaml +279 -61
- src/config.ts +8 -3
- src/index.ts +9 -0
- src/models/model-fetcher.ts +117 -0
- src/models/model-store.ts +223 -0
- src/proxy/codex-api.ts +98 -0
- src/routes/gemini.ts +1 -1
- src/routes/models.ts +27 -66
- src/translation/anthropic-to-codex.ts +1 -1
- src/translation/gemini-to-codex.ts +1 -1
- src/translation/openai-to-codex.ts +1 -1
- web/src/App.tsx +4 -2
- web/src/components/{ClaudeCodeSetup.tsx → AnthropicSetup.tsx} +17 -29
- web/src/hooks/use-status.ts +1 -1
- web/src/i18n/translations.ts +4 -4
|
@@ -9,7 +9,7 @@ client:
|
|
| 9 |
arch: arm64
|
| 10 |
chromium_version: "137"
|
| 11 |
model:
|
| 12 |
-
default: gpt-5.
|
| 13 |
default_reasoning_effort: medium
|
| 14 |
suppress_desktop_directives: true
|
| 15 |
auth:
|
|
|
|
| 9 |
arch: arm64
|
| 10 |
chromium_version: "137"
|
| 11 |
model:
|
| 12 |
+
default: gpt-5.2-codex
|
| 13 |
default_reasoning_effort: medium
|
| 14 |
suppress_desktop_directives: true
|
| 15 |
auth:
|
|
@@ -1,83 +1,301 @@
|
|
| 1 |
-
# Codex model catalog
|
| 2 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
models:
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
isDefault: true
|
| 9 |
supportedReasoningEfforts:
|
| 10 |
-
- { reasoningEffort:
|
| 11 |
-
- { reasoningEffort:
|
| 12 |
-
- { reasoningEffort:
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
supportsPersonality: true
|
| 17 |
upgrade: null
|
| 18 |
|
| 19 |
-
- id:
|
| 20 |
-
displayName:
|
| 21 |
-
description:
|
| 22 |
isDefault: false
|
| 23 |
supportedReasoningEfforts:
|
| 24 |
-
- { reasoningEffort:
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
|
| 33 |
-
- id:
|
| 34 |
-
displayName:
|
| 35 |
-
description:
|
| 36 |
isDefault: false
|
| 37 |
supportedReasoningEfforts:
|
| 38 |
-
- { reasoningEffort:
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
- { reasoningEffort: "xhigh", description: "Extra high reasoning depth for complex problems" }
|
| 42 |
-
defaultReasoningEffort: "medium"
|
| 43 |
-
inputModalities: ["text", "image"]
|
| 44 |
supportsPersonality: false
|
| 45 |
-
upgrade:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 46 |
|
| 47 |
-
- id:
|
| 48 |
-
displayName:
|
| 49 |
-
description:
|
| 50 |
isDefault: false
|
| 51 |
supportedReasoningEfforts:
|
| 52 |
-
- { reasoningEffort:
|
| 53 |
-
- { reasoningEffort:
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
defaultReasoningEffort: "medium"
|
| 57 |
-
inputModalities: ["text", "image"]
|
| 58 |
supportsPersonality: false
|
| 59 |
-
upgrade:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
|
| 61 |
-
- id:
|
| 62 |
-
displayName:
|
| 63 |
-
description:
|
| 64 |
isDefault: false
|
| 65 |
supportedReasoningEfforts:
|
| 66 |
-
- { reasoningEffort:
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
inputModalities: ["text", "image"]
|
| 70 |
supportsPersonality: false
|
| 71 |
-
upgrade:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
aliases:
|
| 74 |
-
codex: "gpt-5.
|
| 75 |
-
codex-max: "gpt-5.1-codex-max"
|
| 76 |
-
codex-mini: "gpt-5.1-codex-mini"
|
| 77 |
-
# Claude Code model aliases
|
| 78 |
-
claude-opus-4-6: "gpt-5.3-codex"
|
| 79 |
-
claude-sonnet-4-6: "gpt-5.2-codex"
|
| 80 |
-
claude-haiku-4-5-20251001: "gpt-5.1-codex-mini"
|
| 81 |
-
opus: "gpt-5.3-codex"
|
| 82 |
-
sonnet: "gpt-5.2-codex"
|
| 83 |
-
haiku: "gpt-5.1-codex-mini"
|
|
|
|
| 1 |
+
# Codex model catalog
|
| 2 |
+
#
|
| 3 |
+
# Sources:
|
| 4 |
+
# 1. Static (below) β Codex-specific models (not returned by /backend-api/models)
|
| 5 |
+
# 2. Dynamic β general ChatGPT models fetched from /backend-api/models
|
| 6 |
+
#
|
| 7 |
+
# Dynamic fetch merges with static; backend entries win for shared IDs.
|
| 8 |
|
| 9 |
models:
|
| 10 |
+
# ββ GPT-5.3 Codex family ββββββββββββββββββββββββββββββββββββββββββ
|
| 11 |
+
- id: gpt-5.3-codex
|
| 12 |
+
displayName: GPT-5.3 Codex
|
| 13 |
+
description: Latest Codex flagship model
|
| 14 |
+
isDefault: false
|
| 15 |
+
supportedReasoningEfforts:
|
| 16 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 17 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 18 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 19 |
+
defaultReasoningEffort: medium
|
| 20 |
+
inputModalities: [text]
|
| 21 |
+
supportsPersonality: false
|
| 22 |
+
upgrade: null
|
| 23 |
+
|
| 24 |
+
- id: gpt-5.3-codex-high
|
| 25 |
+
displayName: GPT-5.3 Codex High
|
| 26 |
+
description: GPT-5.3 Codex β high reasoning tier
|
| 27 |
+
isDefault: false
|
| 28 |
+
supportedReasoningEfforts:
|
| 29 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 30 |
+
defaultReasoningEffort: high
|
| 31 |
+
inputModalities: [text]
|
| 32 |
+
supportsPersonality: false
|
| 33 |
+
upgrade: null
|
| 34 |
+
|
| 35 |
+
- id: gpt-5.3-codex-mid
|
| 36 |
+
displayName: GPT-5.3 Codex Mid
|
| 37 |
+
description: GPT-5.3 Codex β mid reasoning tier
|
| 38 |
+
isDefault: false
|
| 39 |
+
supportedReasoningEfforts:
|
| 40 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 41 |
+
defaultReasoningEffort: medium
|
| 42 |
+
inputModalities: [text]
|
| 43 |
+
supportsPersonality: false
|
| 44 |
+
upgrade: null
|
| 45 |
+
|
| 46 |
+
- id: gpt-5.3-codex-low
|
| 47 |
+
displayName: GPT-5.3 Codex Low
|
| 48 |
+
description: GPT-5.3 Codex β low reasoning tier (fastest)
|
| 49 |
+
isDefault: false
|
| 50 |
+
supportedReasoningEfforts:
|
| 51 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 52 |
+
defaultReasoningEffort: low
|
| 53 |
+
inputModalities: [text]
|
| 54 |
+
supportsPersonality: false
|
| 55 |
+
upgrade: null
|
| 56 |
+
|
| 57 |
+
- id: gpt-5.3-codex-max
|
| 58 |
+
displayName: GPT-5.3 Codex Max
|
| 59 |
+
description: GPT-5.3 Codex β extended context / deepest reasoning
|
| 60 |
+
isDefault: false
|
| 61 |
+
supportedReasoningEfforts:
|
| 62 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 63 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 64 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 65 |
+
defaultReasoningEffort: medium
|
| 66 |
+
inputModalities: [text]
|
| 67 |
+
supportsPersonality: false
|
| 68 |
+
upgrade: null
|
| 69 |
+
|
| 70 |
+
- id: gpt-5.3-codex-mini
|
| 71 |
+
displayName: GPT-5.3 Codex Mini
|
| 72 |
+
description: GPT-5.3 Codex β lightweight, low-latency
|
| 73 |
+
isDefault: false
|
| 74 |
+
supportedReasoningEfforts:
|
| 75 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 76 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 77 |
+
defaultReasoningEffort: low
|
| 78 |
+
inputModalities: [text]
|
| 79 |
+
supportsPersonality: false
|
| 80 |
+
upgrade: null
|
| 81 |
+
|
| 82 |
+
# ββ GPT-5.2 Codex family ββββββββββββββββββββββββββββββββββββββββββ
|
| 83 |
+
- id: gpt-5.2-codex
|
| 84 |
+
displayName: GPT-5.2 Codex
|
| 85 |
+
description: GPT-5.2 Codex flagship
|
| 86 |
isDefault: true
|
| 87 |
supportedReasoningEfforts:
|
| 88 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 89 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 90 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 91 |
+
defaultReasoningEffort: medium
|
| 92 |
+
inputModalities: [text]
|
| 93 |
+
supportsPersonality: false
|
|
|
|
| 94 |
upgrade: null
|
| 95 |
|
| 96 |
+
- id: gpt-5.2-codex-high
|
| 97 |
+
displayName: GPT-5.2 Codex High
|
| 98 |
+
description: GPT-5.2 Codex β high reasoning tier
|
| 99 |
isDefault: false
|
| 100 |
supportedReasoningEfforts:
|
| 101 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 102 |
+
defaultReasoningEffort: high
|
| 103 |
+
inputModalities: [text]
|
| 104 |
+
supportsPersonality: false
|
| 105 |
+
upgrade: null
|
| 106 |
+
|
| 107 |
+
- id: gpt-5.2-codex-mid
|
| 108 |
+
displayName: GPT-5.2 Codex Mid
|
| 109 |
+
description: GPT-5.2 Codex β mid reasoning tier
|
| 110 |
+
isDefault: false
|
| 111 |
+
supportedReasoningEfforts:
|
| 112 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 113 |
+
defaultReasoningEffort: medium
|
| 114 |
+
inputModalities: [text]
|
| 115 |
+
supportsPersonality: false
|
| 116 |
+
upgrade: null
|
| 117 |
|
| 118 |
+
- id: gpt-5.2-codex-low
|
| 119 |
+
displayName: GPT-5.2 Codex Low
|
| 120 |
+
description: GPT-5.2 Codex β low reasoning tier (fastest)
|
| 121 |
isDefault: false
|
| 122 |
supportedReasoningEfforts:
|
| 123 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 124 |
+
defaultReasoningEffort: low
|
| 125 |
+
inputModalities: [text]
|
|
|
|
|
|
|
|
|
|
| 126 |
supportsPersonality: false
|
| 127 |
+
upgrade: null
|
| 128 |
+
|
| 129 |
+
- id: gpt-5.2-codex-max
|
| 130 |
+
displayName: GPT-5.2 Codex Max
|
| 131 |
+
description: GPT-5.2 Codex β extended context / deepest reasoning
|
| 132 |
+
isDefault: false
|
| 133 |
+
supportedReasoningEfforts:
|
| 134 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 135 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 136 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 137 |
+
defaultReasoningEffort: medium
|
| 138 |
+
inputModalities: [text]
|
| 139 |
+
supportsPersonality: false
|
| 140 |
+
upgrade: null
|
| 141 |
|
| 142 |
+
- id: gpt-5.2-codex-mini
|
| 143 |
+
displayName: GPT-5.2 Codex Mini
|
| 144 |
+
description: GPT-5.2 Codex β lightweight, low-latency
|
| 145 |
isDefault: false
|
| 146 |
supportedReasoningEfforts:
|
| 147 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 148 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 149 |
+
defaultReasoningEffort: low
|
| 150 |
+
inputModalities: [text]
|
|
|
|
|
|
|
| 151 |
supportsPersonality: false
|
| 152 |
+
upgrade: null
|
| 153 |
+
|
| 154 |
+
# ββ GPT-5.1 Codex family ββββββββββββββββββββββββββββββββββββββββββ
|
| 155 |
+
- id: gpt-5.1-codex
|
| 156 |
+
displayName: GPT-5.1 Codex
|
| 157 |
+
description: GPT-5.1 Codex
|
| 158 |
+
isDefault: false
|
| 159 |
+
supportedReasoningEfforts:
|
| 160 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 161 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 162 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 163 |
+
defaultReasoningEffort: medium
|
| 164 |
+
inputModalities: [text]
|
| 165 |
+
supportsPersonality: false
|
| 166 |
+
upgrade: null
|
| 167 |
|
| 168 |
+
- id: gpt-5.1-codex-high
|
| 169 |
+
displayName: GPT-5.1 Codex High
|
| 170 |
+
description: GPT-5.1 Codex β high reasoning tier
|
| 171 |
isDefault: false
|
| 172 |
supportedReasoningEfforts:
|
| 173 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 174 |
+
defaultReasoningEffort: high
|
| 175 |
+
inputModalities: [text]
|
|
|
|
| 176 |
supportsPersonality: false
|
| 177 |
+
upgrade: null
|
| 178 |
+
|
| 179 |
+
- id: gpt-5.1-codex-mid
|
| 180 |
+
displayName: GPT-5.1 Codex Mid
|
| 181 |
+
description: GPT-5.1 Codex β mid reasoning tier
|
| 182 |
+
isDefault: false
|
| 183 |
+
supportedReasoningEfforts:
|
| 184 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 185 |
+
defaultReasoningEffort: medium
|
| 186 |
+
inputModalities: [text]
|
| 187 |
+
supportsPersonality: false
|
| 188 |
+
upgrade: null
|
| 189 |
+
|
| 190 |
+
- id: gpt-5.1-codex-low
|
| 191 |
+
displayName: GPT-5.1 Codex Low
|
| 192 |
+
description: GPT-5.1 Codex β low reasoning tier (fastest)
|
| 193 |
+
isDefault: false
|
| 194 |
+
supportedReasoningEfforts:
|
| 195 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 196 |
+
defaultReasoningEffort: low
|
| 197 |
+
inputModalities: [text]
|
| 198 |
+
supportsPersonality: false
|
| 199 |
+
upgrade: null
|
| 200 |
+
|
| 201 |
+
- id: gpt-5.1-codex-max
|
| 202 |
+
displayName: GPT-5.1 Codex Max
|
| 203 |
+
description: GPT-5.1 Codex β extended context / deepest reasoning
|
| 204 |
+
isDefault: false
|
| 205 |
+
supportedReasoningEfforts:
|
| 206 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 207 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 208 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 209 |
+
defaultReasoningEffort: medium
|
| 210 |
+
inputModalities: [text]
|
| 211 |
+
supportsPersonality: false
|
| 212 |
+
upgrade: null
|
| 213 |
+
|
| 214 |
+
- id: gpt-5.1-codex-mini
|
| 215 |
+
displayName: GPT-5.1 Codex Mini
|
| 216 |
+
description: GPT-5.1 Codex Mini β lightweight, low-latency
|
| 217 |
+
isDefault: false
|
| 218 |
+
supportedReasoningEfforts:
|
| 219 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 220 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 221 |
+
defaultReasoningEffort: low
|
| 222 |
+
inputModalities: [text]
|
| 223 |
+
supportsPersonality: false
|
| 224 |
+
upgrade: null
|
| 225 |
+
|
| 226 |
+
# ββ GPT-5 Codex (legacy) ββββββββββββββββββββββββββββββββββββββββββ
|
| 227 |
+
- id: gpt-5-codex
|
| 228 |
+
displayName: GPT-5 Codex
|
| 229 |
+
description: Original GPT-5 Codex
|
| 230 |
+
isDefault: false
|
| 231 |
+
supportedReasoningEfforts:
|
| 232 |
+
- { reasoningEffort: medium, description: "Default" }
|
| 233 |
+
defaultReasoningEffort: medium
|
| 234 |
+
inputModalities: [text]
|
| 235 |
+
supportsPersonality: false
|
| 236 |
+
upgrade: null
|
| 237 |
+
|
| 238 |
+
- id: gpt-5-codex-mini
|
| 239 |
+
displayName: GPT-5 Codex Mini
|
| 240 |
+
description: Original lightweight Codex
|
| 241 |
+
isDefault: false
|
| 242 |
+
supportedReasoningEfforts:
|
| 243 |
+
- { reasoningEffort: medium, description: "Default" }
|
| 244 |
+
defaultReasoningEffort: medium
|
| 245 |
+
inputModalities: [text]
|
| 246 |
+
supportsPersonality: false
|
| 247 |
+
upgrade: null
|
| 248 |
+
|
| 249 |
+
# ββ Base GPT models (also usable via Codex endpoint) βββββββββββββββ
|
| 250 |
+
- id: gpt-5.3
|
| 251 |
+
displayName: GPT-5.3
|
| 252 |
+
description: General-purpose GPT-5.3
|
| 253 |
+
isDefault: false
|
| 254 |
+
supportedReasoningEfforts:
|
| 255 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 256 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 257 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 258 |
+
defaultReasoningEffort: medium
|
| 259 |
+
inputModalities: [text, image]
|
| 260 |
+
supportsPersonality: true
|
| 261 |
+
upgrade: null
|
| 262 |
+
|
| 263 |
+
- id: gpt-5.2
|
| 264 |
+
displayName: GPT-5.2
|
| 265 |
+
description: General-purpose GPT-5.2
|
| 266 |
+
isDefault: false
|
| 267 |
+
supportedReasoningEfforts:
|
| 268 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 269 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 270 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 271 |
+
defaultReasoningEffort: medium
|
| 272 |
+
inputModalities: [text, image]
|
| 273 |
+
supportsPersonality: true
|
| 274 |
+
upgrade: null
|
| 275 |
+
|
| 276 |
+
- id: gpt-5.1
|
| 277 |
+
displayName: GPT-5.1
|
| 278 |
+
description: General-purpose GPT-5.1
|
| 279 |
+
isDefault: false
|
| 280 |
+
supportedReasoningEfforts:
|
| 281 |
+
- { reasoningEffort: low, description: "Fastest responses" }
|
| 282 |
+
- { reasoningEffort: medium, description: "Balanced speed and quality" }
|
| 283 |
+
- { reasoningEffort: high, description: "Deepest reasoning" }
|
| 284 |
+
defaultReasoningEffort: medium
|
| 285 |
+
inputModalities: [text, image]
|
| 286 |
+
supportsPersonality: true
|
| 287 |
+
upgrade: null
|
| 288 |
+
|
| 289 |
+
- id: gpt-5
|
| 290 |
+
displayName: GPT-5
|
| 291 |
+
description: General-purpose GPT-5
|
| 292 |
+
isDefault: false
|
| 293 |
+
supportedReasoningEfforts:
|
| 294 |
+
- { reasoningEffort: medium, description: "Default" }
|
| 295 |
+
defaultReasoningEffort: medium
|
| 296 |
+
inputModalities: [text, image]
|
| 297 |
+
supportsPersonality: true
|
| 298 |
+
upgrade: null
|
| 299 |
|
| 300 |
aliases:
|
| 301 |
+
codex: "gpt-5.2-codex"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
@@ -2,6 +2,8 @@ import { readFileSync } from "fs";
|
|
| 2 |
import { resolve } from "path";
|
| 3 |
import yaml from "js-yaml";
|
| 4 |
import { z } from "zod";
|
|
|
|
|
|
|
| 5 |
|
| 6 |
const ConfigSchema = z.object({
|
| 7 |
api: z.object({
|
|
@@ -17,7 +19,7 @@ const ConfigSchema = z.object({
|
|
| 17 |
chromium_version: z.string().default("136"),
|
| 18 |
}),
|
| 19 |
model: z.object({
|
| 20 |
-
default: z.string().default("gpt-5.
|
| 21 |
default_reasoning_effort: z.string().default("medium"),
|
| 22 |
suppress_desktop_directives: z.boolean().default(true),
|
| 23 |
}),
|
|
@@ -148,9 +150,12 @@ export function reloadFingerprint(configDir?: string): FingerprintConfig {
|
|
| 148 |
return _fingerprint;
|
| 149 |
}
|
| 150 |
|
| 151 |
-
/** Reload both config and fingerprint from disk. */
|
| 152 |
export function reloadAllConfigs(configDir?: string): void {
|
| 153 |
reloadConfig(configDir);
|
| 154 |
reloadFingerprint(configDir);
|
| 155 |
-
|
|
|
|
|
|
|
|
|
|
| 156 |
}
|
|
|
|
| 2 |
import { resolve } from "path";
|
| 3 |
import yaml from "js-yaml";
|
| 4 |
import { z } from "zod";
|
| 5 |
+
import { loadStaticModels } from "./models/model-store.js";
|
| 6 |
+
import { triggerImmediateRefresh } from "./models/model-fetcher.js";
|
| 7 |
|
| 8 |
const ConfigSchema = z.object({
|
| 9 |
api: z.object({
|
|
|
|
| 19 |
chromium_version: z.string().default("136"),
|
| 20 |
}),
|
| 21 |
model: z.object({
|
| 22 |
+
default: z.string().default("gpt-5.2-codex"),
|
| 23 |
default_reasoning_effort: z.string().default("medium"),
|
| 24 |
suppress_desktop_directives: z.boolean().default(true),
|
| 25 |
}),
|
|
|
|
| 150 |
return _fingerprint;
|
| 151 |
}
|
| 152 |
|
| 153 |
+
/** Reload both config and fingerprint from disk, plus static models. */
|
| 154 |
export function reloadAllConfigs(configDir?: string): void {
|
| 155 |
reloadConfig(configDir);
|
| 156 |
reloadFingerprint(configDir);
|
| 157 |
+
loadStaticModels(configDir);
|
| 158 |
+
console.log("[Config] Hot-reloaded config, fingerprint, and models from disk");
|
| 159 |
+
// Re-merge backend models so hot-reload doesn't wipe them for ~1h
|
| 160 |
+
triggerImmediateRefresh();
|
| 161 |
}
|
|
@@ -18,6 +18,8 @@ import { CookieJar } from "./proxy/cookie-jar.js";
|
|
| 18 |
import { startUpdateChecker, stopUpdateChecker } from "./update-checker.js";
|
| 19 |
import { initProxy } from "./tls/curl-binary.js";
|
| 20 |
import { initTransport } from "./tls/transport.js";
|
|
|
|
|
|
|
| 21 |
|
| 22 |
async function main() {
|
| 23 |
// Load configuration
|
|
@@ -33,6 +35,9 @@ async function main() {
|
|
| 33 |
process.exit(1);
|
| 34 |
}
|
| 35 |
|
|
|
|
|
|
|
|
|
|
| 36 |
// Detect proxy (config > env > auto-detect local ports)
|
| 37 |
await initProxy();
|
| 38 |
|
|
@@ -100,6 +105,9 @@ async function main() {
|
|
| 100 |
// Start background update checker
|
| 101 |
startUpdateChecker();
|
| 102 |
|
|
|
|
|
|
|
|
|
|
| 103 |
const server = serve({
|
| 104 |
fetch: app.fetch,
|
| 105 |
hostname: host,
|
|
@@ -138,6 +146,7 @@ async function main() {
|
|
| 138 |
cleanupDone = true;
|
| 139 |
try {
|
| 140 |
stopUpdateChecker();
|
|
|
|
| 141 |
refreshScheduler.destroy();
|
| 142 |
sessionManager.destroy();
|
| 143 |
cookieJar.destroy();
|
|
|
|
| 18 |
import { startUpdateChecker, stopUpdateChecker } from "./update-checker.js";
|
| 19 |
import { initProxy } from "./tls/curl-binary.js";
|
| 20 |
import { initTransport } from "./tls/transport.js";
|
| 21 |
+
import { loadStaticModels } from "./models/model-store.js";
|
| 22 |
+
import { startModelRefresh, stopModelRefresh } from "./models/model-fetcher.js";
|
| 23 |
|
| 24 |
async function main() {
|
| 25 |
// Load configuration
|
|
|
|
| 35 |
process.exit(1);
|
| 36 |
}
|
| 37 |
|
| 38 |
+
// Load static model catalog (before transport/auth init)
|
| 39 |
+
loadStaticModels();
|
| 40 |
+
|
| 41 |
// Detect proxy (config > env > auto-detect local ports)
|
| 42 |
await initProxy();
|
| 43 |
|
|
|
|
| 105 |
// Start background update checker
|
| 106 |
startUpdateChecker();
|
| 107 |
|
| 108 |
+
// Start background model refresh (requires auth to be ready)
|
| 109 |
+
startModelRefresh(accountPool, cookieJar);
|
| 110 |
+
|
| 111 |
const server = serve({
|
| 112 |
fetch: app.fetch,
|
| 113 |
hostname: host,
|
|
|
|
| 146 |
cleanupDone = true;
|
| 147 |
try {
|
| 148 |
stopUpdateChecker();
|
| 149 |
+
stopModelRefresh();
|
| 150 |
refreshScheduler.destroy();
|
| 151 |
sessionManager.destroy();
|
| 152 |
cookieJar.destroy();
|
|
@@ -0,0 +1,117 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Model Fetcher β background model list refresh from Codex backend.
|
| 3 |
+
*
|
| 4 |
+
* - Probes known endpoints to discover the models list
|
| 5 |
+
* - Normalizes and merges into the model store
|
| 6 |
+
* - Non-fatal: all errors log warnings but never crash the server
|
| 7 |
+
*/
|
| 8 |
+
|
| 9 |
+
import { CodexApi } from "../proxy/codex-api.js";
|
| 10 |
+
import { applyBackendModels, type BackendModelEntry } from "./model-store.js";
|
| 11 |
+
import type { AccountPool } from "../auth/account-pool.js";
|
| 12 |
+
import type { CookieJar } from "../proxy/cookie-jar.js";
|
| 13 |
+
import { jitter } from "../utils/jitter.js";
|
| 14 |
+
|
| 15 |
+
const REFRESH_INTERVAL_HOURS = 1;
|
| 16 |
+
const INITIAL_DELAY_MS = 5_000; // 5s after startup
|
| 17 |
+
|
| 18 |
+
let _refreshTimer: ReturnType<typeof setTimeout> | null = null;
|
| 19 |
+
let _accountPool: AccountPool | null = null;
|
| 20 |
+
let _cookieJar: CookieJar | null = null;
|
| 21 |
+
|
| 22 |
+
/**
|
| 23 |
+
* Fetch models from the Codex backend using an available account.
|
| 24 |
+
*/
|
| 25 |
+
async function fetchModelsFromBackend(
|
| 26 |
+
accountPool: AccountPool,
|
| 27 |
+
cookieJar: CookieJar,
|
| 28 |
+
): Promise<void> {
|
| 29 |
+
if (!accountPool.isAuthenticated()) return; // silently skip when no accounts
|
| 30 |
+
|
| 31 |
+
const acquired = accountPool.acquire();
|
| 32 |
+
if (!acquired) {
|
| 33 |
+
console.warn("[ModelFetcher] No available account β skipping model fetch");
|
| 34 |
+
return;
|
| 35 |
+
}
|
| 36 |
+
|
| 37 |
+
try {
|
| 38 |
+
const api = new CodexApi(
|
| 39 |
+
acquired.token,
|
| 40 |
+
acquired.accountId,
|
| 41 |
+
cookieJar,
|
| 42 |
+
acquired.entryId,
|
| 43 |
+
);
|
| 44 |
+
|
| 45 |
+
const models = await api.getModels();
|
| 46 |
+
if (models && models.length > 0) {
|
| 47 |
+
applyBackendModels(models);
|
| 48 |
+
console.log(`[ModelFetcher] Fetched ${models.length} models from backend`);
|
| 49 |
+
} else {
|
| 50 |
+
console.log("[ModelFetcher] Backend returned empty model list β keeping static models");
|
| 51 |
+
}
|
| 52 |
+
} catch (err) {
|
| 53 |
+
const msg = err instanceof Error ? err.message : String(err);
|
| 54 |
+
console.warn(`[ModelFetcher] Backend fetch failed: ${msg}`);
|
| 55 |
+
} finally {
|
| 56 |
+
accountPool.release(acquired.entryId);
|
| 57 |
+
}
|
| 58 |
+
}
|
| 59 |
+
|
| 60 |
+
/**
|
| 61 |
+
* Start the background model refresh loop.
|
| 62 |
+
* - First fetch after a short delay (auth must be ready)
|
| 63 |
+
* - Subsequent fetches every ~1 hour with jitter
|
| 64 |
+
*/
|
| 65 |
+
export function startModelRefresh(
|
| 66 |
+
accountPool: AccountPool,
|
| 67 |
+
cookieJar: CookieJar,
|
| 68 |
+
): void {
|
| 69 |
+
_accountPool = accountPool;
|
| 70 |
+
_cookieJar = cookieJar;
|
| 71 |
+
|
| 72 |
+
// Initial fetch after short delay
|
| 73 |
+
_refreshTimer = setTimeout(async () => {
|
| 74 |
+
await fetchModelsFromBackend(accountPool, cookieJar);
|
| 75 |
+
scheduleNext(accountPool, cookieJar);
|
| 76 |
+
}, INITIAL_DELAY_MS);
|
| 77 |
+
|
| 78 |
+
console.log("[ModelFetcher] Scheduled initial model fetch in 5s");
|
| 79 |
+
}
|
| 80 |
+
|
| 81 |
+
function scheduleNext(
|
| 82 |
+
accountPool: AccountPool,
|
| 83 |
+
cookieJar: CookieJar,
|
| 84 |
+
): void {
|
| 85 |
+
const intervalMs = jitter(REFRESH_INTERVAL_HOURS * 3600 * 1000, 0.15);
|
| 86 |
+
_refreshTimer = setTimeout(async () => {
|
| 87 |
+
try {
|
| 88 |
+
await fetchModelsFromBackend(accountPool, cookieJar);
|
| 89 |
+
} finally {
|
| 90 |
+
scheduleNext(accountPool, cookieJar);
|
| 91 |
+
}
|
| 92 |
+
}, intervalMs);
|
| 93 |
+
}
|
| 94 |
+
|
| 95 |
+
/**
|
| 96 |
+
* Trigger an immediate model refresh (e.g. after hot-reload).
|
| 97 |
+
* No-op if startModelRefresh() hasn't been called yet.
|
| 98 |
+
*/
|
| 99 |
+
export function triggerImmediateRefresh(): void {
|
| 100 |
+
if (_accountPool && _cookieJar) {
|
| 101 |
+
fetchModelsFromBackend(_accountPool, _cookieJar).catch((err) => {
|
| 102 |
+
const msg = err instanceof Error ? err.message : String(err);
|
| 103 |
+
console.warn(`[ModelFetcher] Immediate refresh failed: ${msg}`);
|
| 104 |
+
});
|
| 105 |
+
}
|
| 106 |
+
}
|
| 107 |
+
|
| 108 |
+
/**
|
| 109 |
+
* Stop the background refresh timer.
|
| 110 |
+
*/
|
| 111 |
+
export function stopModelRefresh(): void {
|
| 112 |
+
if (_refreshTimer) {
|
| 113 |
+
clearTimeout(_refreshTimer);
|
| 114 |
+
_refreshTimer = null;
|
| 115 |
+
console.log("[ModelFetcher] Stopped model refresh");
|
| 116 |
+
}
|
| 117 |
+
}
|
|
@@ -0,0 +1,223 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
/**
|
| 2 |
+
* Model Store β mutable singleton for model catalog + aliases.
|
| 3 |
+
*
|
| 4 |
+
* Data flow:
|
| 5 |
+
* 1. loadStaticModels() β load from config/models.yaml (fallback baseline)
|
| 6 |
+
* 2. applyBackendModels() β merge backend-fetched models (backend wins for shared IDs)
|
| 7 |
+
* 3. getters β runtime reads from mutable state
|
| 8 |
+
*
|
| 9 |
+
* Aliases always come from YAML (user-customizable), never from backend.
|
| 10 |
+
*/
|
| 11 |
+
|
| 12 |
+
import { readFileSync } from "fs";
|
| 13 |
+
import { resolve } from "path";
|
| 14 |
+
import yaml from "js-yaml";
|
| 15 |
+
import { getConfig } from "../config.js";
|
| 16 |
+
|
| 17 |
+
// One catalog entry as exposed to clients; shape mirrors config/models.yaml.
export interface CodexModelInfo {
  // Canonical model identifier used in API requests (e.g. "gpt-5.2-codex").
  id: string;
  // Human-readable name for UIs.
  displayName: string;
  // Short description; may be empty (backend entries default to "").
  description: string;
  // True for the catalog's default model.
  isDefault: boolean;
  // Reasoning-effort levels this model accepts, each with a description.
  supportedReasoningEfforts: { reasoningEffort: string; description: string }[];
  // Effort used when the caller does not specify one.
  defaultReasoningEffort: string;
  // Accepted input kinds; normalization defaults this to ["text"].
  inputModalities: string[];
  supportsPersonality: boolean;
  // ID of a newer model to suggest upgrading to, or null when none.
  upgrade: string | null;
  /** Where this model entry came from */
  source?: "static" | "backend";
}

// Parsed shape of config/models.yaml.
interface ModelsConfig {
  models: CodexModelInfo[];
  aliases: Record<string, string>;
}
|
| 35 |
+
|
| 36 |
+
// ββ Mutable state ββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 37 |
+
|
| 38 |
+
// Current model catalog; empty until loadStaticModels() runs at startup.
let _catalog: CodexModelInfo[] = [];
// Alias -> canonical-ID map; populated only from YAML, never from the backend.
let _aliases: Record<string, string> = {};
// ISO timestamp of the last applyBackendModels() merge; null before any fetch.
let _lastFetchTime: string | null = null;
|
| 41 |
+
|
| 42 |
+
// ββ Static loading βββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 43 |
+
|
| 44 |
+
/**
|
| 45 |
+
* Load models from config/models.yaml (synchronous).
|
| 46 |
+
* Called at startup and on hot-reload.
|
| 47 |
+
*/
|
| 48 |
+
export function loadStaticModels(configDir?: string): void {
|
| 49 |
+
const dir = configDir ?? resolve(process.cwd(), "config");
|
| 50 |
+
const configPath = resolve(dir, "models.yaml");
|
| 51 |
+
const raw = yaml.load(readFileSync(configPath, "utf-8")) as ModelsConfig;
|
| 52 |
+
|
| 53 |
+
_catalog = (raw.models ?? []).map((m) => ({ ...m, source: "static" as const }));
|
| 54 |
+
_aliases = raw.aliases ?? {};
|
| 55 |
+
console.log(`[ModelStore] Loaded ${_catalog.length} static models, ${Object.keys(_aliases).length} aliases`);
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// ββ Backend merge ββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 59 |
+
|
| 60 |
+
/**
 * Raw model entry from backend (fields are optional — format may vary).
 */
export interface BackendModelEntry {
  // Preferred ID field; falls back to id, then name, during normalization.
  slug?: string;
  id?: string;
  name?: string;
  display_name?: string;
  description?: string;
  is_default?: boolean;
  default_reasoning_effort?: string;
  // Effort entries may use snake_case or camelCase for the effort key.
  supported_reasoning_efforts?: Array<{
    reasoning_effort?: string;
    reasoningEffort?: string;
    description?: string;
  }>;
  input_modalities?: string[];
  supports_personality?: boolean;
  upgrade?: string | null;
}

/** Intermediate type with explicit efforts flag for merge logic. */
interface NormalizedModelWithMeta extends CodexModelInfo {
  // True when the backend listed at least one reasoning effort explicitly;
  // lets the merge distinguish "backend said medium" from "we defaulted it".
  _hasExplicitEfforts: boolean;
}
|
| 85 |
+
|
| 86 |
+
/**
|
| 87 |
+
* Normalize a backend model entry to our CodexModelInfo format.
|
| 88 |
+
*/
|
| 89 |
+
function normalizeBackendModel(raw: BackendModelEntry): NormalizedModelWithMeta {
|
| 90 |
+
const id = raw.slug ?? raw.id ?? raw.name ?? "unknown";
|
| 91 |
+
|
| 92 |
+
const hasExplicitEfforts = Array.isArray(raw.supported_reasoning_efforts) && raw.supported_reasoning_efforts.length > 0;
|
| 93 |
+
|
| 94 |
+
// Normalize reasoning efforts β accept both snake_case and camelCase
|
| 95 |
+
const efforts = (raw.supported_reasoning_efforts ?? []).map((e) => ({
|
| 96 |
+
reasoningEffort: e.reasoningEffort ?? e.reasoning_effort ?? "medium",
|
| 97 |
+
description: e.description ?? "",
|
| 98 |
+
}));
|
| 99 |
+
|
| 100 |
+
return {
|
| 101 |
+
id,
|
| 102 |
+
displayName: raw.display_name ?? raw.name ?? id,
|
| 103 |
+
description: raw.description ?? "",
|
| 104 |
+
isDefault: raw.is_default ?? false,
|
| 105 |
+
supportedReasoningEfforts: efforts.length > 0
|
| 106 |
+
? efforts
|
| 107 |
+
: [{ reasoningEffort: "medium", description: "Default" }],
|
| 108 |
+
defaultReasoningEffort: raw.default_reasoning_effort ?? "medium",
|
| 109 |
+
inputModalities: raw.input_modalities ?? ["text"],
|
| 110 |
+
supportsPersonality: raw.supports_personality ?? false,
|
| 111 |
+
upgrade: raw.upgrade ?? null,
|
| 112 |
+
source: "backend",
|
| 113 |
+
_hasExplicitEfforts: hasExplicitEfforts,
|
| 114 |
+
};
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
/**
|
| 118 |
+
* Merge backend models into the catalog.
|
| 119 |
+
*
|
| 120 |
+
* Strategy:
|
| 121 |
+
* - Backend models overwrite static models with the same ID
|
| 122 |
+
* (but YAML fields fill in missing backend fields)
|
| 123 |
+
* - Static-only models are preserved (YAML may know about models the backend doesn't list)
|
| 124 |
+
* - Aliases are never touched (always from YAML)
|
| 125 |
+
*/
|
| 126 |
+
export function applyBackendModels(backendModels: BackendModelEntry[]): void {
|
| 127 |
+
const staticMap = new Map(_catalog.map((m) => [m.id, m]));
|
| 128 |
+
const merged: CodexModelInfo[] = [];
|
| 129 |
+
const seenIds = new Set<string>();
|
| 130 |
+
|
| 131 |
+
for (const raw of backendModels) {
|
| 132 |
+
const normalized = normalizeBackendModel(raw);
|
| 133 |
+
seenIds.add(normalized.id);
|
| 134 |
+
|
| 135 |
+
const existing = staticMap.get(normalized.id);
|
| 136 |
+
// Strip internal meta field before storing
|
| 137 |
+
const { _hasExplicitEfforts, ...model } = normalized;
|
| 138 |
+
if (existing) {
|
| 139 |
+
// Backend wins, but YAML fills gaps
|
| 140 |
+
merged.push({
|
| 141 |
+
...existing,
|
| 142 |
+
...model,
|
| 143 |
+
// Preserve YAML fields if backend is empty
|
| 144 |
+
description: model.description || existing.description,
|
| 145 |
+
displayName: model.displayName || existing.displayName,
|
| 146 |
+
supportedReasoningEfforts: _hasExplicitEfforts
|
| 147 |
+
? model.supportedReasoningEfforts
|
| 148 |
+
: existing.supportedReasoningEfforts,
|
| 149 |
+
source: "backend",
|
| 150 |
+
});
|
| 151 |
+
} else {
|
| 152 |
+
merged.push(model);
|
| 153 |
+
}
|
| 154 |
+
}
|
| 155 |
+
|
| 156 |
+
// Preserve static-only models (not in backend)
|
| 157 |
+
for (const m of _catalog) {
|
| 158 |
+
if (!seenIds.has(m.id)) {
|
| 159 |
+
merged.push({ ...m, source: "static" });
|
| 160 |
+
}
|
| 161 |
+
}
|
| 162 |
+
|
| 163 |
+
_catalog = merged;
|
| 164 |
+
_lastFetchTime = new Date().toISOString();
|
| 165 |
+
console.log(
|
| 166 |
+
`[ModelStore] Merged ${backendModels.length} backend + ${merged.length - backendModels.length} static-only = ${merged.length} total models`,
|
| 167 |
+
);
|
| 168 |
+
}
|
| 169 |
+
|
| 170 |
+
// ββ Getters ββββββββββββββββββββββββββββββββββββββββββββββββββββββββ
|
| 171 |
+
|
| 172 |
+
/**
|
| 173 |
+
* Resolve a model name (may be an alias) to a canonical model ID.
|
| 174 |
+
*/
|
| 175 |
+
export function resolveModelId(input: string): string {
|
| 176 |
+
const trimmed = input.trim();
|
| 177 |
+
if (_aliases[trimmed]) return _aliases[trimmed];
|
| 178 |
+
if (_catalog.some((m) => m.id === trimmed)) return trimmed;
|
| 179 |
+
return getConfig().model.default;
|
| 180 |
+
}
|
| 181 |
+
|
| 182 |
+
/**
|
| 183 |
+
* Get model info by ID.
|
| 184 |
+
*/
|
| 185 |
+
export function getModelInfo(modelId: string): CodexModelInfo | undefined {
|
| 186 |
+
return _catalog.find((m) => m.id === modelId);
|
| 187 |
+
}
|
| 188 |
+
|
| 189 |
+
/**
|
| 190 |
+
* Get the full model catalog.
|
| 191 |
+
*/
|
| 192 |
+
export function getModelCatalog(): CodexModelInfo[] {
|
| 193 |
+
return [..._catalog];
|
| 194 |
+
}
|
| 195 |
+
|
| 196 |
+
/**
|
| 197 |
+
* Get the alias map.
|
| 198 |
+
*/
|
| 199 |
+
export function getModelAliases(): Record<string, string> {
|
| 200 |
+
return { ..._aliases };
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
/**
|
| 204 |
+
* Debug info for /debug/models endpoint.
|
| 205 |
+
*/
|
| 206 |
+
export function getModelStoreDebug(): {
|
| 207 |
+
totalModels: number;
|
| 208 |
+
backendModels: number;
|
| 209 |
+
staticOnlyModels: number;
|
| 210 |
+
aliasCount: number;
|
| 211 |
+
lastFetchTime: string | null;
|
| 212 |
+
models: Array<{ id: string; source: string }>;
|
| 213 |
+
} {
|
| 214 |
+
const backendCount = _catalog.filter((m) => m.source === "backend").length;
|
| 215 |
+
return {
|
| 216 |
+
totalModels: _catalog.length,
|
| 217 |
+
backendModels: backendCount,
|
| 218 |
+
staticOnlyModels: _catalog.length - backendCount,
|
| 219 |
+
aliasCount: Object.keys(_aliases).length,
|
| 220 |
+
lastFetchTime: _lastFetchTime,
|
| 221 |
+
models: _catalog.map((m) => ({ id: m.id, source: m.source ?? "static" })),
|
| 222 |
+
};
|
| 223 |
+
}
|
|
@@ -16,6 +16,9 @@ import {
|
|
| 16 |
buildHeadersWithContentType,
|
| 17 |
} from "../fingerprint/manager.js";
|
| 18 |
import type { CookieJar } from "./cookie-jar.js";
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
export interface CodexResponsesRequest {
|
| 21 |
model: string;
|
|
@@ -125,6 +128,101 @@ export class CodexApi {
|
|
| 125 |
}
|
| 126 |
}
|
| 127 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 128 |
/**
|
| 129 |
* Create a response (streaming).
|
| 130 |
* Returns the raw Response so the caller can process the SSE stream.
|
|
|
|
| 16 |
buildHeadersWithContentType,
|
| 17 |
} from "../fingerprint/manager.js";
|
| 18 |
import type { CookieJar } from "./cookie-jar.js";
|
| 19 |
+
import type { BackendModelEntry } from "../models/model-store.js";
|
| 20 |
+
|
| 21 |
// Set after the first successful model fetch so raw-sample logging runs only
// once per process (keeps logs quiet on the periodic background refresh).
let _firstModelFetchLogged = false;
|
| 22 |
|
| 23 |
export interface CodexResponsesRequest {
|
| 24 |
model: string;
|
|
|
|
| 128 |
}
|
| 129 |
}
|
| 130 |
|
| 131 |
+
/**
|
| 132 |
+
* Fetch available models from the Codex backend.
|
| 133 |
+
* Probes known endpoints; returns null if none respond.
|
| 134 |
+
*/
|
| 135 |
+
async getModels(): Promise<BackendModelEntry[] | null> {
|
| 136 |
+
const config = getConfig();
|
| 137 |
+
const transport = getTransport();
|
| 138 |
+
const baseUrl = config.api.base_url;
|
| 139 |
+
|
| 140 |
+
// Endpoints to probe (most specific first)
|
| 141 |
+
const endpoints = [
|
| 142 |
+
`${baseUrl}/codex/models`,
|
| 143 |
+
`${baseUrl}/models`,
|
| 144 |
+
`${baseUrl}/sentinel/chat-requirements`,
|
| 145 |
+
];
|
| 146 |
+
|
| 147 |
+
const headers = this.applyHeaders(
|
| 148 |
+
buildHeaders(this.token, this.accountId),
|
| 149 |
+
);
|
| 150 |
+
headers["Accept"] = "application/json";
|
| 151 |
+
if (!transport.isImpersonate()) {
|
| 152 |
+
headers["Accept-Encoding"] = "gzip, deflate";
|
| 153 |
+
}
|
| 154 |
+
|
| 155 |
+
for (const url of endpoints) {
|
| 156 |
+
try {
|
| 157 |
+
const result = await transport.get(url, headers, 15);
|
| 158 |
+
const parsed = JSON.parse(result.body) as Record<string, unknown>;
|
| 159 |
+
|
| 160 |
+
// sentinel/chat-requirements returns { chat_models: { models: [...], ... } }
|
| 161 |
+
const sentinel = parsed.chat_models as Record<string, unknown> | undefined;
|
| 162 |
+
const models = sentinel?.models ?? parsed.models ?? parsed.data ?? parsed.categories;
|
| 163 |
+
if (Array.isArray(models) && models.length > 0) {
|
| 164 |
+
console.log(`[CodexApi] getModels() found ${models.length} entries from ${url}`);
|
| 165 |
+
if (!_firstModelFetchLogged) {
|
| 166 |
+
console.log(`[CodexApi] Raw response keys: ${Object.keys(parsed).join(", ")}`);
|
| 167 |
+
console.log(`[CodexApi] Raw model sample: ${JSON.stringify(models[0]).slice(0, 500)}`);
|
| 168 |
+
if (models.length > 1) {
|
| 169 |
+
console.log(`[CodexApi] Raw model sample[1]: ${JSON.stringify(models[1]).slice(0, 500)}`);
|
| 170 |
+
}
|
| 171 |
+
_firstModelFetchLogged = true;
|
| 172 |
+
}
|
| 173 |
+
// Flatten nested categories into a single list
|
| 174 |
+
const flattened: BackendModelEntry[] = [];
|
| 175 |
+
for (const item of models) {
|
| 176 |
+
if (item && typeof item === "object") {
|
| 177 |
+
const entry = item as Record<string, unknown>;
|
| 178 |
+
if (Array.isArray(entry.models)) {
|
| 179 |
+
for (const sub of entry.models as BackendModelEntry[]) {
|
| 180 |
+
flattened.push(sub);
|
| 181 |
+
}
|
| 182 |
+
} else {
|
| 183 |
+
flattened.push(item as BackendModelEntry);
|
| 184 |
+
}
|
| 185 |
+
}
|
| 186 |
+
}
|
| 187 |
+
if (flattened.length > 0) {
|
| 188 |
+
console.log(`[CodexApi] getModels() total after flatten: ${flattened.length} models`);
|
| 189 |
+
return flattened;
|
| 190 |
+
}
|
| 191 |
+
}
|
| 192 |
+
} catch (err) {
|
| 193 |
+
const msg = err instanceof Error ? err.message : String(err);
|
| 194 |
+
console.log(`[CodexApi] Probe ${url} failed: ${msg}`);
|
| 195 |
+
continue;
|
| 196 |
+
}
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
return null;
|
| 200 |
+
}
|
| 201 |
+
|
| 202 |
+
/**
|
| 203 |
+
* Probe a backend endpoint and return raw JSON (for debug).
|
| 204 |
+
*/
|
| 205 |
+
async probeEndpoint(path: string): Promise<Record<string, unknown> | null> {
|
| 206 |
+
const config = getConfig();
|
| 207 |
+
const transport = getTransport();
|
| 208 |
+
const url = `${config.api.base_url}${path}`;
|
| 209 |
+
|
| 210 |
+
const headers = this.applyHeaders(
|
| 211 |
+
buildHeaders(this.token, this.accountId),
|
| 212 |
+
);
|
| 213 |
+
headers["Accept"] = "application/json";
|
| 214 |
+
if (!transport.isImpersonate()) {
|
| 215 |
+
headers["Accept-Encoding"] = "gzip, deflate";
|
| 216 |
+
}
|
| 217 |
+
|
| 218 |
+
try {
|
| 219 |
+
const result = await transport.get(url, headers, 15);
|
| 220 |
+
return JSON.parse(result.body) as Record<string, unknown>;
|
| 221 |
+
} catch {
|
| 222 |
+
return null;
|
| 223 |
+
}
|
| 224 |
+
}
|
| 225 |
+
|
| 226 |
/**
|
| 227 |
* Create a response (streaming).
|
| 228 |
* Returns the raw Response so the caller can process the SSE stream.
|
|
@@ -21,7 +21,7 @@ import {
|
|
| 21 |
collectCodexToGeminiResponse,
|
| 22 |
} from "../translation/codex-to-gemini.js";
|
| 23 |
import { getConfig } from "../config.js";
|
| 24 |
-
import { getModelCatalog } from "./models.js";
|
| 25 |
import {
|
| 26 |
handleProxyRequest,
|
| 27 |
type FormatAdapter,
|
|
|
|
| 21 |
collectCodexToGeminiResponse,
|
| 22 |
} from "../translation/codex-to-gemini.js";
|
| 23 |
import { getConfig } from "../config.js";
|
| 24 |
+
import { getModelCatalog } from "../models/model-store.js";
|
| 25 |
import {
|
| 26 |
handleProxyRequest,
|
| 27 |
type FormatAdapter,
|
|
@@ -1,66 +1,16 @@
|
|
| 1 |
-
import { Hono } from "hono";
|
| 2 |
-
import { readFileSync } from "fs";
|
| 3 |
-
import { resolve } from "path";
|
| 4 |
-
import yaml from "js-yaml";
|
| 5 |
-
import { getConfig } from "../config.js";
|
| 6 |
-
import type { OpenAIModel, OpenAIModelList } from "../types/openai.js";
|
| 7 |
-
|
| 8 |
/**
|
| 9 |
-
*
|
| 10 |
-
* Each model has reasoning effort levels, description, and capabilities.
|
| 11 |
*/
|
| 12 |
-
export interface CodexModelInfo {
|
| 13 |
-
id: string;
|
| 14 |
-
displayName: string;
|
| 15 |
-
description: string;
|
| 16 |
-
isDefault: boolean;
|
| 17 |
-
supportedReasoningEfforts: { reasoningEffort: string; description: string }[];
|
| 18 |
-
defaultReasoningEffort: string;
|
| 19 |
-
inputModalities: string[];
|
| 20 |
-
supportsPersonality: boolean;
|
| 21 |
-
upgrade: string | null;
|
| 22 |
-
}
|
| 23 |
-
|
| 24 |
-
interface ModelsConfig {
|
| 25 |
-
models: CodexModelInfo[];
|
| 26 |
-
aliases: Record<string, string>;
|
| 27 |
-
}
|
| 28 |
-
|
| 29 |
-
function loadModelConfig(): ModelsConfig {
|
| 30 |
-
const configPath = resolve(process.cwd(), "config/models.yaml");
|
| 31 |
-
const raw = yaml.load(readFileSync(configPath, "utf-8")) as ModelsConfig;
|
| 32 |
-
return raw;
|
| 33 |
-
}
|
| 34 |
-
|
| 35 |
-
const modelConfig = loadModelConfig();
|
| 36 |
-
const MODEL_CATALOG: CodexModelInfo[] = modelConfig.models;
|
| 37 |
-
const MODEL_ALIASES: Record<string, string> = modelConfig.aliases;
|
| 38 |
-
|
| 39 |
-
/**
|
| 40 |
-
* Resolve a model name (may be an alias) to a canonical model ID.
|
| 41 |
-
*/
|
| 42 |
-
export function resolveModelId(input: string): string {
|
| 43 |
-
const trimmed = input.trim();
|
| 44 |
-
if (MODEL_ALIASES[trimmed]) return MODEL_ALIASES[trimmed];
|
| 45 |
-
// Check if it's already a known model ID
|
| 46 |
-
if (MODEL_CATALOG.some((m) => m.id === trimmed)) return trimmed;
|
| 47 |
-
// Fall back to config default
|
| 48 |
-
return getConfig().model.default;
|
| 49 |
-
}
|
| 50 |
|
| 51 |
-
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
*/
|
| 61 |
-
export function getModelCatalog(): CodexModelInfo[] {
|
| 62 |
-
return MODEL_CATALOG;
|
| 63 |
-
}
|
| 64 |
|
| 65 |
// --- Routes ---
|
| 66 |
|
|
@@ -80,9 +30,12 @@ export function createModelRoutes(): Hono {
|
|
| 80 |
const app = new Hono();
|
| 81 |
|
| 82 |
app.get("/v1/models", (c) => {
|
|
|
|
|
|
|
|
|
|
| 83 |
// Include catalog models + aliases as separate entries
|
| 84 |
-
const models: OpenAIModel[] =
|
| 85 |
-
for (const
|
| 86 |
models.push({
|
| 87 |
id: alias,
|
| 88 |
object: "model",
|
|
@@ -96,13 +49,15 @@ export function createModelRoutes(): Hono {
|
|
| 96 |
|
| 97 |
app.get("/v1/models/:modelId", (c) => {
|
| 98 |
const modelId = c.req.param("modelId");
|
|
|
|
|
|
|
| 99 |
|
| 100 |
// Try direct match
|
| 101 |
-
const info =
|
| 102 |
if (info) return c.json(toOpenAIModel(info));
|
| 103 |
|
| 104 |
// Try alias
|
| 105 |
-
const resolved =
|
| 106 |
if (resolved) {
|
| 107 |
return c.json({
|
| 108 |
id: modelId,
|
|
@@ -126,8 +81,9 @@ export function createModelRoutes(): Hono {
|
|
| 126 |
// Extended endpoint: model details with reasoning efforts
|
| 127 |
app.get("/v1/models/:modelId/info", (c) => {
|
| 128 |
const modelId = c.req.param("modelId");
|
| 129 |
-
const
|
| 130 |
-
const
|
|
|
|
| 131 |
if (!info) {
|
| 132 |
c.status(404);
|
| 133 |
return c.json({ error: `Model '${modelId}' not found` });
|
|
@@ -135,5 +91,10 @@ export function createModelRoutes(): Hono {
|
|
| 135 |
return c.json(info);
|
| 136 |
});
|
| 137 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 138 |
return app;
|
| 139 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
/**
|
| 2 |
+
* Model routes β pure route handlers reading from model-store singleton.
|
|
|
|
| 3 |
*/
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
|
| 5 |
+
import { Hono } from "hono";
|
| 6 |
+
import type { OpenAIModel, OpenAIModelList } from "../types/openai.js";
|
| 7 |
+
import {
|
| 8 |
+
getModelCatalog,
|
| 9 |
+
getModelAliases,
|
| 10 |
+
getModelInfo,
|
| 11 |
+
getModelStoreDebug,
|
| 12 |
+
type CodexModelInfo,
|
| 13 |
+
} from "../models/model-store.js";
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
// --- Routes ---
|
| 16 |
|
|
|
|
| 30 |
const app = new Hono();
|
| 31 |
|
| 32 |
app.get("/v1/models", (c) => {
|
| 33 |
+
const catalog = getModelCatalog();
|
| 34 |
+
const aliases = getModelAliases();
|
| 35 |
+
|
| 36 |
// Include catalog models + aliases as separate entries
|
| 37 |
+
const models: OpenAIModel[] = catalog.map(toOpenAIModel);
|
| 38 |
+
for (const alias of Object.keys(aliases)) {
|
| 39 |
models.push({
|
| 40 |
id: alias,
|
| 41 |
object: "model",
|
|
|
|
| 49 |
|
| 50 |
app.get("/v1/models/:modelId", (c) => {
|
| 51 |
const modelId = c.req.param("modelId");
|
| 52 |
+
const catalog = getModelCatalog();
|
| 53 |
+
const aliases = getModelAliases();
|
| 54 |
|
| 55 |
// Try direct match
|
| 56 |
+
const info = catalog.find((m) => m.id === modelId);
|
| 57 |
if (info) return c.json(toOpenAIModel(info));
|
| 58 |
|
| 59 |
// Try alias
|
| 60 |
+
const resolved = aliases[modelId];
|
| 61 |
if (resolved) {
|
| 62 |
return c.json({
|
| 63 |
id: modelId,
|
|
|
|
| 81 |
// Extended endpoint: model details with reasoning efforts
|
| 82 |
app.get("/v1/models/:modelId/info", (c) => {
|
| 83 |
const modelId = c.req.param("modelId");
|
| 84 |
+
const aliases = getModelAliases();
|
| 85 |
+
const resolved = aliases[modelId] ?? modelId;
|
| 86 |
+
const info = getModelInfo(resolved);
|
| 87 |
if (!info) {
|
| 88 |
c.status(404);
|
| 89 |
return c.json({ error: `Model '${modelId}' not found` });
|
|
|
|
| 91 |
return c.json(info);
|
| 92 |
});
|
| 93 |
|
| 94 |
+
// Debug endpoint: model store internals
|
| 95 |
+
app.get("/debug/models", (c) => {
|
| 96 |
+
return c.json(getModelStoreDebug());
|
| 97 |
+
});
|
| 98 |
+
|
| 99 |
return app;
|
| 100 |
}
|
|
@@ -7,7 +7,7 @@ import type {
|
|
| 7 |
CodexResponsesRequest,
|
| 8 |
CodexInputItem,
|
| 9 |
} from "../proxy/codex-api.js";
|
| 10 |
-
import { resolveModelId, getModelInfo } from "../
|
| 11 |
import { getConfig } from "../config.js";
|
| 12 |
import { buildInstructions, budgetToEffort } from "./shared-utils.js";
|
| 13 |
import { anthropicToolsToCodex, anthropicToolChoiceToCodex } from "./tool-format.js";
|
|
|
|
| 7 |
CodexResponsesRequest,
|
| 8 |
CodexInputItem,
|
| 9 |
} from "../proxy/codex-api.js";
|
| 10 |
+
import { resolveModelId, getModelInfo } from "../models/model-store.js";
|
| 11 |
import { getConfig } from "../config.js";
|
| 12 |
import { buildInstructions, budgetToEffort } from "./shared-utils.js";
|
| 13 |
import { anthropicToolsToCodex, anthropicToolChoiceToCodex } from "./tool-format.js";
|
|
@@ -11,7 +11,7 @@ import type {
|
|
| 11 |
CodexResponsesRequest,
|
| 12 |
CodexInputItem,
|
| 13 |
} from "../proxy/codex-api.js";
|
| 14 |
-
import { resolveModelId, getModelInfo } from "../
|
| 15 |
import { getConfig } from "../config.js";
|
| 16 |
import { buildInstructions, budgetToEffort } from "./shared-utils.js";
|
| 17 |
import { geminiToolsToCodex, geminiToolConfigToCodex } from "./tool-format.js";
|
|
|
|
| 11 |
CodexResponsesRequest,
|
| 12 |
CodexInputItem,
|
| 13 |
} from "../proxy/codex-api.js";
|
| 14 |
+
import { resolveModelId, getModelInfo } from "../models/model-store.js";
|
| 15 |
import { getConfig } from "../config.js";
|
| 16 |
import { buildInstructions, budgetToEffort } from "./shared-utils.js";
|
| 17 |
import { geminiToolsToCodex, geminiToolConfigToCodex } from "./tool-format.js";
|
|
@@ -7,7 +7,7 @@ import type {
|
|
| 7 |
CodexResponsesRequest,
|
| 8 |
CodexInputItem,
|
| 9 |
} from "../proxy/codex-api.js";
|
| 10 |
-
import { resolveModelId, getModelInfo } from "../
|
| 11 |
import { getConfig } from "../config.js";
|
| 12 |
import { buildInstructions } from "./shared-utils.js";
|
| 13 |
import {
|
|
|
|
| 7 |
CodexResponsesRequest,
|
| 8 |
CodexInputItem,
|
| 9 |
} from "../proxy/codex-api.js";
|
| 10 |
+
import { resolveModelId, getModelInfo } from "../models/model-store.js";
|
| 11 |
import { getConfig } from "../config.js";
|
| 12 |
import { buildInstructions } from "./shared-utils.js";
|
| 13 |
import {
|
|
@@ -4,7 +4,7 @@ import { Header } from "./components/Header";
|
|
| 4 |
import { AccountList } from "./components/AccountList";
|
| 5 |
import { AddAccount } from "./components/AddAccount";
|
| 6 |
import { ApiConfig } from "./components/ApiConfig";
|
| 7 |
-
import {
|
| 8 |
import { CodeExamples } from "./components/CodeExamples";
|
| 9 |
import { Footer } from "./components/Footer";
|
| 10 |
import { useAccounts } from "./hooks/use-accounts";
|
|
@@ -37,8 +37,10 @@ function Dashboard() {
|
|
| 37 |
selectedModel={status.selectedModel}
|
| 38 |
onModelChange={status.setSelectedModel}
|
| 39 |
/>
|
| 40 |
-
<
|
| 41 |
apiKey={status.apiKey}
|
|
|
|
|
|
|
| 42 |
/>
|
| 43 |
<CodeExamples
|
| 44 |
baseUrl={status.baseUrl}
|
|
|
|
| 4 |
import { AccountList } from "./components/AccountList";
|
| 5 |
import { AddAccount } from "./components/AddAccount";
|
| 6 |
import { ApiConfig } from "./components/ApiConfig";
|
| 7 |
+
import { AnthropicSetup } from "./components/AnthropicSetup";
|
| 8 |
import { CodeExamples } from "./components/CodeExamples";
|
| 9 |
import { Footer } from "./components/Footer";
|
| 10 |
import { useAccounts } from "./hooks/use-accounts";
|
|
|
|
| 37 |
selectedModel={status.selectedModel}
|
| 38 |
onModelChange={status.setSelectedModel}
|
| 39 |
/>
|
| 40 |
+
<AnthropicSetup
|
| 41 |
apiKey={status.apiKey}
|
| 42 |
+
models={status.models}
|
| 43 |
+
selectedModel={status.selectedModel}
|
| 44 |
/>
|
| 45 |
<CodeExamples
|
| 46 |
baseUrl={status.baseUrl}
|
|
@@ -1,29 +1,23 @@
|
|
| 1 |
-
import { useState,
|
| 2 |
import { useT } from "../i18n/context";
|
| 3 |
import { CopyButton } from "./CopyButton";
|
| 4 |
|
| 5 |
-
|
| 6 |
-
|
| 7 |
-
const CLAUDE_MODELS: { id: ClaudeModel; label: string; desc: string }[] = [
|
| 8 |
-
{ id: "opus", label: "Opus", desc: "gpt-5.3-codex" },
|
| 9 |
-
{ id: "sonnet", label: "Sonnet", desc: "gpt-5.2-codex" },
|
| 10 |
-
{ id: "haiku", label: "Haiku", desc: "gpt-5.1-codex-mini" },
|
| 11 |
-
];
|
| 12 |
-
|
| 13 |
-
interface ClaudeCodeSetupProps {
|
| 14 |
apiKey: string;
|
|
|
|
|
|
|
| 15 |
}
|
| 16 |
|
| 17 |
-
export function
|
| 18 |
const t = useT();
|
| 19 |
-
const [model, setModel] = useState
|
| 20 |
|
| 21 |
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:8080";
|
| 22 |
|
| 23 |
const envLines = useMemo(() => ({
|
| 24 |
ANTHROPIC_BASE_URL: origin,
|
| 25 |
ANTHROPIC_API_KEY: apiKey,
|
| 26 |
-
ANTHROPIC_MODEL:
|
| 27 |
}), [origin, apiKey, model]);
|
| 28 |
|
| 29 |
const allEnvText = useMemo(
|
|
@@ -36,9 +30,6 @@ export function ClaudeCodeSetup({ apiKey }: ClaudeCodeSetupProps) {
|
|
| 36 |
const getApiKey = useCallback(() => envLines.ANTHROPIC_API_KEY, [envLines]);
|
| 37 |
const getModel = useCallback(() => envLines.ANTHROPIC_MODEL, [envLines]);
|
| 38 |
|
| 39 |
-
const activeBtn = "px-3 py-1.5 text-xs font-semibold rounded bg-white dark:bg-[#21262d] text-slate-800 dark:text-text-main shadow-sm border border-transparent dark:border-border-dark transition-all";
|
| 40 |
-
const inactiveBtn = "px-3 py-1.5 text-xs font-medium rounded text-slate-500 dark:text-text-dim hover:text-slate-700 dark:hover:text-text-main hover:bg-white/50 dark:hover:bg-[#21262d] border border-transparent transition-all";
|
| 41 |
-
|
| 42 |
return (
|
| 43 |
<section class="bg-white dark:bg-card-dark border border-gray-200 dark:border-border-dark rounded-xl p-5 shadow-sm transition-colors">
|
| 44 |
<div class="flex items-center justify-between mb-6 border-b border-slate-100 dark:border-border-dark pb-4">
|
|
@@ -46,20 +37,17 @@ export function ClaudeCodeSetup({ apiKey }: ClaudeCodeSetupProps) {
|
|
| 46 |
<svg class="size-5 text-primary" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5">
|
| 47 |
<path stroke-linecap="round" stroke-linejoin="round" d="M6.75 7.5l3 2.25-3 2.25m4.5 0h3m-9 8.25h13.5A2.25 2.25 0 0021 18V6a2.25 2.25 0 00-2.25-2.25H5.25A2.25 2.25 0 003 6v12a2.25 2.25 0 002.25 2.25z" />
|
| 48 |
</svg>
|
| 49 |
-
<h2 class="text-[0.95rem] font-bold">{t("
|
| 50 |
</div>
|
| 51 |
-
<
|
| 52 |
-
|
| 53 |
-
|
| 54 |
-
|
| 55 |
-
|
| 56 |
-
|
| 57 |
-
>
|
| 58 |
-
{m.label}
|
| 59 |
-
<span class="ml-1 text-[0.65rem] opacity-60">({m.desc})</span>
|
| 60 |
-
</button>
|
| 61 |
))}
|
| 62 |
-
</
|
| 63 |
</div>
|
| 64 |
|
| 65 |
{/* Env vars */}
|
|
@@ -86,7 +74,7 @@ export function ClaudeCodeSetup({ apiKey }: ClaudeCodeSetupProps) {
|
|
| 86 |
{/* Copy all button */}
|
| 87 |
<div class="mt-5 flex items-center gap-3">
|
| 88 |
<CopyButton getText={getAllEnv} variant="label" />
|
| 89 |
-
<span class="text-xs text-slate-400 dark:text-text-dim">{t("
|
| 90 |
</div>
|
| 91 |
</section>
|
| 92 |
);
|
|
|
|
| 1 |
+
import { useState, useMemo, useCallback } from "preact/hooks";
|
| 2 |
import { useT } from "../i18n/context";
|
| 3 |
import { CopyButton } from "./CopyButton";
|
| 4 |
|
| 5 |
+
interface AnthropicSetupProps {
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
apiKey: string;
|
| 7 |
+
models: string[];
|
| 8 |
+
selectedModel: string;
|
| 9 |
}
|
| 10 |
|
| 11 |
+
export function AnthropicSetup({ apiKey, models, selectedModel }: AnthropicSetupProps) {
|
| 12 |
const t = useT();
|
| 13 |
+
const [model, setModel] = useState(selectedModel);
|
| 14 |
|
| 15 |
const origin = typeof window !== "undefined" ? window.location.origin : "http://localhost:8080";
|
| 16 |
|
| 17 |
const envLines = useMemo(() => ({
|
| 18 |
ANTHROPIC_BASE_URL: origin,
|
| 19 |
ANTHROPIC_API_KEY: apiKey,
|
| 20 |
+
ANTHROPIC_MODEL: model,
|
| 21 |
}), [origin, apiKey, model]);
|
| 22 |
|
| 23 |
const allEnvText = useMemo(
|
|
|
|
| 30 |
const getApiKey = useCallback(() => envLines.ANTHROPIC_API_KEY, [envLines]);
|
| 31 |
const getModel = useCallback(() => envLines.ANTHROPIC_MODEL, [envLines]);
|
| 32 |
|
|
|
|
|
|
|
|
|
|
| 33 |
return (
|
| 34 |
<section class="bg-white dark:bg-card-dark border border-gray-200 dark:border-border-dark rounded-xl p-5 shadow-sm transition-colors">
|
| 35 |
<div class="flex items-center justify-between mb-6 border-b border-slate-100 dark:border-border-dark pb-4">
|
|
|
|
| 37 |
<svg class="size-5 text-primary" viewBox="0 0 24 24" fill="none" stroke="currentColor" stroke-width="1.5">
|
| 38 |
<path stroke-linecap="round" stroke-linejoin="round" d="M6.75 7.5l3 2.25-3 2.25m4.5 0h3m-9 8.25h13.5A2.25 2.25 0 0021 18V6a2.25 2.25 0 00-2.25-2.25H5.25A2.25 2.25 0 003 6v12a2.25 2.25 0 002.25 2.25z" />
|
| 39 |
</svg>
|
| 40 |
+
<h2 class="text-[0.95rem] font-bold">{t("anthropicSetup")}</h2>
|
| 41 |
</div>
|
| 42 |
+
<select
|
| 43 |
+
class="px-3 py-1.5 text-xs font-mono rounded-lg bg-slate-100 dark:bg-bg-dark border border-gray-200 dark:border-border-dark text-slate-700 dark:text-text-main outline-none"
|
| 44 |
+
value={model}
|
| 45 |
+
onChange={(e) => setModel((e.target as HTMLSelectElement).value)}
|
| 46 |
+
>
|
| 47 |
+
{models.map((m) => (
|
| 48 |
+
<option key={m} value={m}>{m}</option>
|
|
|
|
|
|
|
|
|
|
| 49 |
))}
|
| 50 |
+
</select>
|
| 51 |
</div>
|
| 52 |
|
| 53 |
{/* Env vars */}
|
|
|
|
| 74 |
{/* Copy all button */}
|
| 75 |
<div class="mt-5 flex items-center gap-3">
|
| 76 |
<CopyButton getText={getAllEnv} variant="label" />
|
| 77 |
+
<span class="text-xs text-slate-400 dark:text-text-dim">{t("anthropicCopyAllHint")}</span>
|
| 78 |
</div>
|
| 79 |
</section>
|
| 80 |
);
|
|
@@ -13,7 +13,7 @@ export function useStatus(accountCount: number) {
|
|
| 13 |
const ids: string[] = data.data.map((m: { id: string }) => m.id);
|
| 14 |
if (ids.length > 0) {
|
| 15 |
setModels(ids);
|
| 16 |
-
const preferred = ids.find((n) => n
|
| 17 |
if (preferred) setSelectedModel(preferred);
|
| 18 |
}
|
| 19 |
} catch {
|
|
|
|
| 13 |
const ids: string[] = data.data.map((m: { id: string }) => m.id);
|
| 14 |
if (ids.length > 0) {
|
| 15 |
setModels(ids);
|
| 16 |
+
const preferred = ids.find((n) => n === "codex");
|
| 17 |
if (preferred) setSelectedModel(preferred);
|
| 18 |
}
|
| 19 |
} catch {
|
|
@@ -32,8 +32,8 @@ export const translations = {
|
|
| 32 |
"Use this key to authenticate requests to the proxy. Do not share it.",
|
| 33 |
copyUrl: "Copy URL",
|
| 34 |
copyApiKey: "Copy API Key",
|
| 35 |
-
|
| 36 |
-
|
| 37 |
integrationExamples: "Integration Examples",
|
| 38 |
copy: "Copy",
|
| 39 |
addStep1:
|
|
@@ -89,8 +89,8 @@ export const translations = {
|
|
| 89 |
"\u4f7f\u7528\u6b64\u5bc6\u94a5\u5411\u4ee3\u7406\u53d1\u9001\u8ba4\u8bc1\u8bf7\u6c42\uff0c\u8bf7\u52ff\u6cc4\u9732\u3002",
|
| 90 |
copyUrl: "\u590d\u5236 URL",
|
| 91 |
copyApiKey: "\u590d\u5236 API \u5bc6\u94a5",
|
| 92 |
-
|
| 93 |
-
|
| 94 |
integrationExamples: "\u96c6\u6210\u793a\u4f8b",
|
| 95 |
copy: "\u590d\u5236",
|
| 96 |
addStep1:
|
|
|
|
| 32 |
"Use this key to authenticate requests to the proxy. Do not share it.",
|
| 33 |
copyUrl: "Copy URL",
|
| 34 |
copyApiKey: "Copy API Key",
|
| 35 |
+
anthropicSetup: "Anthropic SDK Setup",
|
| 36 |
+
anthropicCopyAllHint: "Copy all env vars β paste into terminal or .env file",
|
| 37 |
integrationExamples: "Integration Examples",
|
| 38 |
copy: "Copy",
|
| 39 |
addStep1:
|
|
|
|
| 89 |
"\u4f7f\u7528\u6b64\u5bc6\u94a5\u5411\u4ee3\u7406\u53d1\u9001\u8ba4\u8bc1\u8bf7\u6c42\uff0c\u8bf7\u52ff\u6cc4\u9732\u3002",
|
| 90 |
copyUrl: "\u590d\u5236 URL",
|
| 91 |
copyApiKey: "\u590d\u5236 API \u5bc6\u94a5",
|
| 92 |
+
anthropicSetup: "Anthropic SDK \u914d\u7f6e",
|
| 93 |
+
anthropicCopyAllHint: "\u590d\u5236\u6240\u6709\u73af\u5883\u53d8\u91cf \u2014 \u7c98\u8d34\u5230\u7ec8\u7aef\u6216 .env \u6587\u4ef6",
|
| 94 |
integrationExamples: "\u96c6\u6210\u793a\u4f8b",
|
| 95 |
copy: "\u590d\u5236",
|
| 96 |
addStep1:
|