id | canonical_slug | hugging_face_id | name | created | description | context_length | architecture | pricing | top_provider | per_request_limits | supported_parameters | default_parameters | knowledge_cutoff | expiration_date | links |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
openrouter/elephant-alpha | openrouter/elephant-alpha | null | Elephant | 1,776,052,598 | Elephant Alpha is a 100B-parameter text model focused on intelligence efficiency, delivering strong performance while minimizing token usage. It supports a 256K context window with up to 32K output tokens,... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"max_tokens",
"response_format",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openrouter/elephant-alpha/endpoints"
} |
anthropic/claude-opus-4.6-fast | anthropic/claude-4.6-opus-fast-20260407 | null | Anthropic: Claude Opus 4.6 (Fast) | 1,775,592,472 | Fast-mode variant of [Opus 4.6](/anthropic/claude-opus-4.6) - identical capabilities with higher output speed at premium 6x pricing.
Learn more in Anthropic's docs: https://platform.claude.com/docs/en/build-with-claude/fast-mode | 1,000,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Claude"
} | {
"audio": null,
"completion": "0.00015",
"image": null,
"input_cache_read": "0.000003",
"input_cache_write": "0.0000375",
"internal_reasoning": null,
"prompt": "0.00003",
"web_search": "0.01"
} | {
"context_length": 1000000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p",
"verbosity"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/anthropic/claude-4.6-opus-fast-20260407/endpoints"
} |
z-ai/glm-5.1 | z-ai/glm-5.1-20260406 | zai-org/GLM-5.1 | Z.ai: GLM 5.1 | 1,775,578,025 | GLM-5.1 delivers a major leap in coding capability, with particularly significant gains in handling long-horizon tasks. Unlike previous models built around minute-level interactions, GLM-5.1 can work independently and continuously on... | 202,752 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000315",
"image": null,
"input_cache_read": "0.000000475",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000095",
"web_search": null
} | {
"context_length": 202752,
"is_moderated": false,
"max_completion_tokens": 65535
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"parallel_tool_calls",
"presence_penalty",
"reasoning",
"reasoning_effort",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools... | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-5.1-20260406/endpoints"
} |
google/gemma-4-26b-a4b-it:free | google/gemma-4-26b-a4b-it-20260403 | google/gemma-4-26B-A4B-it | Google: Gemma 4 26B A4B (free) | 1,775,227,989 | Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at... | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemma"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": 64,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/google/gemma-4-26b-a4b-it-20260403/endpoints"
} |
google/gemma-4-26b-a4b-it | google/gemma-4-26b-a4b-it-20260403 | google/gemma-4-26B-A4B-it | Google: Gemma 4 26B A4B | 1,775,227,989 | Gemma 4 26B A4B IT is an instruction-tuned Mixture-of-Experts (MoE) model from Google DeepMind. Despite 25.2B total parameters, only 3.8B activate per token during inference — delivering near-31B quality at... | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemma"
} | {
"audio": null,
"completion": "0.00000035",
"image": null,
"input_cache_read": "0.00000001",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000008",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": 64,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/google/gemma-4-26b-a4b-it-20260403/endpoints"
} |
google/gemma-4-31b-it:free | google/gemma-4-31b-it-20260402 | google/gemma-4-31B-it | Google: Gemma 4 31B (free) | 1,775,148,486 | Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function... | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemma"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": 64,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/google/gemma-4-31b-it-20260402/endpoints"
} |
google/gemma-4-31b-it | google/gemma-4-31b-it-20260402 | google/gemma-4-31B-it | Google: Gemma 4 31B | 1,775,148,486 | Gemma 4 31B Instruct is Google DeepMind's 30.7B dense multimodal model supporting text and image input with text output. Features a 256K token context window, configurable thinking/reasoning mode, native function... | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemma"
} | {
"audio": null,
"completion": "0.00000038",
"image": null,
"input_cache_read": "0.0000000199999995",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000013",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": 64,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/google/gemma-4-31b-it-20260402/endpoints"
} |
qwen/qwen3.6-plus | qwen/qwen3.6-plus-04-02 | | Qwen: Qwen3.6 Plus | 1,775,133,557 | Qwen 3.6 Plus builds on a hybrid architecture that combines efficient linear attention with sparse mixture-of-experts routing, enabling strong scalability and high-performance inference. Compared to the 3.5 series, it delivers... | 1,000,000 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000195",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000325",
"web_search": null
} | {
"context_length": 1000000,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.6-plus-04-02/endpoints"
} |
z-ai/glm-5v-turbo | z-ai/glm-5v-turbo-20260401 | | Z.ai: GLM 5V Turbo | 1,775,061,458 | GLM-5V-Turbo is Z.ai’s first native multimodal agent foundation model, built for vision-based coding and agent-driven tasks. It natively handles image, video, and text inputs, excels at long-horizon planning, complex coding,... | 202,752 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000004",
"image": null,
"input_cache_read": "0.00000024",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000012",
"web_search": null
} | {
"context_length": 202752,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-5v-turbo-20260401/endpoints"
} |
arcee-ai/trinity-large-thinking | arcee-ai/trinity-large-thinking | arcee-ai/Trinity-Large-Thinking | Arcee AI: Trinity Large Thinking | 1,775,058,318 | Trinity Large Thinking is a powerful open source reasoning model from the team at Arcee AI. It shows strong performance in PinchBench, agentic workloads, and reasoning tasks. Launch video: https://youtu.be/Gc82AXLa0Rg?si=4RLn6WBz33qT--B7 | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000085",
"image": null,
"input_cache_read": "0.00000006",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000022",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 262144
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": 0.8
} | null | null | {
"details": "/api/v1/models/arcee-ai/trinity-large-thinking/endpoints"
} |
x-ai/grok-4.20-multi-agent | x-ai/grok-4.20-multi-agent-20260309 | | xAI: Grok 4.20 Multi-Agent | 1,774,979,158 | Grok 4.20 Multi-Agent is a variant of xAI’s Grok 4.20 designed for collaborative, agent-based workflows. Multiple agents operate in parallel to conduct deep research, coordinate tool use, and synthesize information... | 2,000,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "Grok"
} | {
"audio": null,
"completion": "0.000006",
"image": null,
"input_cache_read": "0.0000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000002",
"web_search": "0.005"
} | {
"context_length": 2000000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"include_reasoning",
"logprobs",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | 2025-09-01 | null | {
"details": "/api/v1/models/x-ai/grok-4.20-multi-agent-20260309/endpoints"
} |
x-ai/grok-4.20 | x-ai/grok-4.20-20260309 | | xAI: Grok 4.20 | 1,774,979,019 | Grok 4.20 is xAI's newest flagship model with industry-leading speed and agentic tool calling capabilities. It combines the lowest hallucination rate on the market with strict prompt adherence, delivering consistently... | 2,000,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "Grok"
} | {
"audio": null,
"completion": "0.000006",
"image": null,
"input_cache_read": "0.0000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000002",
"web_search": "0.005"
} | {
"context_length": 2000000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"include_reasoning",
"logprobs",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | 2025-09-01 | null | {
"details": "/api/v1/models/x-ai/grok-4.20-20260309/endpoints"
} |
google/lyria-3-pro-preview | google/lyria-3-pro-preview-20260330 | null | Google: Lyria 3 Pro Preview | 1,774,907,286 | Full-length songs are priced at $0.08 per song. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate high-quality, 48kHz... | 1,048,576 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text+audio",
"output_modalities": [
"text",
"audio"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"max_tokens",
"response_format",
"seed",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/lyria-3-pro-preview-20260330/endpoints"
} |
google/lyria-3-clip-preview | google/lyria-3-clip-preview-20260330 | null | Google: Lyria 3 Clip Preview | 1,774,907,255 | 30-second clips are priced at $0.04 per clip. Lyria 3 is Google's family of music generation models, available through the Gemini API. With Lyria 3, you can generate... | 1,048,576 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text+audio",
"output_modalities": [
"text",
"audio"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"max_tokens",
"response_format",
"seed",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/lyria-3-clip-preview-20260330/endpoints"
} |
kwaipilot/kat-coder-pro-v2 | kwaipilot/kat-coder-pro-v2-20260327 | | Kwaipilot: KAT-Coder-Pro V2 | 1,774,649,310 | KAT-Coder-Pro V2 is the latest high-performance model in KwaiKAT’s KAT-Coder series, designed for complex enterprise-grade software engineering and SaaS integration. It builds on the agentic coding strengths of earlier versions,... | 256,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000012",
"image": null,
"input_cache_read": "0.00000006",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003",
"web_search": null
} | {
"context_length": 256000,
"is_moderated": false,
"max_completion_tokens": 80000
} | null | [
"frequency_penalty",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/kwaipilot/kat-coder-pro-v2-20260327/endpoints"
} |
rekaai/reka-edge | rekaai/reka-edge-2603 | RekaAI/reka-edge-2603 | Reka Edge | 1,774,026,965 | Reka Edge is an extremely efficient 7B multimodal vision-language model that accepts image/video+text inputs and generates text outputs. This model is optimized specifically to deliver industry-leading performance in image understanding,... | 16,384 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000001",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 16384,
"is_moderated": false,
"max_completion_tokens": 16384
} | null | [
"frequency_penalty",
"max_tokens",
"presence_penalty",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/rekaai/reka-edge-2603/endpoints"
} |
xiaomi/mimo-v2-omni | xiaomi/mimo-v2-omni-20260318 | | Xiaomi: MiMo-V2-Omni | 1,773,863,703 | MiMo-V2-Omni is a frontier omni-modal model that natively processes image, video, and audio inputs within a unified architecture. It combines strong multimodal perception with agentic capability - visual grounding, multi-step... | 262,144 | {
"input_modalities": [
"text",
"audio",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+audio+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000002",
"image": null,
"input_cache_read": "0.00000008",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000004",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"stop",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/xiaomi/mimo-v2-omni-20260318/endpoints"
} |
xiaomi/mimo-v2-pro | xiaomi/mimo-v2-pro-20260318 | | Xiaomi: MiMo-V2-Pro | 1,773,863,643 | MiMo-V2-Pro is Xiaomi's flagship foundation model, featuring over 1T total parameters and a 1M context length, deeply optimized for agentic scenarios. It is highly adaptable to general agent frameworks like... | 1,048,576 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000003",
"image": null,
"input_cache_read": "0.0000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000001",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"stop",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/xiaomi/mimo-v2-pro-20260318/endpoints"
} |
minimax/minimax-m2.7 | minimax/minimax-m2.7-20260318 | MiniMaxAI/MiniMax-M2.7 | MiniMax: MiniMax M2.7 | 1,773,836,697 | MiniMax-M2.7 is a next-generation large language model designed for autonomous, real-world productivity and continuous improvement. Built to actively participate in its own evolution, M2.7 integrates advanced agentic capabilities through multi-agent... | 196,608 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000012",
"image": null,
"input_cache_read": "0.000000059",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003",
"web_search": null
} | {
"context_length": 196608,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/minimax/minimax-m2.7-20260318/endpoints"
} |
openai/gpt-5.4-nano | openai/gpt-5.4-nano-20260317 | | OpenAI: GPT-5.4 Nano | 1,773,748,187 | GPT-5.4 nano is the most lightweight and cost-efficient variant of the GPT-5.4 family, optimized for speed-critical and high-volume tasks. It supports text and image inputs and is designed for low-latency... | 400,000 | {
"input_modalities": [
"file",
"image",
"text"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.00000125",
"image": null,
"input_cache_read": "0.00000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000002",
"web_search": "0.01"
} | {
"context_length": 400000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | 2025-08-31 | null | {
"details": "/api/v1/models/openai/gpt-5.4-nano-20260317/endpoints"
} |
openai/gpt-5.4-mini | openai/gpt-5.4-mini-20260317 | | OpenAI: GPT-5.4 Mini | 1,773,748,178 | GPT-5.4 mini brings the core capabilities of GPT-5.4 to a faster, more efficient model optimized for high-throughput workloads. It supports text and image inputs with strong performance across reasoning, coding,... | 400,000 | {
"input_modalities": [
"file",
"image",
"text"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.0000045",
"image": null,
"input_cache_read": "0.000000075",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000075",
"web_search": "0.01"
} | {
"context_length": 400000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | 2025-08-31 | null | {
"details": "/api/v1/models/openai/gpt-5.4-mini-20260317/endpoints"
} |
mistralai/mistral-small-2603 | mistralai/mistral-small-2603 | mistralai/Mistral-Small-4-119B-2603 | Mistral: Mistral Small 4 | 1,773,695,685 | Mistral Small 4 is the next major release in the Mistral Small family, unifying the capabilities of several flagship Mistral models into a single system. It combines strong reasoning from... | 262,144 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.0000006",
"image": null,
"input_cache_read": "0.000000015",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/mistral-small-2603/endpoints"
} |
z-ai/glm-5-turbo | z-ai/glm-5-turbo-20260315 | | Z.ai: GLM 5 Turbo | 1,773,583,573 | GLM-5 Turbo is a new model from Z.ai designed for fast inference and strong performance in agent-driven environments such as OpenClaw scenarios. It is deeply optimized for real-world agent workflows... | 202,752 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000004",
"image": null,
"input_cache_read": "0.00000024",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000012",
"web_search": null
} | {
"context_length": 202752,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-5-turbo-20260315/endpoints"
} |
nvidia/nemotron-3-super-120b-a12b:free | nvidia/nemotron-3-super-120b-a12b-20230311 | nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-FP8 | NVIDIA: Nemotron 3 Super (free) | 1,773,245,239 | NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 262144
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/nvidia/nemotron-3-super-120b-a12b-20230311/endpoints"
} |
nvidia/nemotron-3-super-120b-a12b | nvidia/nemotron-3-super-120b-a12b-20230311 | nvidia/NVIDIA-Nemotron-3-Super-120B-A12B-FP8 | NVIDIA: Nemotron 3 Super | 1,773,245,239 | NVIDIA Nemotron 3 Super is a 120B-parameter open hybrid MoE model, activating just 12B parameters for maximum compute efficiency and accuracy in complex multi-agent applications. Built on a hybrid Mamba-Transformer... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000005",
"image": null,
"input_cache_read": "0.0000001",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/nvidia/nemotron-3-super-120b-a12b-20230311/endpoints"
} |
bytedance-seed/seed-2.0-lite | bytedance-seed/seed-2.0-lite-20260309 | null | ByteDance Seed: Seed-2.0-Lite | 1,773,157,231 | Seed-2.0-Lite is a versatile, cost‑efficient enterprise workhorse that delivers strong multimodal and agent capabilities while offering noticeably lower latency, making it a practical default choice for most production workloads across... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000002",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000025",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/bytedance-seed/seed-2.0-lite-20260309/endpoints"
} |
qwen/qwen3.5-9b | qwen/qwen3.5-9b-20260310 | Qwen/Qwen3.5-9B | Qwen: Qwen3.5-9B | 1,773,152,396 | Qwen3.5-9B is a multimodal foundation model from the Qwen3.5 family, designed to deliver strong reasoning, coding, and visual understanding in an efficient 9B-parameter architecture. It uses a unified vision-language design... | 256,000 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000015",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000005",
"web_search": null
} | {
"context_length": 256000,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-9b-20260310/endpoints"
} |
openai/gpt-5.4-pro | openai/gpt-5.4-pro-20260305 | | OpenAI: GPT-5.4 Pro | 1,772,734,366 | GPT-5.4 Pro is OpenAI's most advanced model, building on GPT-5.4's unified architecture with enhanced reasoning capabilities for complex, high-stakes tasks. It features a 1M+ token context window (922K input, 128K... | 1,050,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.00018",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00003",
"web_search": "0.01"
} | {
"context_length": 1050000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.4-pro-20260305/endpoints"
} |
openai/gpt-5.4 | openai/gpt-5.4-20260305 | | OpenAI: GPT-5.4 | 1,772,734,352 | GPT-5.4 is OpenAI’s latest frontier model, unifying the Codex and GPT lines into a single system. It features a 1M+ token context window (922K input, 128K output) with support for... | 1,050,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000015",
"image": null,
"input_cache_read": "0.00000025",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000025",
"web_search": "0.01"
} | {
"context_length": 1050000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.4-20260305/endpoints"
} |
inception/mercury-2 | inception/mercury-2-20260304 | null | Inception: Mercury 2 | 1,772,636,275 | Mercury 2 is an extremely fast reasoning LLM, and the first reasoning diffusion LLM (dLLM). Instead of generating tokens sequentially, Mercury 2 produces and refines multiple tokens in parallel, achieving... | 128,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000075",
"image": null,
"input_cache_read": "0.000000025",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000025",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": false,
"max_completion_tokens": 50000
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.75,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/inception/mercury-2-20260304/endpoints"
} |
openai/gpt-5.3-chat | openai/gpt-5.3-chat-20260303 | | OpenAI: GPT-5.3 Chat | 1,772,564,061 | GPT-5.3 Chat is an update to ChatGPT's most-used model that makes everyday conversations smoother, more useful, and more directly helpful. It delivers more accurate answers with better contextualization and significantly... | 128,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000014",
"image": null,
"input_cache_read": "0.000000175",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000175",
"web_search": "0.1"
} | {
"context_length": 128000,
"is_moderated": true,
"max_completion_tokens": 16384
} | null | [
"max_completion_tokens",
"max_tokens",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.3-chat-20260303/endpoints"
} |
google/gemini-3.1-flash-lite-preview | google/gemini-3.1-flash-lite-preview-20260303 | | Google: Gemini 3.1 Flash Lite Preview | 1,772,512,673 | Gemini 3.1 Flash Lite Preview is Google's high-efficiency model optimized for high-volume use cases. It outperforms Gemini 2.5 Flash Lite on overall quality and approaches Gemini 2.5 Flash performance across... | 1,048,576 | {
"input_modalities": [
"text",
"image",
"video",
"file",
"audio"
],
"instruct_type": null,
"modality": "text+image+file+audio+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemini"
} | {
"audio": "0.0000005",
"completion": "0.0000015",
"image": "0.00000025",
"input_cache_read": "0.000000025",
"input_cache_write": "0.00000008333333333333334",
"internal_reasoning": "0.0000015",
"prompt": "0.00000025",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3.1-flash-lite-preview-20260303/endpoints"
} |
bytedance-seed/seed-2.0-mini | bytedance-seed/seed-2.0-mini-20260224 | | ByteDance Seed: Seed-2.0-Mini | 1,772,131,107 | Seed-2.0-mini targets latency-sensitive, high-concurrency, and cost-sensitive scenarios, emphasizing fast response and flexible inference deployment. It delivers performance comparable to ByteDance-Seed-1.6, supports 256k context, four reasoning effort modes (minimal/low/medium/high), multimodal understanding,... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000004",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/bytedance-seed/seed-2.0-mini-20260224/endpoints"
} |
google/gemini-3.1-flash-image-preview | google/gemini-3.1-flash-image-preview-20260226 | | Google: Nano Banana 2 (Gemini 3.1 Flash Image Preview) | 1,772,119,558 | Gemini 3.1 Flash Image Preview, a.k.a. "Nano Banana 2," is Google’s latest state of the art image generation and editing model, delivering Pro-level visual quality at Flash speed. It combines... | 65,536 | {
"input_modalities": [
"image",
"text"
],
"instruct_type": null,
"modality": "text+image->text+image",
"output_modalities": [
"image",
"text"
],
"tokenizer": "Gemini"
} | {
"audio": null,
"completion": "0.000003",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000005",
"web_search": null
} | {
"context_length": 65536,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3.1-flash-image-preview-20260226/endpoints"
} |
qwen/qwen3.5-35b-a3b | qwen/qwen3.5-35b-a3b-20260224 | Qwen/Qwen3.5-35B-A3B | Qwen: Qwen3.5-35B-A3B | 1,772,053,822 | The Qwen3.5 Series 35B-A3B is a native vision-language model designed with a hybrid architecture that integrates linear attention mechanisms and a sparse mixture-of-experts model, achieving higher inference efficiency. Its overall... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.0000013",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001625",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": 20,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-35b-a3b-20260224/endpoints"
} |
qwen/qwen3.5-27b | qwen/qwen3.5-27b-20260224 | Qwen/Qwen3.5-27B | Qwen: Qwen3.5-27B | 1,772,053,810 | The Qwen3.5 27B native vision-language Dense model incorporates a linear attention mechanism, delivering fast response times while balancing inference speed and performance. Its overall capabilities are comparable to those of... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000156",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000195",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": 20,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-27b-20260224/endpoints"
} |
qwen/qwen3.5-122b-a10b | qwen/qwen3.5-122b-a10b-20260224 | Qwen/Qwen3.5-122B-A10B | Qwen: Qwen3.5-122B-A10B | 1,772,053,789 | The Qwen3.5 122B-A10B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. In terms of... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000208",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000026",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": 20,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-122b-a10b-20260224/endpoints"
} |
qwen/qwen3.5-flash-02-23 | qwen/qwen3.5-flash-20260224 | null | Qwen: Qwen3.5-Flash | 1,772,053,776 | The Qwen3.5 native vision-language Flash models are built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. Compared to the... | 1,000,000 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000026",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000065",
"web_search": null
} | {
"context_length": 1000000,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-flash-20260224/endpoints"
} |
liquid/lfm-2-24b-a2b | liquid/lfm-2-24b-a2b-20260224 | LiquidAI/LFM2-24B-A2B | LiquidAI: LFM2-24B-A2B | 1,772,048,711 | LFM2-24B-A2B is the largest model in the LFM2 family of hybrid architectures designed for efficient on-device deployment. Built as a 24B parameter Mixture-of-Experts model with only 2B active parameters per... | 32,768 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000012",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000003",
"web_search": null
} | {
"context_length": 32768,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"stop",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": 1.05,
"temperature": 0.1,
"top_k": 50,
"top_p": null
} | null | null | {
"details": "/api/v1/models/liquid/lfm-2-24b-a2b-20260224/endpoints"
} |
google/gemini-3.1-pro-preview-customtools | google/gemini-3.1-pro-preview-customtools-20260219 | null | Google: Gemini 3.1 Pro Preview Custom Tools | 1,772,045,923 | Gemini 3.1 Pro Preview Custom Tools is a variant of Gemini 3.1 Pro that improves tool selection behavior by preventing overuse of a general bash tool when more efficient third-party... | 1,048,576 | {
"input_modalities": [
"text",
"audio",
"image",
"video",
"file"
],
"instruct_type": null,
"modality": "text+image+file+audio+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemini"
} | {
"audio": "0.000002",
"completion": "0.000012",
"image": "0.000002",
"input_cache_read": "0.0000002",
"input_cache_write": "0.000000375",
"internal_reasoning": "0.000012",
"prompt": "0.000002",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3.1-pro-preview-customtools-20260219/endpoints"
} |
openai/gpt-5.3-codex | openai/gpt-5.3-codex-20260224 | | OpenAI: GPT-5.3-Codex | 1,771,959,164 | GPT-5.3-Codex is OpenAI’s most advanced agentic coding model, combining the frontier software engineering performance of GPT-5.2-Codex with the broader reasoning and professional knowledge capabilities of GPT-5.2. It achieves state-of-the-art results... | 400,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000014",
"image": null,
"input_cache_read": "0.000000175",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000175",
"web_search": "0.01"
} | {
"context_length": 400000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.3-codex-20260224/endpoints"
} |
aion-labs/aion-2.0 | aion-labs/aion-2.0-20260223 | null | AionLabs: Aion-2.0 | 1,771,881,306 | Aion-2.0 is a variant of DeepSeek V3.2 optimized for immersive roleplaying and storytelling. It is particularly strong at introducing tension, crises, and conflict into stories, making narratives feel more engaging.... | 131,072 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000016",
"image": null,
"input_cache_read": "0.0000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000008",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/aion-labs/aion-2.0-20260223/endpoints"
} |
google/gemini-3.1-pro-preview | google/gemini-3.1-pro-preview-20260219 | | Google: Gemini 3.1 Pro Preview | 1,771,509,627 | Gemini 3.1 Pro Preview is Google’s frontier reasoning model, delivering enhanced software engineering performance, improved agentic reliability, and more efficient token usage across complex workflows. Building on the multimodal foundation... | 1,048,576 | {
"input_modalities": [
"audio",
"file",
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+file+audio+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemini"
} | {
"audio": "0.000002",
"completion": "0.000012",
"image": "0.000002",
"input_cache_read": "0.0000002",
"input_cache_write": "0.000000375",
"internal_reasoning": "0.000012",
"prompt": "0.000002",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3.1-pro-preview-20260219/endpoints"
} |
anthropic/claude-sonnet-4.6 | anthropic/claude-4.6-sonnet-20260217 | | Anthropic: Claude Sonnet 4.6 | 1,771,342,990 | Sonnet 4.6 is Anthropic's most capable Sonnet-class model yet, with frontier performance across coding, agents, and professional work. It excels at iterative development, complex codebase navigation, end-to-end project management with... | 1,000,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Claude"
} | {
"audio": null,
"completion": "0.000015",
"image": null,
"input_cache_read": "0.0000003",
"input_cache_write": "0.00000375",
"internal_reasoning": null,
"prompt": "0.000003",
"web_search": "0.01"
} | {
"context_length": 1000000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p",
"verbosity"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/anthropic/claude-4.6-sonnet-20260217/endpoints"
} |
qwen/qwen3.5-plus-02-15 | qwen/qwen3.5-plus-20260216 | | Qwen: Qwen3.5 Plus 2026-02-15 | 1,771,229,416 | The Qwen3.5 native vision-language series Plus models are built on a hybrid architecture that integrates linear attention mechanisms with sparse mixture-of-experts models, achieving higher inference efficiency. In a variety of... | 1,000,000 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000156",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000026",
"web_search": null
} | {
"context_length": 1000000,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-plus-20260216/endpoints"
} |
qwen/qwen3.5-397b-a17b | qwen/qwen3.5-397b-a17b-20260216 | Qwen/Qwen3.5-397B-A17B | Qwen: Qwen3.5 397B A17B | 1,771,223,018 | The Qwen3.5 series 397B-A17B native vision-language model is built on a hybrid architecture that integrates a linear attention mechanism with a sparse mixture-of-experts model, achieving higher inference efficiency. It delivers... | 262,144 | {
"input_modalities": [
"text",
"image",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen3"
} | {
"audio": null,
"completion": "0.00000234",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000039",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": 20,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/qwen/qwen3.5-397b-a17b-20260216/endpoints"
} |
minimax/minimax-m2.5:free | minimax/minimax-m2.5-20260211 | MiniMaxAI/MiniMax-M2.5 | MiniMax: MiniMax M2.5 (free) | 1,770,908,502 | MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1... | 196,608 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 196608,
"is_moderated": true,
"max_completion_tokens": 8192
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"temperature",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/minimax/minimax-m2.5-20260211/endpoints"
} |
minimax/minimax-m2.5 | minimax/minimax-m2.5-20260211 | MiniMaxAI/MiniMax-M2.5 | MiniMax: MiniMax M2.5 | 1,770,908,502 | MiniMax-M2.5 is a SOTA large language model designed for real-world productivity. Trained in a diverse range of complex real-world digital working environments, M2.5 builds upon the coding expertise of M2.1... | 196,608 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000099",
"image": null,
"input_cache_read": "0.000000059",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000118",
"web_search": null
} | {
"context_length": 196608,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"parallel_tool_calls",
"presence_penalty",
"reasoning",
"reasoning_effort",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools... | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/minimax/minimax-m2.5-20260211/endpoints"
} |
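Each row's links value is a relative details path, for example "/api/v1/models/minimax/minimax-m2.5-20260211/endpoints" for the MiniMax M2.5 row above. Below is a minimal sketch of resolving such a path and fetching the per-provider endpoint listing; the https://openrouter.ai host and the bearer-token header are assumptions, not values recorded in this table.

```python
import os

import requests  # third-party HTTP client: pip install requests

BASE_URL = "https://openrouter.ai"  # assumed API host; not part of this table
details_path = "/api/v1/models/minimax/minimax-m2.5-20260211/endpoints"  # from the row above

response = requests.get(
    BASE_URL + details_path,
    headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},  # assumed auth scheme
    timeout=30,
)
response.raise_for_status()
print(response.json())  # per-provider endpoint metadata for this model
```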
z-ai/glm-5 | z-ai/glm-5-20260211 | zai-org/GLM-5 | Z.ai: GLM 5 | 1,770,829,182 | GLM-5 is Z.ai’s flagship open-source foundation model engineered for complex systems design and long-horizon agent workflows. Built for expert developers, it delivers production-grade performance on large-scale programming tasks, rivaling leading... | 80,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000023",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000072",
"web_search": null
} | {
"context_length": 80000,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-5-20260211/endpoints"
} |
qwen/qwen3-max-thinking | qwen/qwen3-max-thinking-20260123 | null | Qwen: Qwen3 Max Thinking | 1,770,671,901 | Qwen3-Max-Thinking is the flagship reasoning model in the Qwen3 series, designed for high-stakes cognitive tasks that require deep, multi-step reasoning. By significantly scaling model capacity and reinforcement learning compute, it... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen"
} | {
"audio": null,
"completion": "0.0000039",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000078",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/qwen/qwen3-max-thinking-20260123/endpoints"
} |
anthropic/claude-opus-4.6 | anthropic/claude-4.6-opus-20260205 | Anthropic: Claude Opus 4.6 | 1,770,219,050 | Opus 4.6 is Anthropic’s strongest model for coding and long-running professional tasks. It is built for agents that operate across entire workflows rather than single prompts, making it especially effective... | 1,000,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Claude"
} | {
"audio": null,
"completion": "0.000025",
"image": null,
"input_cache_read": "0.0000005",
"input_cache_write": "0.00000625",
"internal_reasoning": null,
"prompt": "0.000005",
"web_search": "0.01"
} | {
"context_length": 1000000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p",
"verbosity"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/anthropic/claude-4.6-opus-20260205/endpoints"
} | |
qwen/qwen3-coder-next | qwen/qwen3-coder-next-2025-02-03 | Qwen/Qwen3-Coder-Next | Qwen: Qwen3 Coder Next | 1,770,164,101 | Qwen3-Coder-Next is an open-weight causal language model optimized for coding agents and local development workflows. It uses a sparse MoE design with 80B total parameters and only 3B activated per... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Qwen"
} | {
"audio": null,
"completion": "0.0000008",
"image": null,
"input_cache_read": "0.00000012",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 262144
} | null | [
"frequency_penalty",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/qwen/qwen3-coder-next-2025-02-03/endpoints"
} |
openrouter/free | openrouter/free | Free Models Router | 1,769,917,427 | The simplest way to get free inference. openrouter/free is a router that selects free models at random from the models available on OpenRouter. The router smartly filters for models that... | 200,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Router"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": null,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openrouter/free/endpoints"
} | |
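Since openrouter/free is itself listed with a model id, it can be requested like any other entry in this table. A minimal sketch follows, assuming OpenRouter's OpenAI-compatible chat completions endpoint and an OPENROUTER_API_KEY environment variable; both are assumptions rather than facts stated in these rows.

```python
import os

import requests  # third-party HTTP client: pip install requests

response = requests.post(
    "https://openrouter.ai/api/v1/chat/completions",  # assumed endpoint
    headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},
    json={
        "model": "openrouter/free",  # the router picks a free model for each request
        "messages": [{"role": "user", "content": "Say hello in one sentence."}],
    },
    timeout=60,
)
response.raise_for_status()
print(response.json()["choices"][0]["message"]["content"])
```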
stepfun/step-3.5-flash | stepfun/step-3.5-flash | stepfun-ai/Step-3.5-Flash | StepFun: Step 3.5 Flash | 1,769,728,337 | Step 3.5 Flash is StepFun's most capable open-source foundation model. Built on a sparse Mixture of Experts (MoE) architecture, it selectively activates only 11B of its 196B parameters per token.... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000003",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"reasoning_effort",
"repetition_penalty",
"response_format",
"seed",
"stop",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/stepfun/step-3.5-flash/endpoints"
} |
arcee-ai/trinity-large-preview:free | arcee-ai/trinity-large-preview | arcee-ai/Trinity-Large-Preview | Arcee AI: Trinity Large Preview (free) | 1,769,552,670 | Trinity-Large-Preview is a frontier-scale open-weight language model from Arcee, built as a 400B-parameter sparse Mixture-of-Experts with 13B active parameters per token using 4-of-256 expert routing. It excels in creative writing,... | 131,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 131000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"max_tokens",
"response_format",
"structured_outputs",
"temperature",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.8,
"top_k": null,
"top_p": 0.8
} | null | 2026-04-22 | {
"details": "/api/v1/models/arcee-ai/trinity-large-preview/endpoints"
} |
moonshotai/kimi-k2.5 | moonshotai/kimi-k2.5-0127 | moonshotai/Kimi-K2.5 | MoonshotAI: Kimi K2.5 | 1,769,487,076 | Kimi K2.5 is Moonshot AI's native multimodal model, delivering state-of-the-art visual coding capability and a self-directed agent swarm paradigm. Built on Kimi K2 with continued pretraining over approximately 15T mixed... | 262,144 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000172",
"image": null,
"input_cache_read": "0.00000019135",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003827",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65535
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"parallel_tool_calls",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_l... | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/moonshotai/kimi-k2.5-0127/endpoints"
} |
upstage/solar-pro-3 | upstage/solar-pro-3 | Upstage: Solar Pro 3 | 1,769,481,200 | Solar Pro 3 is Upstage's powerful Mixture-of-Experts (MoE) language model. With 102B total parameters and 12B active parameters per forward pass, it delivers exceptional performance while maintaining computational efficiency. Optimized... | 128,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000006",
"image": null,
"input_cache_read": "0.000000015",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"structured_outputs",
"temperature",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/upstage/solar-pro-3/endpoints"
} | |
minimax/minimax-m2-her | minimax/minimax-m2-her-20260123 | MiniMax: MiniMax M2-her | 1,769,177,239 | MiniMax M2-her is a dialogue-first large language model built for immersive roleplay, character-driven chat, and expressive multi-turn conversations. Designed to stay consistent in tone and personality, it supports rich message... | 65,536 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000012",
"image": null,
"input_cache_read": "0.00000003",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003",
"web_search": null
} | {
"context_length": 65536,
"is_moderated": false,
"max_completion_tokens": 2048
} | null | [
"max_tokens",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/minimax/minimax-m2-her-20260123/endpoints"
} | |
writer/palmyra-x5 | writer/palmyra-x5-20250428 | Writer: Palmyra X5 | 1,769,003,823 | Palmyra X5 is Writer's most advanced model, purpose-built for building and scaling AI agents across the enterprise. It delivers industry-leading speed and efficiency on context windows up to 1 million... | 1,040,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000006",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000006",
"web_search": null
} | {
"context_length": 1040000,
"is_moderated": true,
"max_completion_tokens": 8192
} | null | [
"max_tokens",
"stop",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/writer/palmyra-x5-20250428/endpoints"
} | |
liquid/lfm-2.5-1.2b-thinking:free | liquid/lfm-2.5-1.2b-thinking-20260120 | LiquidAI/LFM2.5-1.2B-Thinking | LiquidAI: LFM2.5-1.2B-Thinking (free) | 1,768,927,527 | LFM2.5-1.2B-Thinking is a lightweight reasoning-focused model optimized for agentic tasks, data extraction, and RAG—while still running comfortably on edge devices. It supports long context (up to 32K tokens) and is... | 32,768 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 32768,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"seed",
"stop",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/liquid/lfm-2.5-1.2b-thinking-20260120/endpoints"
} |
liquid/lfm-2.5-1.2b-instruct:free | liquid/lfm-2.5-1.2b-instruct-20260120 | LiquidAI/LFM2.5-1.2B-Instruct | LiquidAI: LFM2.5-1.2B-Instruct (free) | 1,768,927,521 | LFM2.5-1.2B-Instruct is a compact, high-performance instruction-tuned model built for fast on-device AI. It delivers strong chat quality in a 1.2B parameter footprint, with efficient edge inference and broad runtime support. | 32,768 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 32768,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"seed",
"stop",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/liquid/lfm-2.5-1.2b-instruct-20260120/endpoints"
} |
openai/gpt-audio | openai/gpt-audio | OpenAI: GPT Audio | 1,768,862,569 | The gpt-audio model is OpenAI's first generally available audio model. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Audio is priced... | 128,000 | {
"input_modalities": [
"text",
"audio"
],
"instruct_type": null,
"modality": "text+audio->text+audio",
"output_modalities": [
"text",
"audio"
],
"tokenizer": "GPT"
} | {
"audio": "0.000032",
"completion": "0.00001",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000025",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": true,
"max_completion_tokens": 16384
} | null | [
"frequency_penalty",
"logit_bias",
"logprobs",
"max_tokens",
"presence_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-audio/endpoints"
} | |
openai/gpt-audio-mini | openai/gpt-audio-mini | OpenAI: GPT Audio Mini | 1,768,859,419 | A cost-efficient version of GPT Audio. The new snapshot features an upgraded decoder for more natural sounding voices and maintains better voice consistency. Input is priced at $0.60 per million... | 128,000 | {
"input_modalities": [
"text",
"audio"
],
"instruct_type": null,
"modality": "text+audio->text+audio",
"output_modalities": [
"text",
"audio"
],
"tokenizer": "GPT"
} | {
"audio": "0.0000006",
"completion": "0.0000024",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000006",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": true,
"max_completion_tokens": 16384
} | null | [
"frequency_penalty",
"logit_bias",
"logprobs",
"max_tokens",
"presence_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-audio-mini/endpoints"
} | |
z-ai/glm-4.7-flash | z-ai/glm-4.7-flash-20260119 | zai-org/GLM-4.7-Flash | Z.ai: GLM 4.7 Flash | 1,768,833,913 | As a 30B-class SOTA model, GLM-4.7-Flash offers a new option that balances performance and efficiency. It is further optimized for agentic coding use cases, strengthening coding capabilities, long-horizon task planning,... | 202,752 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000004",
"image": null,
"input_cache_read": "0.0000000100000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000006",
"web_search": null
} | {
"context_length": 202752,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-4.7-flash-20260119/endpoints"
} |
openai/gpt-5.2-codex | openai/gpt-5.2-codex-20260114 | OpenAI: GPT-5.2-Codex | 1,768,409,315 | GPT-5.2-Codex is an upgraded version of GPT-5.1-Codex optimized for software engineering and coding workflows. It is designed for both interactive development sessions and long, independent execution of complex engineering tasks.... | 400,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000014",
"image": null,
"input_cache_read": "0.000000175",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000175",
"web_search": null
} | {
"context_length": 400000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.2-codex-20260114/endpoints"
} | |
allenai/olmo-3.1-32b-instruct | allenai/olmo-3.1-32b-instruct-20251215 | allenai/Olmo-3.1-32B-Instruct | AllenAI: Olmo 3.1 32B Instruct | 1,767,728,554 | Olmo 3.1 32B Instruct is a large-scale, 32-billion-parameter instruction-tuned language model engineered for high-performance conversational AI, multi-turn dialogue, and practical instruction following. As part of the Olmo 3.1 family, this... | 65,536 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000006",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000002",
"web_search": null
} | {
"context_length": 65536,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/allenai/olmo-3.1-32b-instruct-20251215/endpoints"
} |
bytedance-seed/seed-1.6-flash | bytedance-seed/seed-1.6-flash-20250625 | ByteDance Seed: Seed 1.6 Flash | 1,766,505,011 | Seed 1.6 Flash is an ultra-fast multimodal deep thinking model by ByteDance Seed, supporting both text and visual understanding. It features a 256k context window and can generate outputs of... | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000003",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000075",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/bytedance-seed/seed-1.6-flash-20250625/endpoints"
} | |
bytedance-seed/seed-1.6 | bytedance-seed/seed-1.6-20250625 | ByteDance Seed: Seed 1.6 | 1,766,504,997 | Seed 1.6 is a general-purpose model released by the ByteDance Seed team. It incorporates multimodal capabilities and adaptive deep thinking with a 256K context window. | 262,144 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000002",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000025",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/bytedance-seed/seed-1.6-20250625/endpoints"
} | |
minimax/minimax-m2.1 | minimax/minimax-m2.1 | MiniMaxAI/MiniMax-M2.1 | MiniMax: MiniMax M2.1 | 1,766,454,997 | MiniMax-M2.1 is a lightweight, state-of-the-art large language model optimized for coding, agentic workflows, and modern application development. With only 10 billion activated parameters, it delivers a major jump in real-world... | 196,608 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000095",
"image": null,
"input_cache_read": "0.00000003",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000029",
"web_search": null
} | {
"context_length": 196608,
"is_moderated": false,
"max_completion_tokens": 196608
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.9
} | null | null | {
"details": "/api/v1/models/minimax/minimax-m2.1/endpoints"
} |
z-ai/glm-4.7 | z-ai/glm-4.7-20251222 | zai-org/GLM-4.7 | Z.ai: GLM 4.7 | 1,766,378,014 | GLM-4.7 is Z.ai’s latest flagship model, featuring upgrades in two key areas: enhanced programming capabilities and more stable multi-step reasoning/execution. It demonstrates significant improvements in executing complex agent tasks while... | 202,752 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000175",
"image": null,
"input_cache_read": "0.000000195",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000039",
"web_search": null
} | {
"context_length": 202752,
"is_moderated": false,
"max_completion_tokens": 65535
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/z-ai/glm-4.7-20251222/endpoints"
} |
google/gemini-3-flash-preview | google/gemini-3-flash-preview-20251217 | Google: Gemini 3 Flash Preview | 1,765,987,078 | Gemini 3 Flash Preview is a high-speed, high-value thinking model designed for agentic workflows, multi-turn chat, and coding assistance. It delivers near Pro-level reasoning and tool... | 1,048,576 | {
"input_modalities": [
"text",
"image",
"file",
"audio",
"video"
],
"instruct_type": null,
"modality": "text+image+file+audio+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Gemini"
} | {
"audio": "0.000001",
"completion": "0.000003",
"image": "0.0000005",
"input_cache_read": "0.00000005",
"input_cache_write": "0.00000008333333333333334",
"internal_reasoning": "0.000003",
"prompt": "0.0000005",
"web_search": null
} | {
"context_length": 1048576,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3-flash-preview-20251217/endpoints"
} | |
mistralai/mistral-small-creative | mistralai/mistral-small-creative-20251216 | null | Mistral: Mistral Small Creative | 1,765,908,653 | Mistral Small Creative is an experimental small model designed for creative writing, narrative generation, roleplay and character-driven dialogue, general-purpose instruction following, and conversational agents. | 32,768 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.0000003",
"image": null,
"input_cache_read": "0.00000001",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 32768,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": 0.95
} | null | 2026-04-30 | {
"details": "/api/v1/models/mistralai/mistral-small-creative-20251216/endpoints"
} |
xiaomi/mimo-v2-flash | xiaomi/mimo-v2-flash-20251210 | XiaomiMiMo/MiMo-V2-Flash | Xiaomi: MiMo-V2-Flash | 1,765,731,308 | MiMo-V2-Flash is an open-source foundation language model developed by Xiaomi. It is a Mixture-of-Experts model with 309B total parameters and 15B active parameters, adopting hybrid attention architecture. MiMo-V2-Flash supports a... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000029",
"image": null,
"input_cache_read": "0.000000045",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000009",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/xiaomi/mimo-v2-flash-20251210/endpoints"
} |
nvidia/nemotron-3-nano-30b-a3b:free | nvidia/nemotron-3-nano-30b-a3b | nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16 | NVIDIA: Nemotron 3 Nano 30B A3B (free) | 1,765,731,275 | NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with the highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully... | 256,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0",
"web_search": null
} | {
"context_length": 256000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"seed",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/nvidia/nemotron-3-nano-30b-a3b/endpoints"
} |
nvidia/nemotron-3-nano-30b-a3b | nvidia/nemotron-3-nano-30b-a3b | nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16 | NVIDIA: Nemotron 3 Nano 30B A3B | 1,765,731,275 | NVIDIA Nemotron 3 Nano 30B A3B is a small language MoE model with the highest compute efficiency and accuracy for developers to build specialized agentic AI systems. The model is fully... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000002",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000005",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/nvidia/nemotron-3-nano-30b-a3b/endpoints"
} |
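The supported_parameters arrays make it possible to filter this catalog programmatically, for example to keep only models that advertise tool calling. Below is a small sketch over records shaped like the rows above; the two inline records are hypothetical stand-ins for however the full table is actually loaded, with their parameter lists abridged from the Nemotron 3 Nano and MiniMax M2-her rows.

```python
# Hypothetical in-memory records mirroring (abridged) rows of this table.
models = [
    {"id": "nvidia/nemotron-3-nano-30b-a3b",
     "supported_parameters": ["include_reasoning", "max_tokens", "reasoning",
                              "temperature", "tool_choice", "tools", "top_p"]},
    {"id": "minimax/minimax-m2-her",
     "supported_parameters": ["max_tokens", "temperature", "top_p"]},
]

required = {"tools", "tool_choice"}  # parameters a tool-calling client needs

tool_capable = [m["id"] for m in models if required.issubset(m["supported_parameters"])]
print(tool_capable)  # ['nvidia/nemotron-3-nano-30b-a3b']
```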
openai/gpt-5.2-chat | openai/gpt-5.2-chat-20251211 | OpenAI: GPT-5.2 Chat | 1,765,389,783 | GPT-5.2 Chat (AKA Instant) is the fast, lightweight member of the 5.2 family, optimized for low-latency chat while retaining strong general intelligence. It uses adaptive reasoning to selectively “think” on... | 128,000 | {
"input_modalities": [
"file",
"image",
"text"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000014",
"image": null,
"input_cache_read": "0.000000175",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000175",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": false,
"max_completion_tokens": 32000
} | null | [
"max_completion_tokens",
"max_tokens",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.2-chat-20251211/endpoints"
} | |
openai/gpt-5.2-pro | openai/gpt-5.2-pro-20251211 | OpenAI: GPT-5.2 Pro | 1,765,389,780 | GPT-5.2 Pro is OpenAI’s most advanced model, offering major improvements in agentic coding and long context performance over GPT-5 Pro. It is optimized for complex tasks that require step-by-step reasoning,... | 400,000 | {
"input_modalities": [
"image",
"text",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000168",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000021",
"web_search": "0.01"
} | {
"context_length": 400000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.2-pro-20251211/endpoints"
} | |
openai/gpt-5.2 | openai/gpt-5.2-20251211 | OpenAI: GPT-5.2 | 1,765,389,775 | GPT-5.2 is the latest frontier-grade model in the GPT-5 series, offering stronger agentic and long context performance compared to GPT-5.1. It uses adaptive reasoning to allocate computation dynamically, responding quickly... | 400,000 | {
"input_modalities": [
"file",
"image",
"text"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.000014",
"image": null,
"input_cache_read": "0.000000175",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000175",
"web_search": null
} | {
"context_length": 400000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.2-20251211/endpoints"
} | |
mistralai/devstral-2512 | mistralai/devstral-2512 | mistralai/Devstral-2-123B-Instruct-2512 | Mistral: Devstral 2 2512 | 1,765,285,419 | Devstral 2 is a state-of-the-art open-source model by Mistral AI specializing in agentic coding. It is a 123B-parameter dense transformer model supporting a 256K context window. Devstral 2 supports exploring... | 262,144 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.000002",
"image": null,
"input_cache_read": "0.00000004",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000004",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"max_tokens",
"presence_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/devstral-2512/endpoints"
} |
relace/relace-search | relace/relace-search-20251208 | null | Relace: Relace Search | 1,765,213,560 | The relace-search model uses 4-12 `view_file` and `grep` tools in parallel to explore a codebase and return the files relevant to the user request. In contrast to RAG, relace-search performs agentic... | 256,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.000003",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000001",
"web_search": null
} | {
"context_length": 256000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"max_tokens",
"seed",
"stop",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/relace/relace-search-20251208/endpoints"
} |
z-ai/glm-4.6v | z-ai/glm-4.6-20251208 | zai-org/GLM-4.6V | Z.ai: GLM 4.6V | 1,765,207,462 | GLM-4.6V is a large multimodal model designed for high-fidelity visual understanding and long-context reasoning across images, documents, and mixed media. It supports up to 128K tokens, processes complex page layouts... | 131,072 | {
"input_modalities": [
"image",
"text",
"video"
],
"instruct_type": null,
"modality": "text+image+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000009",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.8,
"top_k": null,
"top_p": 0.6
} | null | null | {
"details": "/api/v1/models/z-ai/glm-4.6-20251208/endpoints"
} |
nex-agi/deepseek-v3.1-nex-n1 | nex-agi/deepseek-v3.1-nex-n1 | nex-agi/DeepSeek-V3.1-Nex-N1 | Nex AGI: DeepSeek V3.1 Nex N1 | 1,765,204,393 | DeepSeek V3.1 Nex-N1 is the flagship release of the Nex-N1 series — a post-trained model designed to highlight agent autonomy, tool use, and real-world productivity. Nex-N1 demonstrates competitive performance across... | 131,072 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "DeepSeek"
} | {
"audio": null,
"completion": "0.0000005",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000135",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": 163840
} | null | [
"frequency_penalty",
"response_format",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/nex-agi/deepseek-v3.1-nex-n1/endpoints"
} |
essentialai/rnj-1-instruct | essentialai/rnj-1-instruct | EssentialAI/rnj-1-instruct | EssentialAI: Rnj 1 Instruct | 1,765,094,847 | Rnj-1 is an 8B-parameter, dense, open-weight model family developed by Essential AI and trained from scratch with a focus on programming, math, and scientific reasoning. The model demonstrates strong performance... | 32,768 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000015",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 32768,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"repetition_penalty",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/essentialai/rnj-1-instruct/endpoints"
} |
openrouter/bodybuilder | openrouter/bodybuilder | Body Builder (beta) | 1,764,903,653 | Transform your natural language requests into structured OpenRouter API request objects. Describe what you want to accomplish with AI models, and Body Builder will construct the appropriate API calls. Example:... | 128,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Router"
} | {
"audio": null,
"completion": "-1",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "-1",
"web_search": null
} | {
"context_length": null,
"is_moderated": false,
"max_completion_tokens": null
} | null | [] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openrouter/bodybuilder/endpoints"
} | |
openai/gpt-5.1-codex-max | openai/gpt-5.1-codex-max-20251204 | OpenAI: GPT-5.1-Codex-Max | 1,764,878,934 | GPT-5.1-Codex-Max is OpenAI’s latest agentic coding model, designed for long-running, high-context software development tasks. It is based on an updated version of the 5.1 reasoning stack and trained on agentic... | 400,000 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.00001",
"image": null,
"input_cache_read": "0.000000125",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000125",
"web_search": "0.01"
} | {
"context_length": 400000,
"is_moderated": true,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.1-codex-max-20251204/endpoints"
} | |
amazon/nova-2-lite-v1 | amazon/nova-2-lite-v1 | Amazon: Nova 2 Lite | 1,764,696,672 | Nova 2 Lite is a fast, cost-effective reasoning model for everyday workloads that can process text, images, and videos to generate text. Nova 2 Lite demonstrates standout capabilities in processing... | 1,000,000 | {
"input_modalities": [
"text",
"image",
"video",
"file"
],
"instruct_type": null,
"modality": "text+image+file+video->text",
"output_modalities": [
"text"
],
"tokenizer": "Nova"
} | {
"audio": null,
"completion": "0.0000025",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000003",
"web_search": null
} | {
"context_length": 1000000,
"is_moderated": true,
"max_completion_tokens": 65535
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"stop",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/amazon/nova-2-lite-v1/endpoints"
} | |
mistralai/ministral-14b-2512 | mistralai/ministral-14b-2512 | mistralai/Ministral-3-14B-Instruct-2512 | Mistral: Ministral 3 14B 2512 | 1,764,681,735 | The largest model in the Ministral 3 family, Ministral 3 14B offers frontier capabilities and performance comparable to its larger Mistral Small 3.2 24B counterpart. A powerful and efficient language... | 262,144 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.0000002",
"image": null,
"input_cache_read": "0.00000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000002",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"logprobs",
"max_tokens",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/ministral-14b-2512/endpoints"
} |
mistralai/ministral-8b-2512 | mistralai/ministral-8b-2512 | mistralai/Ministral-3-8B-Instruct-2512 | Mistral: Ministral 3 8B 2512 | 1,764,681,654 | A balanced model in the Ministral 3 family, Ministral 3 8B is a powerful, efficient tiny language model with vision capabilities. | 262,144 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.00000015",
"image": null,
"input_cache_read": "0.000000015",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"logprobs",
"max_tokens",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/ministral-8b-2512/endpoints"
} |
mistralai/ministral-3b-2512 | mistralai/ministral-3b-2512 | mistralai/Ministral-3-3B-Instruct-2512 | Mistral: Ministral 3 3B 2512 | 1,764,681,560 | The smallest model in the Ministral 3 family, Ministral 3 3B is a powerful, efficient tiny language model with vision capabilities. | 131,072 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.0000001",
"image": null,
"input_cache_read": "0.00000001",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000001",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"logprobs",
"max_tokens",
"presence_penalty",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.3,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/ministral-3b-2512/endpoints"
} |
mistralai/mistral-large-2512 | mistralai/mistral-large-2512 | Mistral: Mistral Large 3 2512 | 1,764,624,472 | Mistral Large 3 2512 is Mistral’s most capable model to date, featuring a sparse mixture-of-experts architecture with 41B active parameters (675B total), and released under the Apache 2.0 license. | 262,144 | {
"input_modalities": [
"text",
"image"
],
"instruct_type": null,
"modality": "text+image->text",
"output_modalities": [
"text"
],
"tokenizer": "Mistral"
} | {
"audio": null,
"completion": "0.0000015",
"image": null,
"input_cache_read": "0.00000005",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000005",
"web_search": null
} | {
"context_length": 262144,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"max_tokens",
"presence_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.0645,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/mistralai/mistral-large-2512/endpoints"
} | |
arcee-ai/trinity-mini | arcee-ai/trinity-mini-20251201 | arcee-ai/Trinity-Mini | Arcee AI: Trinity Mini | 1,764,601,720 | Trinity Mini is a 26B-parameter (3B active) sparse mixture-of-experts language model featuring 128 experts with 8 active per token. Engineered for efficient reasoning over long contexts (131k) with robust function... | 131,072 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000015",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.000000045",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.15,
"top_k": null,
"top_p": 0.75
} | null | null | {
"details": "/api/v1/models/arcee-ai/trinity-mini-20251201/endpoints"
} |
deepseek/deepseek-v3.2-speciale | deepseek/deepseek-v3.2-speciale-20251201 | deepseek-ai/DeepSeek-V3.2-Speciale | DeepSeek: DeepSeek V3.2 Speciale | 1,764,594,837 | DeepSeek-V3.2-Speciale is a high-compute variant of DeepSeek-V3.2 optimized for maximum reasoning and agentic performance. It builds on DeepSeek Sparse Attention (DSA) for efficient long-context processing, then scales post-training reinforcement learning... | 163,840 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "DeepSeek"
} | {
"audio": null,
"completion": "0.0000012",
"image": null,
"input_cache_read": "0.0000002",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000004",
"web_search": null
} | {
"context_length": 163840,
"is_moderated": false,
"max_completion_tokens": 163840
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/deepseek/deepseek-v3.2-speciale-20251201/endpoints"
} |
deepseek/deepseek-v3.2 | deepseek/deepseek-v3.2-20251201 | deepseek-ai/DeepSeek-V3.2 | DeepSeek: DeepSeek V3.2 | 1,764,594,642 | DeepSeek-V3.2 is a large language model designed to harmonize high computational efficiency with strong reasoning and agentic tool-use performance. It introduces DeepSeek Sparse Attention (DSA), a fine-grained sparse attention mechanism... | 163,840 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "DeepSeek"
} | {
"audio": null,
"completion": "0.00000038",
"image": null,
"input_cache_read": "0.00000013",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000026",
"web_search": null
} | {
"context_length": 163840,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"logprobs",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 1,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/deepseek/deepseek-v3.2-20251201/endpoints"
} |
prime-intellect/intellect-3 | prime-intellect/intellect-3-20251126 | PrimeIntellect/INTELLECT-3-FP8 | Prime Intellect: INTELLECT-3 | 1,764,212,534 | INTELLECT-3 is a 106B-parameter Mixture-of-Experts model (12B active) post-trained from GLM-4.5-Air-Base using supervised fine-tuning (SFT) followed by large-scale reinforcement learning (RL). It offers state-of-the-art performance for its size across math,... | 131,072 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000011",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000002",
"web_search": null
} | {
"context_length": 131072,
"is_moderated": false,
"max_completion_tokens": 131072
} | null | [
"frequency_penalty",
"include_reasoning",
"max_tokens",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/prime-intellect/intellect-3-20251126/endpoints"
} |
anthropic/claude-opus-4.5 | anthropic/claude-4.5-opus-20251124 | Anthropic: Claude Opus 4.5 | 1,764,010,580 | Claude Opus 4.5 is Anthropic’s frontier reasoning model optimized for complex software engineering, agentic workflows, and long-horizon computer use. It offers strong multimodal capabilities, competitive performance across real-world coding and... | 200,000 | {
"input_modalities": [
"file",
"image",
"text"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "Claude"
} | {
"audio": null,
"completion": "0.000025",
"image": null,
"input_cache_read": "0.0000005",
"input_cache_write": "0.00000625",
"internal_reasoning": null,
"prompt": "0.000005",
"web_search": "0.01"
} | {
"context_length": 200000,
"is_moderated": true,
"max_completion_tokens": 64000
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"stop",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_k",
"verbosity"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/anthropic/claude-4.5-opus-20251124/endpoints"
} | |
allenai/olmo-3-32b-think | allenai/olmo-3-32b-think-20251121 | allenai/Olmo-3-32B-Think | AllenAI: Olmo 3 32B Think | 1,763,758,276 | Olmo 3 32B Think is a large-scale, 32-billion-parameter model purpose-built for deep reasoning, complex logic chains and advanced instruction-following scenarios. Its capacity enables strong performance on demanding evaluation tasks and... | 65,536 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.0000005",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000015",
"web_search": null
} | {
"context_length": 65536,
"is_moderated": false,
"max_completion_tokens": 65536
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.6,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/allenai/olmo-3-32b-think-20251121/endpoints"
} |
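The `supported_parameters` list can also act as a guard: fields outside it (for this Olmo record, `tools` and `tool_choice` are absent) are liable to be ignored or rejected. A defensive sketch that strips unsupported keys from a request body before sending; `strip_unsupported` is a hypothetical helper, not an OpenRouter SDK call:

```python
def strip_unsupported(request: dict, supported: list[str],
                      always_allowed=("model", "messages")) -> dict:
    """Drop request fields the model record does not list as supported."""
    allowed = set(supported) | set(always_allowed)
    return {k: v for k, v in request.items() if k in allowed}

supported = ["max_tokens", "temperature", "top_p", "stop"]  # subset of the record above
request = {"model": "allenai/olmo-3-32b-think", "messages": [],
           "temperature": 0.6, "tools": [{"type": "function"}]}
print(strip_unsupported(request, supported))
# 'tools' is dropped because the record does not list it
```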
google/gemini-3-pro-image-preview | google/gemini-3-pro-image-preview-20251120 | Google: Nano Banana Pro (Gemini 3 Pro Image Preview) | 1,763,653,797 | Nano Banana Pro is Google’s most advanced image-generation and editing model, built on Gemini 3 Pro. It extends the original Nano Banana with significantly improved multimodal reasoning, real-world grounding, and... | 65,536 | {
"input_modalities": [
"image",
"text"
],
"instruct_type": null,
"modality": "text+image->text+image",
"output_modalities": [
"image",
"text"
],
"tokenizer": "Gemini"
} | {
"audio": "0.000002",
"completion": "0.000012",
"image": "0.000002",
"input_cache_read": "0.0000002",
"input_cache_write": "0.000000375",
"internal_reasoning": "0.000012",
"prompt": "0.000002",
"web_search": null
} | {
"context_length": 65536,
"is_moderated": false,
"max_completion_tokens": 32768
} | null | [
"include_reasoning",
"max_tokens",
"reasoning",
"response_format",
"seed",
"stop",
"structured_outputs",
"temperature",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/google/gemini-3-pro-image-preview-20251120/endpoints"
} | |
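The `architecture` dict is what distinguishes this record from text-only models: `output_modalities` contains both `image` and `text`. A quick sketch for filtering records by output modality, assuming `rows` is an iterable of parsed records shaped like the ones in this preview:

```python
def models_with_output(rows, modality: str):
    """Yield ids of records whose architecture lists the given output modality."""
    for row in rows:
        if modality in row.get("architecture", {}).get("output_modalities", []):
            yield row["id"]

rows = [
    {"id": "google/gemini-3-pro-image-preview",
     "architecture": {"output_modalities": ["image", "text"]}},
    {"id": "x-ai/grok-4.1-fast",
     "architecture": {"output_modalities": ["text"]}},
]
print(list(models_with_output(rows, "image")))  # ['google/gemini-3-pro-image-preview']
```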
x-ai/grok-4.1-fast | x-ai/grok-4.1-fast | xAI: Grok 4.1 Fast | 1,763,587,502 | Grok 4.1 Fast is xAI's best agentic tool calling model that shines in real-world use cases like customer support and deep research. 2M context window. Reasoning can be enabled/disabled using... | 2,000,000 | {
"input_modalities": [
"text",
"image",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "Grok"
} | {
"audio": null,
"completion": "0.0000005",
"image": null,
"input_cache_read": "0.00000005",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.0000002",
"web_search": "0.005"
} | {
"context_length": 2000000,
"is_moderated": false,
"max_completion_tokens": 30000
} | null | [
"include_reasoning",
"logprobs",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"temperature",
"tool_choice",
"tools",
"top_logprobs",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": 0.7,
"top_k": null,
"top_p": 0.95
} | null | null | {
"details": "/api/v1/models/x-ai/grok-4.1-fast/endpoints"
} | |
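`top_provider.context_length` and `top_provider.max_completion_tokens` bound how large a request can be: the prompt plus the requested completion has to fit in the context window, and the completion is capped on its own. A small validation sketch assuming token counts are already known; `fits_limits` is illustrative:

```python
def fits_limits(top_provider: dict, prompt_tokens: int, max_tokens: int) -> bool:
    """Check a request against the provider's context and completion caps."""
    cap = top_provider.get("max_completion_tokens")
    if cap is not None and max_tokens > cap:
        return False
    return prompt_tokens + max_tokens <= top_provider["context_length"]

limits = {"context_length": 2_000_000, "max_completion_tokens": 30_000}  # Grok 4.1 Fast record above
print(fits_limits(limits, prompt_tokens=1_500_000, max_tokens=20_000))  # True
print(fits_limits(limits, prompt_tokens=1_500_000, max_tokens=64_000))  # False: over the completion cap
```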
deepcogito/cogito-v2.1-671b | deepcogito/cogito-v2.1-671b-20251118 | Deep Cogito: Cogito v2.1 671B | 1,763,071,233 | Cogito v2.1 671B MoE represents one of the strongest open models globally, matching performance of frontier closed and open models. This model is trained using self play with reinforcement learning... | 128,000 | {
"input_modalities": [
"text"
],
"instruct_type": null,
"modality": "text->text",
"output_modalities": [
"text"
],
"tokenizer": "Other"
} | {
"audio": null,
"completion": "0.00000125",
"image": null,
"input_cache_read": null,
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000125",
"web_search": null
} | {
"context_length": 128000,
"is_moderated": false,
"max_completion_tokens": null
} | null | [
"frequency_penalty",
"include_reasoning",
"logit_bias",
"max_tokens",
"min_p",
"presence_penalty",
"reasoning",
"repetition_penalty",
"response_format",
"stop",
"structured_outputs",
"temperature",
"top_k",
"top_p"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/deepcogito/cogito-v2.1-671b-20251118/endpoints"
} | |
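Each record's `links.details` value is a relative path to a per-model endpoints listing. A sketch of resolving and fetching it with `requests`, assuming the paths resolve against `https://openrouter.ai` (the host is an assumption, not stated in the data) and that an API key is available, though this read-only listing may not require one:

```python
import os
import requests

BASE_URL = "https://openrouter.ai"  # assumed host; the dataset stores only relative paths

def fetch_endpoints(details_path: str) -> dict:
    """GET the per-model endpoints document referenced by a record's links.details."""
    resp = requests.get(
        BASE_URL + details_path,
        headers={"Authorization": f"Bearer {os.environ['OPENROUTER_API_KEY']}"},
        timeout=30,
    )
    resp.raise_for_status()
    return resp.json()

data = fetch_endpoints("/api/v1/models/deepcogito/cogito-v2.1-671b-20251118/endpoints")
```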
openai/gpt-5.1 | openai/gpt-5.1-20251113 | OpenAI: GPT-5.1 | 1,763,060,305 | GPT-5.1 is the latest frontier-grade model in the GPT-5 series, offering stronger general-purpose reasoning, improved instruction adherence, and a more natural conversational style compared to GPT-5. It uses adaptive reasoning... | 400,000 | {
"input_modalities": [
"image",
"text",
"file"
],
"instruct_type": null,
"modality": "text+image+file->text",
"output_modalities": [
"text"
],
"tokenizer": "GPT"
} | {
"audio": null,
"completion": "0.00001",
"image": null,
"input_cache_read": "0.00000013",
"input_cache_write": null,
"internal_reasoning": null,
"prompt": "0.00000125",
"web_search": null
} | {
"context_length": 400000,
"is_moderated": false,
"max_completion_tokens": 128000
} | null | [
"include_reasoning",
"max_completion_tokens",
"max_tokens",
"reasoning",
"response_format",
"seed",
"structured_outputs",
"tool_choice",
"tools"
] | {
"frequency_penalty": null,
"presence_penalty": null,
"repetition_penalty": null,
"temperature": null,
"top_k": null,
"top_p": null
} | null | null | {
"details": "/api/v1/models/openai/gpt-5.1-20251113/endpoints"
} |
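The `created` column is a Unix timestamp in seconds (the commas are only display formatting), which is how the date suffix in canonical slugs such as `-20251113` can be cross-checked. A one-line sketch using the GPT-5.1 value above:

```python
from datetime import datetime, timezone

created = 1_763_060_305  # "created" value from the GPT-5.1 record above
print(datetime.fromtimestamp(created, tz=timezone.utc).isoformat())
# 2025-11-13T18:58:25+00:00, matching the -20251113 slug
```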