id
stringlengths
9
61
canonical_slug
stringlengths
11
50
hugging_face_id
stringlengths
0
56
name
stringlengths
8
54
created
int64
1.69B
1.78B
description
stringlengths
67
330
context_length
int64
2.82k
2M
architecture
dict
pricing
dict
top_provider
dict
per_request_limits
null
supported_parameters
listlengths
0
22
default_parameters
dict
knowledge_cutoff
stringclasses
26 values
expiration_date
stringclasses
4 values
links
dict
x-ai/grok-3-mini
x-ai/grok-3-mini
xAI: Grok 3 Mini
1,749,583,245
A lightweight model that thinks before responding. Fast, smart, and great for logic-based tasks that do not require deep domain knowledge. The raw thinking traces are accessible.
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Grok" }
{ "audio": null, "completion": "0.0000005", "image": null, "input_cache_read": "0.000000075", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000003", "web_search": "0.005" }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "include_reasoning", "logprobs", "max_tokens", "reasoning", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-02-28
null
{ "details": "/api/v1/models/x-ai/grok-3-mini/endpoints" }
x-ai/grok-3
x-ai/grok-3
xAI: Grok 3
1,749,582,908
Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Grok" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": "0.00000075", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.005" }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logprobs", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-02-28
null
{ "details": "/api/v1/models/x-ai/grok-3/endpoints" }
google/gemini-2.5-pro-preview
google/gemini-2.5-pro-preview-06-05
Google: Gemini 2.5 Pro Preview 06-05
1,749,137,257
Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...
1,048,576
{ "input_modalities": [ "file", "image", "text", "audio" ], "instruct_type": null, "modality": "text+image+file+audio->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": "0.00000125", "completion": "0.00001", "image": "0.00000125", "input_cache_read": "0.000000125", "input_cache_write": "0.000000375", "internal_reasoning": "0.00001", "prompt": "0.00000125", "web_search": null }
{ "context_length": 1048576, "is_moderated": false, "max_completion_tokens": 65536 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-01-31
null
{ "details": "/api/v1/models/google/gemini-2.5-pro-preview-06-05/endpoints" }
deepseek/deepseek-r1-0528
deepseek/deepseek-r1-0528
deepseek-ai/DeepSeek-R1-0528
DeepSeek: R1 0528
1,748,455,170
May 28th update to the [original DeepSeek R1](/deepseek/deepseek-r1) Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active...
163,840
{ "input_modalities": [ "text" ], "instruct_type": "deepseek-r1", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "DeepSeek" }
{ "audio": null, "completion": "0.00000215", "image": null, "input_cache_read": "0.00000035", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000005", "web_search": null }
{ "context_length": 163840, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "include_reasoning", "logit_bias", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/deepseek/deepseek-r1-0528/endpoints" }
anthropic/claude-opus-4
anthropic/claude-4-opus-20250522
Anthropic: Claude Opus 4
1,747,931,245
Claude Opus 4 is benchmarked as the world’s best coding model, at time of release, bringing sustained performance on complex, long-running tasks and agent workflows. It sets new benchmarks in...
200,000
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "Claude" }
{ "audio": null, "completion": "0.000075", "image": null, "input_cache_read": "0.0000015", "input_cache_write": "0.00001875", "internal_reasoning": null, "prompt": "0.000015", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": false, "max_completion_tokens": 32000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-01-31
null
{ "details": "/api/v1/models/anthropic/claude-4-opus-20250522/endpoints" }
anthropic/claude-sonnet-4
anthropic/claude-4-sonnet-20250522
Anthropic: Claude Sonnet 4
1,747,930,371
Claude Sonnet 4 significantly enhances the capabilities of its predecessor, Sonnet 3.7, excelling in both coding and reasoning tasks with improved precision and controllability. Achieving state-of-the-art performance on SWE-bench (72.7%),...
1,000,000
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "Claude" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": "0.0000003", "input_cache_write": "0.00000375", "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.01" }
{ "context_length": 1000000, "is_moderated": false, "max_completion_tokens": 64000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-01-31
null
{ "details": "/api/v1/models/anthropic/claude-4-sonnet-20250522/endpoints" }
google/gemma-3n-e4b-it:free
google/gemma-3n-e4b-it
google/gemma-3n-E4B-it
Google: Gemma 3n 4B (free)
1,747,776,824
Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...
8,192
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0", "web_search": null }
{ "context_length": 8192, "is_moderated": false, "max_completion_tokens": 2048 }
null
[ "max_tokens", "response_format", "seed", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3n-e4b-it/endpoints" }
google/gemma-3n-e4b-it
google/gemma-3n-e4b-it
google/gemma-3n-E4B-it
Google: Gemma 3n 4B
1,747,776,824
Gemma 3n E4B-it is optimized for efficient execution on mobile and low-resource devices, such as phones, laptops, and tablets. It supports multimodal inputs—including text, visual data, and audio—enabling diverse tasks...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00000004", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000002", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3n-e4b-it/endpoints" }
mistralai/mistral-medium-3
mistralai/mistral-medium-3
Mistral: Mistral Medium 3
1,746,627,341
Mistral Medium 3 is a high-performance enterprise-grade language model designed to deliver frontier-level capabilities at significantly reduced operational cost. It balances state-of-the-art reasoning and multimodal performance with 8× lower cost...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.000002", "image": null, "input_cache_read": "0.00000004", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000004", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/mistralai/mistral-medium-3/endpoints" }
google/gemini-2.5-pro-preview-05-06
google/gemini-2.5-pro-preview-03-25
Google: Gemini 2.5 Pro Preview 05-06
1,746,578,513
Gemini 2.5 Pro is Google’s state-of-the-art AI model designed for advanced reasoning, coding, mathematics, and scientific tasks. It employs “thinking” capabilities, enabling it to reason through responses with enhanced accuracy...
1,048,576
{ "input_modalities": [ "text", "image", "file", "audio", "video" ], "instruct_type": null, "modality": "text+image+file+audio+video->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": "0.00000125", "completion": "0.00001", "image": "0.00000125", "input_cache_read": "0.000000125", "input_cache_write": "0.000000375", "internal_reasoning": "0.00001", "prompt": "0.00000125", "web_search": null }
{ "context_length": 1048576, "is_moderated": false, "max_completion_tokens": 65535 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-01-31
null
{ "details": "/api/v1/models/google/gemini-2.5-pro-preview-03-25/endpoints" }
arcee-ai/spotlight
arcee-ai/spotlight
Arcee AI: Spotlight
1,746,481,552
Spotlight is a 7‑billion‑parameter vision‑language model derived from Qwen 2.5‑VL and fine‑tuned by Arcee AI for tight image‑text grounding tasks. It offers a 32 k‑token context window, enabling rich multimodal...
131,072
{ "input_modalities": [ "image", "text" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00000018", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000018", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 65537 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/arcee-ai/spotlight/endpoints" }
arcee-ai/maestro-reasoning
arcee-ai/maestro-reasoning
Arcee AI: Maestro Reasoning
1,746,481,269
Maestro Reasoning is Arcee's flagship analysis model: a 32 B‑parameter derivative of Qwen 2.5‑32 B tuned with DPO and chain‑of‑thought RL for step‑by‑step logic. Compared to the earlier 7 B...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000033", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000009", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 32000 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/arcee-ai/maestro-reasoning/endpoints" }
arcee-ai/virtuoso-large
arcee-ai/virtuoso-large
Arcee AI: Virtuoso Large
1,746,478,885
Virtuoso‑Large is Arcee's top‑tier general‑purpose LLM at 72 B parameters, tuned to tackle cross‑domain reasoning, creative writing and enterprise QA. Unlike many 70 B peers, it retains the 128 k...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000012", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000075", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 64000 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/arcee-ai/virtuoso-large/endpoints" }
arcee-ai/coder-large
arcee-ai/coder-large
Arcee AI: Coder Large
1,746,478,663
Coder‑Large is a 32 B‑parameter offspring of Qwen 2.5‑Instruct that has been further trained on permissively‑licensed GitHub, CodeSearchNet and synthetic bug‑fix corpora. It supports a 32k context window, enabling multi‑file...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000005", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/arcee-ai/coder-large/endpoints" }
meta-llama/llama-guard-4-12b
meta-llama/llama-guard-4-12b
meta-llama/Llama-Guard-4-12B
Meta: Llama Guard 4 12B
1,745,975,193
Llama Guard 4 is a Llama 4 Scout-derived multimodal pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM...
163,840
{ "input_modalities": [ "image", "text" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00000018", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000018", "web_search": null }
{ "context_length": 163840, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/meta-llama/llama-guard-4-12b/endpoints" }
qwen/qwen3-30b-a3b
qwen/qwen3-30b-a3b-04-28
Qwen/Qwen3-30B-A3B
Qwen: Qwen3 30B A3B
1,745,878,604
Qwen3, the latest generation in the Qwen large language model series, features both dense and mixture-of-experts (MoE) architectures to excel in reasoning, multilingual support, and advanced agent tasks. Its unique...
40,960
{ "input_modalities": [ "text" ], "instruct_type": "qwen3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen3" }
{ "audio": null, "completion": "0.00000028", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000008", "web_search": null }
{ "context_length": 40960, "is_moderated": false, "max_completion_tokens": 40960 }
null
[ "frequency_penalty", "include_reasoning", "logprobs", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen3-30b-a3b-04-28/endpoints" }
qwen/qwen3-8b
qwen/qwen3-8b-04-28
Qwen/Qwen3-8B
Qwen: Qwen3 8B
1,745,876,632
Qwen3-8B is a dense 8.2B parameter causal language model from the Qwen3 series, designed for both reasoning-heavy tasks and efficient dialogue. It supports seamless switching between "thinking" mode for math,...
40,960
{ "input_modalities": [ "text" ], "instruct_type": "qwen3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen3" }
{ "audio": null, "completion": "0.0000004", "image": null, "input_cache_read": "0.00000005", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000005", "web_search": null }
{ "context_length": 40960, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "frequency_penalty", "include_reasoning", "logit_bias", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.6, "top_k": 20, "top_p": 0.95 }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen3-8b-04-28/endpoints" }
qwen/qwen3-14b
qwen/qwen3-14b-04-28
Qwen/Qwen3-14B
Qwen: Qwen3 14B
1,745,876,478
Qwen3-14B is a dense 14.8B parameter causal language model from the Qwen3 series, designed for both complex reasoning and efficient dialogue. It supports seamless switching between a "thinking" mode for...
40,960
{ "input_modalities": [ "text" ], "instruct_type": "qwen3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen3" }
{ "audio": null, "completion": "0.00000024", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000006", "web_search": null }
{ "context_length": 40960, "is_moderated": false, "max_completion_tokens": 40960 }
null
[ "frequency_penalty", "include_reasoning", "logprobs", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen3-14b-04-28/endpoints" }
qwen/qwen3-32b
qwen/qwen3-32b-04-28
Qwen/Qwen3-32B
Qwen: Qwen3 32B
1,745,875,945
Qwen3-32B is a dense 32.8B parameter causal language model from the Qwen3 series, optimized for both complex reasoning and efficient dialogue. It supports seamless switching between a "thinking" mode for...
40,960
{ "input_modalities": [ "text" ], "instruct_type": "qwen3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen3" }
{ "audio": null, "completion": "0.00000024", "image": null, "input_cache_read": "0.00000004", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000008", "web_search": null }
{ "context_length": 40960, "is_moderated": false, "max_completion_tokens": 40960 }
null
[ "frequency_penalty", "include_reasoning", "logit_bias", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen3-32b-04-28/endpoints" }
qwen/qwen3-235b-a22b
qwen/qwen3-235b-a22b-04-28
Qwen/Qwen3-235B-A22B
Qwen: Qwen3 235B A22B
1,745,875,757
Qwen3-235B-A22B is a 235B parameter mixture-of-experts (MoE) model developed by Qwen, activating 22B parameters per forward pass. It supports seamless switching between a "thinking" mode for complex reasoning, math, and...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "qwen3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen3" }
{ "audio": null, "completion": "0.00000182", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000000455", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "include_reasoning", "max_tokens", "presence_penalty", "reasoning", "response_format", "seed", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen3-235b-a22b-04-28/endpoints" }
openai/o4-mini-high
openai/o4-mini-high-2025-04-16
OpenAI: o4 Mini High
1,744,824,212
OpenAI o4-mini-high is the same model as [o4-mini](/openai/o4-mini) with reasoning_effort set to high. OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining...
200,000
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000044", "image": null, "input_cache_read": "0.000000275", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000011", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/o4-mini-high-2025-04-16/endpoints" }
openai/o3
openai/o3-2025-04-16
OpenAI: o3
1,744,823,457
o3 is a well-rounded and powerful model across domains. It sets a new standard for math, science, coding, and visual reasoning tasks. It also excels at technical writing and instruction-following....
200,000
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.000008", "image": null, "input_cache_read": "0.0000005", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/o3-2025-04-16/endpoints" }
openai/o4-mini
openai/o4-mini-2025-04-16
OpenAI: o4 Mini
1,744,820,942
OpenAI o4-mini is a compact reasoning model in the o-series, optimized for fast, cost-efficient performance while retaining strong multimodal and agentic capabilities. It supports tool use and demonstrates competitive reasoning...
200,000
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000044", "image": null, "input_cache_read": "0.000000275", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000011", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/o4-mini-2025-04-16/endpoints" }
openai/gpt-4.1
openai/gpt-4.1-2025-04-14
OpenAI: GPT-4.1
1,744,651,385
GPT-4.1 is a flagship large language model optimized for advanced instruction following, real-world software engineering, and long-context reasoning. It supports a 1 million token context window and outperforms GPT-4o and...
1,047,576
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.000008", "image": null, "input_cache_read": "0.0000005", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": null }
{ "context_length": 1047576, "is_moderated": false, "max_completion_tokens": null }
null
[ "max_completion_tokens", "max_tokens", "response_format", "seed", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/gpt-4.1-2025-04-14/endpoints" }
openai/gpt-4.1-mini
openai/gpt-4.1-mini-2025-04-14
OpenAI: GPT-4.1 Mini
1,744,651,381
GPT-4.1 Mini is a mid-sized model delivering performance competitive with GPT-4o at substantially lower latency and cost. It retains a 1 million token context window and scores 45.1% on hard...
1,047,576
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000016", "image": null, "input_cache_read": "0.0000001", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000004", "web_search": "0.01" }
{ "context_length": 1047576, "is_moderated": true, "max_completion_tokens": 32768 }
null
[ "max_completion_tokens", "max_tokens", "response_format", "seed", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/gpt-4.1-mini-2025-04-14/endpoints" }
openai/gpt-4.1-nano
openai/gpt-4.1-nano-2025-04-14
OpenAI: GPT-4.1 Nano
1,744,651,369
For tasks that demand low latency, GPT‑4.1 nano is the fastest and cheapest model in the GPT-4.1 series. It delivers exceptional performance at a small size with its 1 million...
1,047,576
{ "input_modalities": [ "image", "text", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000004", "image": null, "input_cache_read": "0.000000025", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000001", "web_search": "0.01" }
{ "context_length": 1047576, "is_moderated": true, "max_completion_tokens": 32768 }
null
[ "max_completion_tokens", "max_tokens", "response_format", "seed", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/openai/gpt-4.1-nano-2025-04-14/endpoints" }
alfredpros/codellama-7b-instruct-solidity
alfredpros/codellama-7b-instruct-solidity
AlfredPros/CodeLlama-7b-Instruct-Solidity
AlfredPros: CodeLLaMa 7B Instruct Solidity
1,744,641,874
A finetuned 7 billion parameters Code LLaMA - Instruct model to generate Solidity smart contract using 4-bit QLoRA finetuning provided by PEFT library.
4,096
{ "input_modalities": [ "text" ], "instruct_type": "alpaca", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000012", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000008", "web_search": null }
{ "context_length": 4096, "is_moderated": false, "max_completion_tokens": 4096 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-06-30
null
{ "details": "/api/v1/models/alfredpros/codellama-7b-instruct-solidity/endpoints" }
x-ai/grok-3-mini-beta
x-ai/grok-3-mini-beta
xAI: Grok 3 Mini Beta
1,744,240,195
Grok 3 Mini is a lightweight, smaller thinking model. Unlike traditional models that generate answers immediately, Grok 3 Mini thinks before responding. It’s ideal for reasoning-heavy tasks that don’t demand...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Grok" }
{ "audio": null, "completion": "0.0000005", "image": null, "input_cache_read": "0.000000075", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000003", "web_search": "0.005" }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "include_reasoning", "logprobs", "max_tokens", "reasoning", "response_format", "seed", "stop", "temperature", "tool_choice", "tools", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-02-28
null
{ "details": "/api/v1/models/x-ai/grok-3-mini-beta/endpoints" }
x-ai/grok-3-beta
x-ai/grok-3-beta
xAI: Grok 3 Beta
1,744,240,068
Grok 3 is the latest model from xAI. It's their flagship model that excels at enterprise use cases like data extraction, coding, and text summarization. Possesses deep domain knowledge in...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Grok" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": "0.00000075", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.005" }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logprobs", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "temperature", "tool_choice", "tools", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-02-28
null
{ "details": "/api/v1/models/x-ai/grok-3-beta/endpoints" }
meta-llama/llama-4-maverick
meta-llama/llama-4-maverick-17b-128e-instruct
meta-llama/Llama-4-Maverick-17B-128E-Instruct
Meta: Llama 4 Maverick
1,743,881,822
Llama 4 Maverick 17B Instruct (128E) is a high-capacity multimodal language model from Meta, built on a mixture-of-experts (MoE) architecture with 128 experts and 17 billion active parameters per forward...
1,048,576
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Llama4" }
{ "audio": null, "completion": "0.0000006", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000015", "web_search": null }
{ "context_length": 1048576, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/meta-llama/llama-4-maverick-17b-128e-instruct/endpoints" }
meta-llama/llama-4-scout
meta-llama/llama-4-scout-17b-16e-instruct
meta-llama/Llama-4-Scout-17B-16E-Instruct
Meta: Llama 4 Scout
1,743,881,519
Llama 4 Scout 17B Instruct (16E) is a mixture-of-experts (MoE) language model developed by Meta, activating 17 billion parameters out of a total of 109B. It supports native multimodal input...
327,680
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Llama4" }
{ "audio": null, "completion": "0.0000003", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000008", "web_search": null }
{ "context_length": 327680, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/meta-llama/llama-4-scout-17b-16e-instruct/endpoints" }
qwen/qwen2.5-vl-32b-instruct
qwen/qwen2.5-vl-32b-instruct
Qwen/Qwen2.5-VL-32B-Instruct
Qwen: Qwen2.5 VL 32B Instruct
1,742,839,838
Qwen2.5-VL-32B is a multimodal vision-language model fine-tuned through reinforcement learning for enhanced mathematical reasoning, structured outputs, and visual problem-solving capabilities. It excels at visual analysis tasks, including object recognition, textual...
128,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.0000006", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000002", "web_search": null }
{ "context_length": 128000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/qwen/qwen2.5-vl-32b-instruct/endpoints" }
deepseek/deepseek-chat-v3-0324
deepseek/deepseek-chat-v3-0324
deepseek-ai/DeepSeek-V3-0324
DeepSeek: DeepSeek V3 0324
1,742,824,755
DeepSeek V3, a 685B-parameter, mixture-of-experts model, is the latest iteration of the flagship chat model family from the DeepSeek team. It succeeds the [DeepSeek V3](/deepseek/deepseek-chat-v3) model and performs really well...
163,840
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "DeepSeek" }
{ "audio": null, "completion": "0.00000077", "image": null, "input_cache_read": "0.000000135", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000002", "web_search": null }
{ "context_length": 163840, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/deepseek/deepseek-chat-v3-0324/endpoints" }
openai/o1-pro
openai/o1-pro
OpenAI: o1-pro
1,742,423,211
The o1 series of models are trained with reinforcement learning to think before they answer and perform complex reasoning. The o1-pro model uses more compute to think harder and provide...
200,000
{ "input_modalities": [ "text", "image", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0006", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00015", "web_search": null }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/o1-pro/endpoints" }
mistralai/mistral-small-3.1-24b-instruct
mistralai/mistral-small-3.1-24b-instruct-2503
mistralai/Mistral-Small-3.1-24B-Instruct-2503
Mistral: Mistral Small 3.1 24B
1,742,238,937
Mistral Small 3.1 24B Instruct is an upgraded variant of Mistral Small 3 (2501), featuring 24 billion parameters with advanced multimodal capabilities. It provides state-of-the-art performance in text-based reasoning and...
128,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.00000056", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000035", "web_search": null }
{ "context_length": 128000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "repetition_penalty", "seed", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/mistralai/mistral-small-3.1-24b-instruct-2503/endpoints" }
allenai/olmo-2-0325-32b-instruct
allenai/olmo-2-0325-32b-instruct
allenai/OLMo-2-0325-32B-Instruct
AllenAI: Olmo 2 32B Instruct
1,741,988,556
OLMo-2 32B Instruct is a supervised instruction-finetuned variant of the OLMo-2 32B March 2025 base model. It excels in complex reasoning and instruction-following tasks across diverse benchmarks such as GSM8K,...
128,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000002", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000005", "web_search": null }
{ "context_length": 128000, "is_moderated": false, "max_completion_tokens": null }
null
[]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/allenai/olmo-2-0325-32b-instruct/endpoints" }
google/gemma-3-4b-it:free
google/gemma-3-4b-it
google/gemma-3-4b-it
Google: Gemma 3 4B (free)
1,741,905,510
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
32,768
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "response_format", "seed", "stop", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-4b-it/endpoints" }
google/gemma-3-4b-it
google/gemma-3-4b-it
google/gemma-3-4b-it
Google: Gemma 3 4B
1,741,905,510
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0.00000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000004", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-4b-it/endpoints" }
google/gemma-3-12b-it:free
google/gemma-3-12b-it
google/gemma-3-12b-it
Google: Gemma 3 12B (free)
1,741,902,625
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
32,768
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "seed", "stop", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-12b-it/endpoints" }
google/gemma-3-12b-it
google/gemma-3-12b-it
google/gemma-3-12b-it
Google: Gemma 3 12B
1,741,902,625
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0.00000013", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000004", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-12b-it/endpoints" }
cohere/command-a
cohere/command-a-03-2025
CohereForAI/c4ai-command-a-03-2025
Cohere: Command A
1,741,894,342
Command A is an open-weights 111B parameter model with a 256k context window focused on delivering great performance across agentic, multilingual, and coding use cases. Compared to other leading proprietary...
256,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000025", "web_search": null }
{ "context_length": 256000, "is_moderated": true, "max_completion_tokens": 8192 }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/cohere/command-a-03-2025/endpoints" }
openai/gpt-4o-mini-search-preview
openai/gpt-4o-mini-search-preview-2025-03-11
OpenAI: GPT-4o-mini Search Preview
1,741,818,122
GPT-4o mini Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.
128,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000006", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000015", "web_search": "0.0275" }
{ "context_length": 128000, "is_moderated": true, "max_completion_tokens": 16384 }
null
[ "max_tokens", "response_format", "structured_outputs", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/gpt-4o-mini-search-preview-2025-03-11/endpoints" }
openai/gpt-4o-search-preview
openai/gpt-4o-search-preview-2025-03-11
OpenAI: GPT-4o Search Preview
1,741,817,949
GPT-4o Search Preview is a specialized model for web search in Chat Completions. It is trained to understand and execute web search queries.
128,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.00001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000025", "web_search": "0.035" }
{ "context_length": 128000, "is_moderated": true, "max_completion_tokens": 16384 }
null
[ "max_tokens", "response_format", "structured_outputs", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/gpt-4o-search-preview-2025-03-11/endpoints" }
rekaai/reka-flash-3
rekaai/reka-flash-3
RekaAI/reka-flash-3
Reka Flash 3
1,741,812,813
Reka Flash 3 is a general-purpose, instruction-tuned large language model with 21 billion parameters, developed by Reka. It excels at general chat, coding tasks, instruction-following, and function calling. Featuring a...
65,536
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000002", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000001", "web_search": null }
{ "context_length": 65536, "is_moderated": false, "max_completion_tokens": 65536 }
null
[ "frequency_penalty", "include_reasoning", "max_tokens", "presence_penalty", "reasoning", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-01-31
null
{ "details": "/api/v1/models/rekaai/reka-flash-3/endpoints" }
google/gemma-3-27b-it:free
google/gemma-3-27b-it
google/gemma-3-27b-it
Google: Gemma 3 27B (free)
1,741,756,359
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "response_format", "seed", "stop", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-27b-it/endpoints" }
google/gemma-3-27b-it
google/gemma-3-27b-it
google/gemma-3-27b-it
Google: Gemma 3 27B
1,741,756,359
Gemma 3 introduces multimodality, supporting vision-language input and text outputs. It handles context windows up to 128k tokens, understands over 140 languages, and offers improved math, reasoning, and chat capabilities,...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": "gemma", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": null, "completion": "0.00000016", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000008", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/google/gemma-3-27b-it/endpoints" }
thedrummer/skyfall-36b-v2
thedrummer/skyfall-36b-v2
TheDrummer/Skyfall-36B-v2
TheDrummer: Skyfall 36B V2
1,741,636,566
Skyfall 36B v2 is an enhanced iteration of Mistral Small 2501, specifically fine-tuned for improved creativity, nuanced writing, role-playing, and coherent storytelling.
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000008", "image": null, "input_cache_read": "0.00000025", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000055", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "presence_penalty", "repetition_penalty", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/thedrummer/skyfall-36b-v2/endpoints" }
perplexity/sonar-reasoning-pro
perplexity/sonar-reasoning-pro
Perplexity: Sonar Reasoning Pro
1,741,313,308
Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) Sonar Reasoning Pro is a premier reasoning model powered by DeepSeek R1 with Chain of Thought (CoT). Designed for...
128,000
{ "input_modalities": [ "text", "image" ], "instruct_type": "deepseek-r1", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": "0.005" }
{ "context_length": 128000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "include_reasoning", "max_tokens", "presence_penalty", "reasoning", "temperature", "top_k", "top_p", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/perplexity/sonar-reasoning-pro/endpoints" }
perplexity/sonar-pro
perplexity/sonar-pro
Perplexity: Sonar Pro
1,741,312,423
Note: Sonar Pro pricing includes Perplexity search pricing. See [details here](https://docs.perplexity.ai/guides/pricing#detailed-pricing-breakdown-for-sonar-reasoning-pro-and-sonar-pro) For enterprises seeking more advanced capabilities, the Sonar Pro API can handle in-depth, multi-step queries with added extensibilit...
200,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.005" }
{ "context_length": 200000, "is_moderated": false, "max_completion_tokens": 8000 }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "temperature", "top_k", "top_p", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/perplexity/sonar-pro/endpoints" }
perplexity/sonar-deep-research
perplexity/sonar-deep-research
Perplexity: Sonar Deep Research
1,741,311,246
Sonar Deep Research is a research-focused model designed for multi-step retrieval, synthesis, and reasoning across complex topics. It autonomously searches, reads, and evaluates sources, refining its approach as it gathers...
128,000
{ "input_modalities": [ "text" ], "instruct_type": "deepseek-r1", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": "0.000003", "prompt": "0.000002", "web_search": "0.005" }
{ "context_length": 128000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "include_reasoning", "max_tokens", "presence_penalty", "reasoning", "temperature", "top_k", "top_p", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/perplexity/sonar-deep-research/endpoints" }
qwen/qwq-32b
qwen/qwq-32b
Qwen/QwQ-32B
Qwen: QwQ 32B
1,741,208,814
QwQ is the reasoning model of the Qwen series. Compared with conventional instruction-tuned models, QwQ, which is capable of thinking and reasoning, can achieve significantly enhanced performance in downstream tasks,...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "qwq", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000058", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000015", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 131072 }
null
[ "frequency_penalty", "include_reasoning", "reasoning", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/qwen/qwq-32b/endpoints" }
google/gemini-2.0-flash-lite-001
google/gemini-2.0-flash-lite-001
Google: Gemini 2.0 Flash Lite
1,740,506,212
Gemini 2.0 Flash Lite offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5),...
1,048,576
{ "input_modalities": [ "text", "image", "file", "audio", "video" ], "instruct_type": null, "modality": "text+image+file+audio+video->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": "0.000000075", "completion": "0.0000003", "image": "0.000000075", "input_cache_read": null, "input_cache_write": null, "internal_reasoning": "0.0000003", "prompt": "0.000000075", "web_search": null }
{ "context_length": 1048576, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
2026-06-01
{ "details": "/api/v1/models/google/gemini-2.0-flash-lite-001/endpoints" }
anthropic/claude-3.7-sonnet
anthropic/claude-3-7-sonnet-20250219
Anthropic: Claude 3.7 Sonnet
1,740,422,110
Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...
200,000
{ "input_modalities": [ "text", "image", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "Claude" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": "0.0000003", "input_cache_write": "0.00000375", "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 128000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
2026-05-05
{ "details": "/api/v1/models/anthropic/claude-3-7-sonnet-20250219/endpoints" }
anthropic/claude-3.7-sonnet:thinking
anthropic/claude-3-7-sonnet-20250219
Anthropic: Claude 3.7 Sonnet (thinking)
1,740,422,110
Claude 3.7 Sonnet is an advanced large language model with improved reasoning, coding, and problem-solving capabilities. It introduces a hybrid reasoning approach, allowing users to choose between rapid responses and...
200,000
{ "input_modalities": [ "text", "image", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "Claude" }
{ "audio": null, "completion": "0.000015", "image": null, "input_cache_read": "0.0000003", "input_cache_write": "0.00000375", "internal_reasoning": null, "prompt": "0.000003", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": false, "max_completion_tokens": 64000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "stop", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
2026-05-05
{ "details": "/api/v1/models/anthropic/claude-3-7-sonnet-20250219/endpoints" }
mistralai/mistral-saba
mistralai/mistral-saba-2502
Mistral: Saba
1,739,803,239
Mistral Saba is a 24B-parameter language model specifically designed for the Middle East and South Asia, delivering accurate and contextually relevant responses while maintaining efficient performance. Trained on curated regional...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.0000006", "image": null, "input_cache_read": "0.00000002", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000002", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2024-09-30
null
{ "details": "/api/v1/models/mistralai/mistral-saba-2502/endpoints" }
meta-llama/llama-guard-3-8b
meta-llama/llama-guard-3-8b
meta-llama/Llama-Guard-3-8B
Llama Guard 3 8B
1,739,401,318
Llama Guard 3 is a Llama-3.1-8B pretrained model, fine-tuned for content safety classification. Similar to previous versions, it can be used to classify content in both LLM inputs (prompt classification)...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "none", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.00000003", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000048", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "repetition_penalty", "seed", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/meta-llama/llama-guard-3-8b/endpoints" }
openai/o3-mini-high
openai/o3-mini-high-2025-01-31
OpenAI: o3 Mini High
1,739,372,611
OpenAI o3-mini-high is the same model as [o3-mini](/openai/o3-mini) with reasoning_effort set to high. o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and...
200,000
{ "input_modalities": [ "text", "file" ], "instruct_type": null, "modality": "text+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000044", "image": null, "input_cache_read": "0.00000055", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000011", "web_search": null }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/o3-mini-high-2025-01-31/endpoints" }
google/gemini-2.0-flash-001
google/gemini-2.0-flash-001
Google: Gemini 2.0 Flash
1,738,769,413
Gemini Flash 2.0 offers a significantly faster time to first token (TTFT) compared to [Gemini Flash 1.5](/google/gemini-flash-1.5), while maintaining quality on par with larger models like [Gemini Pro 1.5](/google/gemini-pro-1.5). It...
1,048,576
{ "input_modalities": [ "text", "image", "file", "audio", "video" ], "instruct_type": null, "modality": "text+image+file+audio+video->text", "output_modalities": [ "text" ], "tokenizer": "Gemini" }
{ "audio": "0.0000007", "completion": "0.0000004", "image": "0.0000001", "input_cache_read": "0.000000025", "input_cache_write": "0.00000008333333333333334", "internal_reasoning": "0.0000004", "prompt": "0.0000001", "web_search": null }
{ "context_length": 1048576, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
2026-06-01
{ "details": "/api/v1/models/google/gemini-2.0-flash-001/endpoints" }
qwen/qwen-vl-plus
qwen/qwen-vl-plus
Qwen: Qwen VL Plus
1,738,731,255
Qwen's Enhanced Large Visual Language Model. Significantly upgraded for detailed recognition capabilities and text recognition abilities, supporting ultra-high pixel resolutions up to millions of pixels and extreme aspect ratios for...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.0000004095", "image": null, "input_cache_read": "0.0000000273", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000001365", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "presence_penalty", "response_format", "seed", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen-vl-plus/endpoints" }
aion-labs/aion-1.0
aion-labs/aion-1.0
AionLabs: Aion-1.0
1,738,697,557
Aion-1.0 is a multi-model system designed for high performance across various tasks, including reasoning and coding. It is built on DeepSeek-R1, augmented with additional models and techniques such as Tree...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000004", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "include_reasoning", "max_tokens", "reasoning", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/aion-labs/aion-1.0/endpoints" }
aion-labs/aion-1.0-mini
aion-labs/aion-1.0-mini
FuseAI/FuseO1-DeepSeekR1-QwQ-SkyT1-32B-Preview
AionLabs: Aion-1.0-Mini
1,738,697,107
Aion-1.0-Mini 32B parameter model is a distilled version of the DeepSeek-R1 model, designed for strong performance in reasoning domains such as mathematics, coding, and logic. It is a modified variant...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000014", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000007", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "include_reasoning", "max_tokens", "reasoning", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/aion-labs/aion-1.0-mini/endpoints" }
aion-labs/aion-rp-llama-3.1-8b
aion-labs/aion-rp-llama-3.1-8b
AionLabs: Aion-RP 1.0 (8B)
1,738,696,718
Aion-RP-Llama-3.1-8B ranks the highest in the character evaluation portion of the RPBench-Auto benchmark, a roleplaying-specific variant of Arena-Hard-Auto, where LLMs evaluate each other’s responses. It is a fine-tuned base model...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000016", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000008", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "max_tokens", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/aion-labs/aion-rp-llama-3.1-8b/endpoints" }
qwen/qwen-vl-max
qwen/qwen-vl-max-2025-01-25
Qwen: Qwen VL Max
1,738,434,304
Qwen VL Max is a visual understanding model with 7500 tokens context length. It excels in delivering optimal performance for a broader spectrum of complex tasks.
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000208", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000052", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "max_tokens", "presence_penalty", "response_format", "seed", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen-vl-max-2025-01-25/endpoints" }
qwen/qwen-turbo
qwen/qwen-turbo-2024-11-01
Qwen: Qwen-Turbo
1,738,410,974
Qwen-Turbo, based on Qwen2.5, is a 1M context model that provides fast speed and low cost, suitable for simple tasks.
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000013", "image": null, "input_cache_read": "0.0000000065", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000000325", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "presence_penalty", "response_format", "seed", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen-turbo-2024-11-01/endpoints" }
qwen/qwen2.5-vl-72b-instruct
qwen/qwen2.5-vl-72b-instruct
Qwen/Qwen2.5-VL-72B-Instruct
Qwen: Qwen2.5 VL 72B Instruct
1,738,410,311
Qwen2.5-VL is proficient in recognizing common objects such as flowers, birds, fish, and insects. It is also highly capable of analyzing texts, charts, icons, graphics, and layouts within images.
32,768
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.0000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000008", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/qwen/qwen2.5-vl-72b-instruct/endpoints" }
qwen/qwen-plus
qwen/qwen-plus-2025-01-25
Qwen: Qwen-Plus
1,738,409,840
Qwen-Plus, based on the Qwen2.5 foundation model, is a 131K context model with a balanced performance, speed, and cost combination.
1,000,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000078", "image": null, "input_cache_read": "0.000000052", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000026", "web_search": null }
{ "context_length": 1000000, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "max_tokens", "presence_penalty", "response_format", "seed", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen-plus-2025-01-25/endpoints" }
qwen/qwen-max
qwen/qwen-max-2025-01-25
Qwen: Qwen-Max
1,738,402,289
Qwen-Max, based on Qwen2.5, provides the best inference performance among [Qwen models](/qwen), especially for complex multi-step tasks. It's a large-scale MoE model that has been pretrained on over 20 trillion...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000416", "image": null, "input_cache_read": "0.000000208", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000104", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 8192 }
null
[ "max_tokens", "presence_penalty", "response_format", "seed", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2025-03-31
null
{ "details": "/api/v1/models/qwen/qwen-max-2025-01-25/endpoints" }
openai/o3-mini
openai/o3-mini-2025-01-31
OpenAI: o3 Mini
1,738,351,721
OpenAI o3-mini is a cost-efficient language model optimized for STEM reasoning tasks, particularly excelling in science, mathematics, and coding. This model supports the `reasoning_effort` parameter, which can be set to...
200,000
{ "input_modalities": [ "text", "file" ], "instruct_type": null, "modality": "text+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.0000044", "image": null, "input_cache_read": "0.00000055", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000011", "web_search": null }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/o3-mini-2025-01-31/endpoints" }
mistralai/mistral-small-24b-instruct-2501
mistralai/mistral-small-24b-instruct-2501
mistralai/Mistral-Small-24B-Instruct-2501
Mistral: Mistral Small 3
1,738,255,409
Mistral Small 3 is a 24B-parameter language model optimized for low-latency performance across common AI tasks. Released under the Apache 2.0 license, it features both pre-trained and instruction-tuned versions designed...
32,768
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.00000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000005", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/mistralai/mistral-small-24b-instruct-2501/endpoints" }
deepseek/deepseek-r1-distill-qwen-32b
deepseek/deepseek-r1-distill-qwen-32b
deepseek-ai/DeepSeek-R1-Distill-Qwen-32B
DeepSeek: R1 Distill Qwen 32B
1,738,194,830
DeepSeek R1 Distill Qwen 32B is a distilled large language model based on [Qwen 2.5 32B](https://huggingface.co/Qwen/Qwen2.5-32B), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). It outperforms OpenAI's o1-mini across various benchmarks, achieving new...
32,768
{ "input_modalities": [ "text" ], "instruct_type": "deepseek-r1", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000029", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000029", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "include_reasoning", "logprobs", "max_tokens", "presence_penalty", "reasoning", "response_format", "stop", "structured_outputs", "temperature", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/deepseek/deepseek-r1-distill-qwen-32b/endpoints" }
perplexity/sonar
perplexity/sonar
Perplexity: Sonar
1,738,013,808
Sonar is lightweight, affordable, fast, and simple to use — now featuring citations and the ability to customize sources. It is designed for companies seeking to integrate lightweight question-and-answer features...
127,072
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.000001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000001", "web_search": "0.005" }
{ "context_length": 127072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "temperature", "top_k", "top_p", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
null
null
{ "details": "/api/v1/models/perplexity/sonar/endpoints" }
deepseek/deepseek-r1-distill-llama-70b
deepseek/deepseek-r1-distill-llama-70b
deepseek-ai/DeepSeek-R1-Distill-Llama-70B
DeepSeek: R1 Distill Llama 70B
1,737,663,169
DeepSeek R1 Distill Llama 70B is a distilled large language model based on [Llama-3.3-70B-Instruct](/meta-llama/llama-3.3-70b-instruct), using outputs from [DeepSeek R1](/deepseek/deepseek-r1). The model combines advanced distillation techniques to achieve high performance across...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "deepseek-r1", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.0000008", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000007", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "include_reasoning", "max_tokens", "min_p", "presence_penalty", "reasoning", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/deepseek/deepseek-r1-distill-llama-70b/endpoints" }
deepseek/deepseek-r1
deepseek/deepseek-r1
deepseek-ai/DeepSeek-R1
DeepSeek: R1
1,737,381,095
DeepSeek R1 is here: Performance on par with [OpenAI o1](/openai/o1), but open-sourced and with fully open reasoning tokens. It's 671B parameters in size, with 37B active in an inference pass....
64,000
{ "input_modalities": [ "text" ], "instruct_type": "deepseek-r1", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "DeepSeek" }
{ "audio": null, "completion": "0.0000025", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000007", "web_search": null }
{ "context_length": 64000, "is_moderated": false, "max_completion_tokens": 16000 }
null
[ "frequency_penalty", "include_reasoning", "max_completion_tokens", "max_tokens", "presence_penalty", "reasoning", "repetition_penalty", "seed", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/deepseek/deepseek-r1/endpoints" }
minimax/minimax-01
minimax/minimax-01
MiniMaxAI/MiniMax-Text-01
MiniMax: MiniMax-01
1,736,915,462
MiniMax-01 combines MiniMax-Text-01 for text generation and MiniMax-VL-01 for image understanding. It has 456 billion parameters, with 45.9 billion parameters activated per inference, and can handle a context...
1,000,192
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.0000011", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000002", "web_search": null }
{ "context_length": 1000192, "is_moderated": false, "max_completion_tokens": 1000192 }
null
[ "max_tokens", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-03-31
null
{ "details": "/api/v1/models/minimax/minimax-01/endpoints" }
microsoft/phi-4
microsoft/phi-4
microsoft/phi-4
Microsoft: Phi 4
1,736,489,872
[Microsoft Research](/microsoft) Phi-4 is designed to perform well in complex reasoning tasks and can operate efficiently in situations with limited memory or where quick responses are needed. At 14 billion...
16,384
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00000014", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000000065", "web_search": null }
{ "context_length": 16384, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logprobs", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/microsoft/phi-4/endpoints" }
sao10k/l3.1-70b-hanami-x1
sao10k/l3.1-70b-hanami-x1
Sao10K/L3.1-70B-Hanami-x1
Sao10K: Llama 3.1 70B Hanami x1
1,736,302,854
This is [Sao10K](/sao10k)'s experiment over [Euryale v2.2](/sao10k/l3.1-euryale-70b).
16,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.000003", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000003", "web_search": null }
{ "context_length": 16000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/sao10k/l3.1-70b-hanami-x1/endpoints" }
deepseek/deepseek-chat
deepseek/deepseek-chat-v3
deepseek-ai/DeepSeek-V3
DeepSeek: DeepSeek V3
1,735,241,320
DeepSeek-V3 is the latest model from the DeepSeek team, building upon the instruction following and coding abilities of the previous versions. Pre-trained on nearly 15 trillion tokens, the reported evaluations...
163,840
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "DeepSeek" }
{ "audio": null, "completion": "0.00000089", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000032", "web_search": null }
{ "context_length": 163840, "is_moderated": false, "max_completion_tokens": 163840 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/deepseek/deepseek-chat-v3/endpoints" }
sao10k/l3.3-euryale-70b
sao10k/l3.3-euryale-70b-v2.3
Sao10K/L3.3-70B-Euryale-v2.3
Sao10K: Llama 3.3 Euryale 70B
1,734,535,928
Euryale L3.3 70B is a model focused on creative roleplay from [Sao10k](https://ko-fi.com/sao10k). It is the successor of [Euryale L3 70B v2.2](/models/sao10k/l3-euryale-70b).
131,072
{ "input_modalities": [ "text" ], "instruct_type": "llama3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.00000075", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000065", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logprobs", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/sao10k/l3.3-euryale-70b-v2.3/endpoints" }
openai/o1
openai/o1-2024-12-17
OpenAI: o1
1,734,459,999
The latest and strongest model family from OpenAI, o1 is designed to spend more time thinking before responding. The o1 model series is trained with large-scale reinforcement learning to reason...
200,000
{ "input_modalities": [ "text", "image", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.00006", "image": null, "input_cache_read": "0.0000075", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000015", "web_search": null }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 100000 }
null
[ "include_reasoning", "max_tokens", "reasoning", "response_format", "seed", "structured_outputs", "tool_choice", "tools" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/o1-2024-12-17/endpoints" }
cohere/command-r7b-12-2024
cohere/command-r7b-12-2024
Cohere: Command R7B (12-2024)
1,734,158,152
Command R7B (12-2024) is a small, fast update of the Command R+ model, delivered in December 2024. It excels at RAG, tool use, agents, and similar tasks requiring complex reasoning...
128,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Cohere" }
{ "audio": null, "completion": "0.00000015", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000000375", "web_search": null }
{ "context_length": 128000, "is_moderated": true, "max_completion_tokens": 4000 }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-08-31
null
{ "details": "/api/v1/models/cohere/command-r7b-12-2024/endpoints" }
meta-llama/llama-3.3-70b-instruct:free
meta-llama/llama-3.3-70b-instruct
meta-llama/Llama-3.3-70B-Instruct
Meta: Llama 3.3 70B Instruct (free)
1,733,506,137
The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...
65,536
{ "input_modalities": [ "text" ], "instruct_type": "llama3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0", "web_search": null }
{ "context_length": 65536, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/meta-llama/llama-3.3-70b-instruct/endpoints" }
meta-llama/llama-3.3-70b-instruct
meta-llama/llama-3.3-70b-instruct
meta-llama/Llama-3.3-70B-Instruct
Meta: Llama 3.3 70B Instruct
1,733,506,137
The Meta Llama 3.3 multilingual large language model (LLM) is a pretrained and instruction tuned generative model in 70B (text in/text out). The Llama 3.3 instruction tuned text only model...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "llama3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.00000032", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000001", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/meta-llama/llama-3.3-70b-instruct/endpoints" }
amazon/nova-lite-v1
amazon/nova-lite-v1
Amazon: Nova Lite 1.0
1,733,437,363
Amazon Nova Lite 1.0 is a very low-cost multimodal model from Amazon that focuses on fast processing of image, video, and text inputs to generate text output. Amazon Nova Lite...
300,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Nova" }
{ "audio": null, "completion": "0.00000024", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000006", "web_search": null }
{ "context_length": 300000, "is_moderated": true, "max_completion_tokens": 5120 }
null
[ "max_tokens", "stop", "temperature", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/amazon/nova-lite-v1/endpoints" }
amazon/nova-micro-v1
amazon/nova-micro-v1
Amazon: Nova Micro 1.0
1,733,437,237
Amazon Nova Micro 1.0 is a text-only model that delivers the lowest latency responses in the Amazon Nova family of models at a very low cost. With a context length...
128,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Nova" }
{ "audio": null, "completion": "0.00000014", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000000035", "web_search": null }
{ "context_length": 128000, "is_moderated": true, "max_completion_tokens": 5120 }
null
[ "max_tokens", "stop", "temperature", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/amazon/nova-micro-v1/endpoints" }
amazon/nova-pro-v1
amazon/nova-pro-v1
Amazon: Nova Pro 1.0
1,733,436,303
Amazon Nova Pro 1.0 is a capable multimodal model from Amazon focused on providing a combination of accuracy, speed, and cost for a wide range of tasks. As of December...
300,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Nova" }
{ "audio": null, "completion": "0.0000032", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000008", "web_search": null }
{ "context_length": 300000, "is_moderated": true, "max_completion_tokens": 5120 }
null
[ "max_tokens", "stop", "temperature", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/amazon/nova-pro-v1/endpoints" }
openai/gpt-4o-2024-11-20
openai/gpt-4o-2024-11-20
OpenAI: GPT-4o (2024-11-20)
1,732,127,594
The 2024-11-20 version of GPT-4o offers a leveled-up creative writing ability with more natural, engaging, and tailored writing to improve relevance & readability. It’s also better at working with uploaded...
128,000
{ "input_modalities": [ "text", "image", "file" ], "instruct_type": null, "modality": "text+image+file->text", "output_modalities": [ "text" ], "tokenizer": "GPT" }
{ "audio": null, "completion": "0.00001", "image": null, "input_cache_read": "0.00000125", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000025", "web_search": null }
{ "context_length": 128000, "is_moderated": true, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_logprobs", "top_p", "web_search_options" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-10-31
null
{ "details": "/api/v1/models/openai/gpt-4o-2024-11-20/endpoints" }
mistralai/mistral-large-2411
mistralai/mistral-large-2411
Mistral Large 2411
1,731,978,685
Mistral Large 2 2411 is an update of [Mistral Large 2](/mistralai/mistral-large) released together with [Pixtral Large 2411](/mistralai/pixtral-large-2411). It provides a significant upgrade on the previous [Mistral Large 24.07](/mistralai/mistral-large-2407), with notable...
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.000006", "image": null, "input_cache_read": "0.0000002", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/mistralai/mistral-large-2411/endpoints" }
mistralai/mistral-large-2407
mistralai/mistral-large-2407
Mistral Large 2407
1,731,978,415
This is Mistral AI's flagship model, Mistral Large 2 (version mistral-large-2407). It's a proprietary weights-available model and excels at reasoning, code, JSON, chat, and more. Read the launch announcement [here](https://mistral.ai/news/mistral-large-2407/)....
131,072
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.000006", "image": null, "input_cache_read": "0.0000002", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2024-03-31
null
{ "details": "/api/v1/models/mistralai/mistral-large-2407/endpoints" }
mistralai/pixtral-large-2411
mistralai/pixtral-large-2411
Mistral: Pixtral Large 2411
1,731,977,388
Pixtral Large is a 124B parameter, open-weight, multimodal model built on top of [Mistral Large 2](/mistralai/mistral-large-2411). The model is able to understand documents, charts and natural images. The model is...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.000006", "image": null, "input_cache_read": "0.0000002", "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000002", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": 0.3, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/mistralai/pixtral-large-2411/endpoints" }
qwen/qwen-2.5-coder-32b-instruct
qwen/qwen-2.5-coder-32b-instruct
Qwen/Qwen2.5-Coder-32B-Instruct
Qwen2.5 Coder 32B Instruct
1,731,368,400
Qwen2.5-Coder is the latest series of Code-Specific Qwen large language models (formerly known as CodeQwen). Qwen2.5-Coder brings the following improvements upon CodeQwen1.5: - Significantly improvements in **code generation**, **code reasoning**...
32,768
{ "input_modalities": [ "text" ], "instruct_type": "chatml", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.000001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000066", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "repetition_penalty", "seed", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/qwen/qwen-2.5-coder-32b-instruct/endpoints" }
thedrummer/unslopnemo-12b
thedrummer/unslopnemo-12b
TheDrummer/UnslopNemo-12B-v4.1
TheDrummer: UnslopNemo 12B
1,731,103,448
UnslopNemo v4.1 is the latest addition from the creator of Rocinante, designed for adventure writing and role-play scenarios.
32,768
{ "input_modalities": [ "text" ], "instruct_type": "mistral", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Mistral" }
{ "audio": null, "completion": "0.0000004", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000004", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "logprobs", "max_tokens", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-04-30
null
{ "details": "/api/v1/models/thedrummer/unslopnemo-12b/endpoints" }
anthropic/claude-3.5-haiku
anthropic/claude-3-5-haiku
null
Anthropic: Claude 3.5 Haiku
1,730,678,400
Claude 3.5 Haiku features offers enhanced capabilities in speed, coding accuracy, and tool use. Engineered to excel in real-time applications, it delivers quick response times that are essential for dynamic...
200,000
{ "input_modalities": [ "text", "image" ], "instruct_type": null, "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Claude" }
{ "audio": null, "completion": "0.000004", "image": null, "input_cache_read": "0.00000008", "input_cache_write": "0.000001", "internal_reasoning": null, "prompt": "0.0000008", "web_search": "0.01" }
{ "context_length": 200000, "is_moderated": true, "max_completion_tokens": 8192 }
null
[ "max_tokens", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-07-31
null
{ "details": "/api/v1/models/anthropic/claude-3-5-haiku/endpoints" }
anthracite-org/magnum-v4-72b
anthracite-org/magnum-v4-72b
anthracite-org/magnum-v4-72b
Magnum v4 72B
1,729,555,200
This is a series of models designed to replicate the prose quality of the Claude 3 models, specifically Sonnet(https://openrouter.ai/anthropic/claude-3.5-sonnet) and Opus(https://openrouter.ai/anthropic/claude-3-opus). The model is fine-tuned on top of [Qwen2.5 72B](https://openrouter.ai/qwen/qwen-2.5-72b-instruct).
16,384
{ "input_modalities": [ "text" ], "instruct_type": "chatml", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.000005", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000003", "web_search": null }
{ "context_length": 16384, "is_moderated": false, "max_completion_tokens": 2048 }
null
[ "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_a", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/anthracite-org/magnum-v4-72b/endpoints" }
qwen/qwen-2.5-7b-instruct
qwen/qwen-2.5-7b-instruct
Qwen/Qwen2.5-7B-Instruct
Qwen: Qwen2.5 7B Instruct
1,729,036,800
Qwen2.5 7B is the latest series of Qwen large language models. Qwen2.5 brings the following improvements upon Qwen2: - Significantly more knowledge and has greatly improved capabilities in coding and...
32,768
{ "input_modalities": [ "text" ], "instruct_type": "chatml", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.0000001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000004", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "logit_bias", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-06-30
null
{ "details": "/api/v1/models/qwen/qwen-2.5-7b-instruct/endpoints" }
nvidia/llama-3.1-nemotron-70b-instruct
nvidia/llama-3.1-nemotron-70b-instruct
nvidia/Llama-3.1-Nemotron-70B-Instruct-HF
NVIDIA: Llama 3.1 Nemotron 70B Instruct
1,728,950,400
NVIDIA's Llama 3.1 Nemotron 70B is a language model designed for generating precise and useful responses. Leveraging [Llama 3.1 70B](/models/meta-llama/llama-3.1-70b-instruct) architecture and Reinforcement Learning from Human Feedback (RLHF), it excels...
131,072
{ "input_modalities": [ "text" ], "instruct_type": "llama3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.0000012", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000012", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "tool_choice", "tools", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/nvidia/llama-3.1-nemotron-70b-instruct/endpoints" }
inflection/inflection-3-pi
inflection/inflection-3-pi
null
Inflection: Inflection 3 Pi
1,728,604,800
Inflection 3 Pi powers Inflection's [Pi](https://pi.ai) chatbot, including backstory, emotional intelligence, productivity, and safety. It has access to recent news, and excels in scenarios like customer support and roleplay. Pi...
8,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000025", "web_search": null }
{ "context_length": 8000, "is_moderated": false, "max_completion_tokens": 1024 }
null
[ "max_tokens", "stop", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/inflection/inflection-3-pi/endpoints" }
inflection/inflection-3-productivity
inflection/inflection-3-productivity
null
Inflection: Inflection 3 Productivity
1,728,604,800
Inflection 3 Productivity is optimized for following instructions. It is better for tasks requiring JSON output or precise adherence to provided guidelines. It has access to recent news. For emotional...
8,000
{ "input_modalities": [ "text" ], "instruct_type": null, "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Other" }
{ "audio": null, "completion": "0.00001", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.0000025", "web_search": null }
{ "context_length": 8000, "is_moderated": false, "max_completion_tokens": 1024 }
null
[ "max_tokens", "stop", "temperature", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-10-31
null
{ "details": "/api/v1/models/inflection/inflection-3-productivity/endpoints" }
thedrummer/rocinante-12b
thedrummer/rocinante-12b
TheDrummer/Rocinante-12B-v1.1
TheDrummer: Rocinante 12B
1,727,654,400
Rocinante 12B is designed for engaging storytelling and rich prose. Early testers have reported: - Expanded vocabulary with unique and expressive word choices - Enhanced creativity for vivid narratives -...
32,768
{ "input_modalities": [ "text" ], "instruct_type": "chatml", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Qwen" }
{ "audio": null, "completion": "0.00000043", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.00000017", "web_search": null }
{ "context_length": 32768, "is_moderated": false, "max_completion_tokens": 32768 }
null
[ "frequency_penalty", "logit_bias", "logprobs", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "structured_outputs", "temperature", "tool_choice", "tools", "top_k", "top_logprobs", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2024-04-30
null
{ "details": "/api/v1/models/thedrummer/rocinante-12b/endpoints" }
meta-llama/llama-3.2-1b-instruct
meta-llama/llama-3.2-1b-instruct
meta-llama/Llama-3.2-1B-Instruct
Meta: Llama 3.2 1B Instruct
1,727,222,400
Llama 3.2 1B is a 1-billion-parameter language model focused on efficiently performing natural language tasks, such as summarization, dialogue, and multilingual text analysis. Its smaller size allows it to operate...
60,000
{ "input_modalities": [ "text" ], "instruct_type": "llama3", "modality": "text->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.0000002", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000000027", "web_search": null }
{ "context_length": 60000, "is_moderated": false, "max_completion_tokens": null }
null
[ "frequency_penalty", "max_tokens", "presence_penalty", "repetition_penalty", "seed", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/meta-llama/llama-3.2-1b-instruct/endpoints" }
meta-llama/llama-3.2-11b-vision-instruct
meta-llama/llama-3.2-11b-vision-instruct
meta-llama/Llama-3.2-11B-Vision-Instruct
Meta: Llama 3.2 11B Vision Instruct
1,727,222,400
Llama 3.2 11B Vision is a multimodal model with 11 billion parameters, designed to handle tasks combining visual and textual data. It excels in tasks such as image captioning and...
131,072
{ "input_modalities": [ "text", "image" ], "instruct_type": "llama3", "modality": "text+image->text", "output_modalities": [ "text" ], "tokenizer": "Llama3" }
{ "audio": null, "completion": "0.000000245", "image": null, "input_cache_read": null, "input_cache_write": null, "internal_reasoning": null, "prompt": "0.000000245", "web_search": null }
{ "context_length": 131072, "is_moderated": false, "max_completion_tokens": 16384 }
null
[ "frequency_penalty", "max_tokens", "min_p", "presence_penalty", "repetition_penalty", "response_format", "seed", "stop", "temperature", "top_k", "top_p" ]
{ "frequency_penalty": null, "presence_penalty": null, "repetition_penalty": null, "temperature": null, "top_k": null, "top_p": null }
2023-12-31
null
{ "details": "/api/v1/models/meta-llama/llama-3.2-11b-vision-instruct/endpoints" }