Switch from Bedrock to Anthropic endpoint as default. Include support for gpt-5.5 (#118)

* Default ml-intern to Anthropic

Co-authored-by: OpenAI Codex <codex@openai.com>

* Add direct OpenAI GPT-5 model support

Co-authored-by: OpenAI Codex <codex@openai.com>

* Raise probe budget for GPT-5 models

Co-authored-by: OpenAI Codex <codex@openai.com>

* Fix deps

* Fix stale xhigh provider messaging

Co-authored-by: OpenAI Codex <codex@openai.com>

---------

Co-authored-by: OpenAI Codex <codex@openai.com>
- README.md +2 -0
- agent/core/effort_probe.py +8 -4
- agent/core/llm_params.py +2 -2
- agent/core/model_switcher.py +5 -2
- agent/main.py +3 -2
- configs/main_agent_config.json +1 -1
- pyproject.toml +2 -2
- tests/unit/test_llm_params.py +25 -0
- uv.lock +98 -72
README.md
CHANGED

@@ -27,6 +27,7 @@ Create a `.env` file in the project root (or export these in your shell):
 
 ```bash
 ANTHROPIC_API_KEY=<your-anthropic-api-key> # if using anthropic models
+OPENAI_API_KEY=<your-openai-api-key> # if using openai models
 HF_TOKEN=<your-hugging-face-token>
 GITHUB_TOKEN=<github-personal-access-token>
 ```
@@ -50,6 +51,7 @@ ml-intern "fine-tune llama on my dataset"
 
 ```bash
 ml-intern --model anthropic/claude-opus-4-6 "your prompt"
+ml-intern --model openai/gpt-5.5 "your prompt"
 ml-intern --max-iterations 100 "your prompt"
 ml-intern --no-stream "your prompt"
 ```

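The README change is documentation only; the keys themselves reach the process through the usual `.env` loading. As a point of reference, here is a minimal sketch of that flow, assuming the standard python-dotenv pattern (python-dotenv is in the project's dependency list); this is illustrative, not the project's actual startup code:

```python
# Minimal sketch, assuming the usual python-dotenv flow; not the project's
# actual startup code.
import os

from dotenv import load_dotenv

load_dotenv()  # copies KEY=value pairs from ./.env into os.environ

# Direct openai/* models now need OPENAI_API_KEY alongside the existing keys.
if os.environ.get("OPENAI_API_KEY") is None:
    raise RuntimeError("Set OPENAI_API_KEY to use openai/* models")
```
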
agent/core/effort_probe.py
CHANGED

@@ -32,9 +32,10 @@ logger = logging.getLogger(__name__)
 
 
 # Cascade: for each user-stated preference, the ordered list of levels to
-# try. First success wins. ``max``
-# that don't accept
-#
+# try. First success wins. ``max`` is Anthropic-only; ``xhigh`` is also
+# supported on current OpenAI GPT-5 models. Providers that don't accept a
+# requested level raise ``UnsupportedEffortError`` synchronously (no wasted
+# network round-trip) and we advance to the next level.
 _EFFORT_CASCADE: dict[str, list[str]] = {
     "max": ["max", "xhigh", "high", "medium", "low"],
     "xhigh": ["xhigh", "high", "medium", "low"],
@@ -45,7 +46,10 @@ _EFFORT_CASCADE: dict[str, list[str]] = {
 }
 
 _PROBE_TIMEOUT = 15.0
-
+# Keep the probe cheap, but high enough that frontier reasoning models can
+# finish a trivial reply instead of tripping a false "output limit reached"
+# error during capability detection.
+_PROBE_MAX_TOKENS = 64
 
 
 class ProbeInconclusive(Exception):

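The updated comment fully describes the cascade's contract: walk the ordered list, let unsupported levels fail synchronously, keep the first accepted one. A self-contained sketch of that walk, for orientation; the real implementation lives in `agent/core/effort_probe.py`, and `accepts` below is a stand-in for the provider-side validation the module actually performs:

```python
# Illustrative sketch of the cascade walk described in the comment above;
# `accepts` stands in for the module's provider validation.
_EFFORT_CASCADE: dict[str, list[str]] = {
    "max": ["max", "xhigh", "high", "medium", "low"],
    "xhigh": ["xhigh", "high", "medium", "low"],
}


class UnsupportedEffortError(Exception):
    """Raised synchronously when a provider rejects an effort level."""


def resolve_effort(requested: str, accepts) -> str:
    for level in _EFFORT_CASCADE.get(requested, [requested]):
        try:
            accepts(level)  # rejects synchronously, before any network call
        except UnsupportedEffortError:
            continue  # walk down to the next level
        return level  # first success wins
    raise UnsupportedEffortError(f"no accepted level for {requested!r}")
```
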
agent/core/llm_params.py
CHANGED

@@ -66,13 +66,13 @@ _patch_litellm_effort_validation()
 
 # Effort levels accepted on the wire.
 # Anthropic (4.6+): low | medium | high | xhigh | max (output_config.effort)
-# OpenAI direct: minimal | low | medium | high
+# OpenAI direct: minimal | low | medium | high | xhigh (reasoning_effort top-level)
 # HF router: low | medium | high (extra_body.reasoning_effort)
 #
 # We validate *shape* here and let the probe cascade walk down on rejection;
 # we deliberately do NOT maintain a per-model capability table.
 _ANTHROPIC_EFFORTS = {"low", "medium", "high", "xhigh", "max"}
-_OPENAI_EFFORTS = {"minimal", "low", "medium", "high"}
+_OPENAI_EFFORTS = {"minimal", "low", "medium", "high", "xhigh"}
 _HF_EFFORTS = {"low", "medium", "high"}
 
 

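These sets encode shape validation only, as the comment says: is this level even a thing for this provider? A sketch of how such a per-provider lookup might be keyed off the model id prefix; the prefix-based dispatch here is an assumption for illustration, not the module's actual helper:

```python
# Illustrative only: shape validation against the per-provider sets, mirroring
# the "validate shape, let the probe cascade walk down" policy above. The
# prefix-based provider inference is an assumption of this sketch.
_ANTHROPIC_EFFORTS = {"low", "medium", "high", "xhigh", "max"}
_OPENAI_EFFORTS = {"minimal", "low", "medium", "high", "xhigh"}
_HF_EFFORTS = {"low", "medium", "high"}


def efforts_for(model: str) -> set[str]:
    if model.startswith(("anthropic/", "bedrock/")):
        return _ANTHROPIC_EFFORTS
    if model.startswith("openai/"):
        return _OPENAI_EFFORTS
    return _HF_EFFORTS  # everything else assumed to go through the HF router


assert "xhigh" in efforts_for("openai/gpt-5.5")    # newly allowed by this commit
assert "max" not in efforts_for("openai/gpt-5.4")  # still rejected for OpenAI
```
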
agent/core/model_switcher.py
CHANGED

@@ -24,8 +24,11 @@ from agent.core.effort_probe import ProbeInconclusive, probe_effort
 # ":cheapest" / ":preferred" / ":<provider>" to override the default
 # routing policy (auto = fastest with failover).
 SUGGESTED_MODELS = [
-    {"id": "
-    {"id": "
+    {"id": "openai/gpt-5.5", "label": "GPT-5.5"},
+    {"id": "openai/gpt-5.4", "label": "GPT-5.4"},
+    {"id": "anthropic/claude-opus-4-7", "label": "Claude Opus 4.7"},
+    {"id": "anthropic/claude-opus-4-6", "label": "Claude Opus 4.6"},
+    {"id": "bedrock/us.anthropic.claude-opus-4-6-v1", "label": "Claude Opus 4.6 via Bedrock"},
     {"id": "MiniMaxAI/MiniMax-M2.7", "label": "MiniMax M2.7"},
     {"id": "moonshotai/Kimi-K2.6", "label": "Kimi K2.6"},
     {"id": "zai-org/GLM-5.1", "label": "GLM 5.1"},

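The context comment mentions that a model id may carry a `":cheapest"` / `":preferred"` / `":<provider>"` suffix to override the auto routing policy. One way such a suffix could be split from the id, purely as a hedged sketch; the function name and parsing rule are this sketch's assumptions, not the module's code:

```python
# Hedged sketch of the ":<suffix>" routing override mentioned in the comment;
# split_routing_suffix is an illustrative name, not the module's helper.
def split_routing_suffix(model_id: str) -> tuple[str, str | None]:
    # "org/model:cheapest" -> ("org/model", "cheapest"); no suffix -> auto policy.
    base, sep, suffix = model_id.rpartition(":")
    if sep and "/" not in suffix:
        return base, suffix
    return model_id, None


assert split_routing_suffix("zai-org/GLM-5.1:cheapest") == ("zai-org/GLM-5.1", "cheapest")
assert split_routing_suffix("openai/gpt-5.5") == ("openai/gpt-5.5", None)
```
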
agent/main.py
CHANGED

@@ -771,8 +771,9 @@ async def _handle_slash_command(
         console.print(f" [dim]{m}: {eff or 'off'}[/dim]")
         console.print(
             "[dim]Set with '/effort minimal|low|medium|high|xhigh|max|off'. "
-            "'max'
-            "to whatever the
+            "'max' is Anthropic-only; 'xhigh' is also supported by current "
+            "OpenAI GPT-5 models. The cascade falls back to whatever the "
+            "model actually accepts.[/dim]"
         )
         return None
     level = arg.lower()

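The help text, together with the `level = arg.lower()` line visible in context, implies the shape of the argument handling: lower-case the input, treat `off` as "clear the setting", and leave actual support to the probe cascade. A small sketch under those assumptions (the function name and error handling are illustrative):

```python
# Sketch of the argument handling implied by the /effort help text above;
# illustrative names, not the functions in agent/main.py.
_LEVELS = {"minimal", "low", "medium", "high", "xhigh", "max"}


def parse_effort_arg(arg: str) -> str | None:
    level = arg.lower()
    if level == "off":
        return None  # disable the effort parameter entirely
    if level not in _LEVELS:
        raise ValueError(f"unknown effort level: {arg!r}")
    return level  # whether a model accepts it is settled by the probe cascade
```
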
configs/main_agent_config.json
CHANGED

@@ -1,5 +1,5 @@
 {
-  "model_name": "
+  "model_name": "anthropic/claude-opus-4-6",
   "save_sessions": true,
   "session_dataset_repo": "smolagents/ml-intern-sessions",
   "yolo_mode": false,

pyproject.toml
CHANGED

@@ -1,5 +1,5 @@
 [project]
-name = "hf-agent"
+name = "ml-intern"
 version = "0.1.0"
 description = "Add your description here"
 readme = "README.md"
@@ -46,7 +46,7 @@ dev = [
 
 # All dependencies (eval + dev)
 all = [
-    "hf-agent[eval,dev]",
+    "ml-intern[eval,dev]",
 ]
 
 [project.scripts]

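The `all = ["ml-intern[eval,dev]"]` entry uses the standard self-referencing-extra pattern: the `all` extra depends on the package itself with the `eval` and `dev` extras, so it stays in sync with them automatically. The catch, and the reason this hunk accompanies the rename, is that the self-reference must track the `name` field exactly; leaving the old string behind would make installs such as `uv sync --extra all` or `pip install -e ".[all]"` resolve the extra against a distribution name that no longer matches the project, which is what the `uv.lock` churn below reflects.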
tests/unit/test_llm_params.py
ADDED

@@ -0,0 +1,25 @@
+from agent.core.llm_params import UnsupportedEffortError, _resolve_llm_params
+
+
+def test_openai_xhigh_effort_is_forwarded():
+    params = _resolve_llm_params(
+        "openai/gpt-5.5",
+        reasoning_effort="xhigh",
+        strict=True,
+    )
+
+    assert params["model"] == "openai/gpt-5.5"
+    assert params["reasoning_effort"] == "xhigh"
+
+
+def test_openai_max_effort_is_still_rejected():
+    try:
+        _resolve_llm_params(
+            "openai/gpt-5.4",
+            reasoning_effort="max",
+            strict=True,
+        )
+    except UnsupportedEffortError as exc:
+        assert "OpenAI doesn't accept effort='max'" in str(exc)
+    else:
+        raise AssertionError("Expected UnsupportedEffortError for max effort")

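The try/except/else construction in the second test works without any pytest machinery, but since pytest is already a dev dependency, `pytest.raises` is the more idiomatic equivalent. Shown as an alternative form, not what the commit adds:

```python
# Equivalent, more idiomatic form using pytest.raises; an alternative to the
# try/except/else in the new test, not part of this commit.
import pytest

from agent.core.llm_params import UnsupportedEffortError, _resolve_llm_params


def test_openai_max_effort_is_still_rejected_alt():
    # match= does a regex search against the exception message.
    with pytest.raises(UnsupportedEffortError, match="doesn't accept effort='max'"):
        _resolve_llm_params("openai/gpt-5.4", reasoning_effort="max", strict=True)
```
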
uv.lock
CHANGED

@@ -228,6 +228,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/15/b3/9b1a8074496371342ec1e796a96f99c82c945a339cd81a8e73de28b4cf9e/anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc", size = 109097, upload-time = "2025-09-23T09:19:10.601Z" },
 ]
 
+[[package]]
+name = "apscheduler"
+version = "3.11.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "tzlocal" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/07/12/3e4389e5920b4c1763390c6d371162f3784f86f85cd6d6c1bfe68eef14e2/apscheduler-3.11.2.tar.gz", hash = "sha256:2a9966b052ec805f020c8c4c3ae6e6a06e24b1bf19f2e11d91d8cca0473eef41", size = 108683, upload-time = "2025-12-22T00:39:34.884Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/9f/64/2e54428beba8d9992aa478bb8f6de9e4ecaa5f8f513bcfd567ed7fb0262d/apscheduler-3.11.2-py3-none-any.whl", hash = "sha256:ce005177f741409db4e4dd40a7431b76feb856b9dd69d57e0da49d6715bfd26d", size = 64439, upload-time = "2025-12-22T00:39:33.303Z" },
+]
+
 [[package]]
 name = "attrs"
 version = "25.4.0"
@@ -992,78 +1004,6 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515, upload-time = "2025-04-24T03:35:24.344Z" },
 ]
 
-[[package]]
-name = "hf-agent"
-version = "0.1.0"
-source = { editable = "." }
-dependencies = [
-    { name = "boto3" },
-    { name = "datasets" },
-    { name = "fastapi" },
-    { name = "fastmcp" },
-    { name = "httpx" },
-    { name = "huggingface-hub" },
-    { name = "litellm" },
-    { name = "nbconvert" },
-    { name = "nbformat" },
-    { name = "prompt-toolkit" },
-    { name = "pydantic" },
-    { name = "python-dotenv" },
-    { name = "requests" },
-    { name = "rich" },
-    { name = "thefuzz" },
-    { name = "uvicorn", extra = ["standard"] },
-    { name = "websockets" },
-    { name = "whoosh" },
-]
-
-[package.optional-dependencies]
-all = [
-    { name = "datasets" },
-    { name = "inspect-ai" },
-    { name = "pandas" },
-    { name = "pytest" },
-    { name = "tenacity" },
-]
-dev = [
-    { name = "pytest" },
-]
-eval = [
-    { name = "datasets" },
-    { name = "inspect-ai" },
-    { name = "pandas" },
-    { name = "tenacity" },
-]
-
-[package.metadata]
-requires-dist = [
-    { name = "boto3", specifier = ">=1.35.0" },
-    { name = "datasets", specifier = ">=4.4.1" },
-    { name = "datasets", marker = "extra == 'eval'", specifier = ">=4.3.0" },
-    { name = "fastapi", specifier = ">=0.115.0" },
-    { name = "fastmcp", specifier = ">=3.2.0" },
-    { name = "hf-agent", extras = ["eval", "dev"], marker = "extra == 'all'" },
-    { name = "httpx", specifier = ">=0.27.0" },
-    { name = "huggingface-hub", specifier = ">=1.0.1" },
-    { name = "inspect-ai", marker = "extra == 'eval'", specifier = ">=0.3.149" },
-    { name = "litellm", specifier = ">=1.83.0" },
-    { name = "nbconvert", specifier = ">=7.16.6" },
-    { name = "nbformat", specifier = ">=5.10.4" },
-    { name = "pandas", marker = "extra == 'eval'", specifier = ">=2.3.3" },
-    { name = "prompt-toolkit", specifier = ">=3.0.0" },
-    { name = "pydantic", specifier = ">=2.12.3" },
-    { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.2" },
-    { name = "python-dotenv", specifier = ">=1.2.1" },
-    { name = "requests", specifier = ">=2.33.0" },
-    { name = "rich", specifier = ">=13.0.0" },
-    { name = "tenacity", marker = "extra == 'eval'", specifier = ">=8.0.0" },
-    { name = "thefuzz", specifier = ">=0.22.1" },
-    { name = "uvicorn", extras = ["standard"], specifier = ">=0.32.0" },
-    { name = "websockets", specifier = ">=13.0" },
-    { name = "whoosh", specifier = ">=2.7.4" },
-]
-provides-extras = ["eval", "dev", "all"]
-
 [[package]]
 name = "hf-xet"
 version = "1.2.0"
@@ -1827,6 +1767,80 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/9b/f7/4a5e785ec9fbd65146a27b6b70b6cdc161a66f2024e4b04ac06a67f5578b/mistune-3.2.0-py3-none-any.whl", hash = "sha256:febdc629a3c78616b94393c6580551e0e34cc289987ec6c35ed3f4be42d0eee1", size = 53598, upload-time = "2025-12-23T11:36:33.211Z" },
 ]
 
+[[package]]
+name = "ml-intern"
+version = "0.1.0"
+source = { editable = "." }
+dependencies = [
+    { name = "apscheduler" },
+    { name = "boto3" },
+    { name = "datasets" },
+    { name = "fastapi" },
+    { name = "fastmcp" },
+    { name = "httpx" },
+    { name = "huggingface-hub" },
+    { name = "litellm" },
+    { name = "nbconvert" },
+    { name = "nbformat" },
+    { name = "prompt-toolkit" },
+    { name = "pydantic" },
+    { name = "python-dotenv" },
+    { name = "requests" },
+    { name = "rich" },
+    { name = "thefuzz" },
+    { name = "uvicorn", extra = ["standard"] },
+    { name = "websockets" },
+    { name = "whoosh" },
+]
+
+[package.optional-dependencies]
+all = [
+    { name = "datasets" },
+    { name = "inspect-ai" },
+    { name = "pandas" },
+    { name = "pytest" },
+    { name = "tenacity" },
+]
+dev = [
+    { name = "pytest" },
+]
+eval = [
+    { name = "datasets" },
+    { name = "inspect-ai" },
+    { name = "pandas" },
+    { name = "tenacity" },
+]
+
+[package.metadata]
+requires-dist = [
+    { name = "apscheduler", specifier = ">=3.10,<4" },
+    { name = "boto3", specifier = ">=1.35.0" },
+    { name = "datasets", specifier = ">=4.4.1" },
+    { name = "datasets", marker = "extra == 'eval'", specifier = ">=4.3.0" },
+    { name = "fastapi", specifier = ">=0.115.0" },
+    { name = "fastmcp", specifier = ">=3.2.0" },
+    { name = "httpx", specifier = ">=0.27.0" },
+    { name = "huggingface-hub", specifier = ">=1.0.1" },
+    { name = "inspect-ai", marker = "extra == 'eval'", specifier = ">=0.3.149" },
+    { name = "litellm", specifier = ">=1.83.0" },
+    { name = "ml-intern", extras = ["eval", "dev"], marker = "extra == 'all'" },
+    { name = "nbconvert", specifier = ">=7.16.6" },
+    { name = "nbformat", specifier = ">=5.10.4" },
+    { name = "pandas", marker = "extra == 'eval'", specifier = ">=2.3.3" },
+    { name = "prompt-toolkit", specifier = ">=3.0.0" },
+    { name = "pydantic", specifier = ">=2.12.3" },
+    { name = "pytest", marker = "extra == 'dev'", specifier = ">=9.0.2" },
+    { name = "python-dotenv", specifier = ">=1.2.1" },
+    { name = "requests", specifier = ">=2.33.0" },
+    { name = "rich", specifier = ">=13.0.0" },
+    { name = "tenacity", marker = "extra == 'eval'", specifier = ">=8.0.0" },
+    { name = "thefuzz", specifier = ">=0.22.1" },
+    { name = "uvicorn", extras = ["standard"], specifier = ">=0.32.0" },
+    { name = "websockets", specifier = ">=13.0" },
+    { name = "whoosh", specifier = ">=2.7.4" },
+]
+provides-extras = ["eval", "dev", "all"]
+
 [[package]]
 name = "mmh3"
 version = "5.2.0"
@@ -3619,6 +3633,18 @@ wheels = [
     { url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839, upload-time = "2025-03-23T13:54:41.845Z" },
 ]
 
+[[package]]
+name = "tzlocal"
+version = "5.3.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+    { name = "tzdata", marker = "sys_platform == 'win32'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761, upload-time = "2025-03-05T21:17:41.549Z" }
+wheels = [
+    { url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" },
+]
+
 [[package]]
 name = "uc-micro-py"
 version = "1.0.3"
|