{
  "model_name": "anthropic/claude-opus-4-6",
  "save_sessions": true,
  "session_dataset_repo": "smolagents/ml-intern-sessions",
  "share_traces": true,
  "personal_trace_repo_template": "{hf_user}/ml-intern-sessions",
| "yolo_mode": false, |
| "confirm_cpu_jobs": true, |
| "auto_file_upload": true, |
| "messaging": { |
| "enabled": false, |
| "auto_event_types": ["approval_required", "error", "turn_complete"], |
| "destinations": {} |
| }, |
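  "_messaging_example": {
    "_comment": "Hypothetical sketch of an enabled messaging block. The destinations shape below (name -> webhook URL) is an assumption for illustration only; the real schema may differ. The URL is a placeholder, not a live endpoint.",
    "enabled": true,
    "auto_event_types": ["approval_required", "error"],
    "destinations": {
      "team-slack": "https://hooks.slack.com/services/XXX/YYY/ZZZ"
    }
  },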
| "mcpServers": { |
| "hf-mcp-server": { |
| "transport": "http", |
| "url": "https://huggingface.co/mcp?login" |
| } |
| }, |
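  "_mcp_servers_example": {
    "_comment": "Hedged sketch: additional MCP servers can presumably be added as siblings of hf-mcp-server, following the same transport/url shape. The server name and URL below are placeholders; whether transports other than http are supported is not confirmed by this file.",
    "my-local-mcp": {
      "transport": "http",
      "url": "http://localhost:3000/mcp"
    }
  },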
| "_local_provider_examples": { |
| "_comment": "Set model_name to any of the prefixes below to use a local / self-hosted LLM. All local providers speak an OpenAI-compatible API. Reasoning effort is automatically disabled for local models.", |
| "llamacpp/llama-3-8b": "llama.cpp server (default http://localhost:8080/v1). Override with LLAMACPP_API_BASE env var.", |
| "lmstudio/llama-3-8b": "LM Studio (default http://localhost:1234/v1). Override with LMSTUDIO_API_BASE env var.", |
| "mlx/llama-3-8b": "MLX OpenAI-compatible wrapper (default http://localhost:8000/v1). Override with MLX_API_BASE env var.", |
| "nim/llama-3-8b": "NVIDIA NIM (default http://localhost:8000/v1). Override with NIM_API_BASE env var.", |
| "ollama/llama3.1": "Ollama (default http://localhost:11434/v1). Override with OLLAMA_API_BASE env var.", |
| "vllm/llama-3-8b": "vLLM (default http://localhost:8000/v1). Override with VLLM_API_BASE env var.", |
| "tgi/llama-3-8b": "Text Generation Inference (default http://localhost:8080/v1). Override with TGI_API_BASE env var.", |
| "local/llama-3-8b": "Custom PyTorch / any OpenAI-compatible server (default http://localhost:8000/v1). Override with LOCAL_API_BASE env var.", |
| "_auth_note": "All local providers use api_key='no-key' by default. Set LOCAL_API_KEY env var if your server requires authentication." |
| } |
| } |