{
  "note": "NemoClaw runs OpenClaw inside an NVIDIA OpenShell sandbox. OpenShell is the governance runtime — it provides sandboxed execution, policy-enforced egress, and a privacy router for LLM calls. Install: curl -LsSf https://raw.githubusercontent.com/NVIDIA/OpenShell/main/install.sh | sh",
  "sandbox": {
    "from": "openclaw",
    "remote": null,
    "gpu": false
  },
  "policy": {
    "deny_by_default": true,
    "hot_reload": true,
    "layers": ["filesystem", "network", "process", "inference"]
  },
  "inference": {
    "provider": "custom",
    "model": "deepseek-chat",
    "base_url": "https://researchengineering-agi.hf.space/v1",
    "api_key": "${LLM_SPACE_API_KEY}"
  },
  "providers": {
    "openclaw": {
      "type": "openclaw",
      "token": "${OPENCLAW_GATEWAY_TOKEN}"
    }
  },
  "gateway": {
    "port": 18793,
    "auto_create": true
  }
}