Spaces:
Running
Running
Upload folder using huggingface_hub
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- Dockerfile +28 -33
- README.md +336 -35
- __init__.py +31 -0
- client.py +144 -0
- envs/sumo_rl_env/README.md +341 -0
- envs/sumo_rl_env/__init__.py +31 -0
- envs/sumo_rl_env/client.py +144 -0
- envs/sumo_rl_env/models.py +107 -0
- envs/sumo_rl_env/nets/single-intersection/single-intersection.edg.xml +6 -0
- envs/sumo_rl_env/nets/single-intersection/single-intersection.net.xml +86 -0
- envs/sumo_rl_env/nets/single-intersection/single-intersection.nod.xml +7 -0
- envs/sumo_rl_env/nets/single-intersection/single-intersection.rou.xml +6 -0
- envs/sumo_rl_env/nets/single-intersection/single-intersection.sumocfg +10 -0
- envs/sumo_rl_env/server/Dockerfile +65 -0
- envs/sumo_rl_env/server/__init__.py +7 -0
- envs/sumo_rl_env/server/app.py +53 -0
- envs/sumo_rl_env/server/sumo_environment.py +234 -0
- envs/sumo_rl_env/test_sumo_rl.sh +220 -0
- models.py +107 -0
- nets/single-intersection/single-intersection.edg.xml +6 -0
- nets/single-intersection/single-intersection.net.xml +86 -0
- nets/single-intersection/single-intersection.nod.xml +7 -0
- nets/single-intersection/single-intersection.rou.xml +6 -0
- nets/single-intersection/single-intersection.sumocfg +10 -0
- pyproject.toml +147 -0
- server/Dockerfile +65 -0
- server/__init__.py +7 -0
- server/app.py +53 -0
- server/sumo_environment.py +234 -0
- src/__init__.py +7 -0
- src/core/README.md +212 -0
- src/core/__init__.py +70 -8
- src/core/client_types.py +23 -0
- src/core/containers/__init__.py +1 -1
- src/core/containers/images/Dockerfile +29 -11
- src/core/containers/images/README.md +8 -8
- src/core/containers/runtime/__init__.py +12 -2
- src/core/containers/runtime/daytona_provider.py +572 -0
- src/core/containers/runtime/providers.py +389 -9
- src/core/containers/runtime/uv_provider.py +224 -0
- src/core/containers/test_local_docker_provider.py +8 -6
- src/core/env_client.py +484 -0
- src/core/env_server/__init__.py +118 -3
- src/core/env_server/base_transforms.py +1 -1
- src/core/env_server/exceptions.py +105 -0
- src/core/env_server/gradio_theme.py +128 -0
- src/core/env_server/gradio_ui.py +240 -0
- src/core/env_server/http_server.py +1263 -105
- src/core/env_server/interfaces.py +189 -10
- src/core/env_server/mcp_environment.py +624 -0
Dockerfile
CHANGED
|
@@ -1,35 +1,19 @@
|
|
| 1 |
-
#
|
| 2 |
-
#
|
| 3 |
-
#
|
| 4 |
-
# This source code is licensed under the BSD-style license found in the
|
| 5 |
-
# LICENSE file in the root directory of this source tree.
|
| 6 |
-
|
| 7 |
-
# Multi-stage build: First stage builds the base image
|
| 8 |
-
FROM python:3.11-slim as base-builder
|
| 9 |
-
|
| 10 |
-
# Install system dependencies
|
| 11 |
-
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 12 |
-
curl \
|
| 13 |
-
&& rm -rf /var/lib/apt/lists/*
|
| 14 |
-
|
| 15 |
-
# Install Python dependencies that all environments need
|
| 16 |
-
RUN pip install --no-cache-dir \
|
| 17 |
-
fastapi>=0.104.0 \
|
| 18 |
-
"uvicorn[standard]>=0.24.0" \
|
| 19 |
-
requests>=2.25.0 \
|
| 20 |
-
wsproto>=1.0.0
|
| 21 |
-
|
| 22 |
-
# Set working directory
|
| 23 |
-
WORKDIR /app
|
| 24 |
-
|
| 25 |
-
# Default environment variables
|
| 26 |
-
ENV PYTHONPATH=/app/src
|
| 27 |
-
ENV PYTHONUNBUFFERED=1
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
# Install SUMO system dependencies
|
|
|
|
| 33 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 34 |
sumo \
|
| 35 |
sumo-tools \
|
|
@@ -39,6 +23,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
|
|
| 39 |
ENV SUMO_HOME=/usr/share/sumo
|
| 40 |
|
| 41 |
# Install SUMO-RL and Python dependencies
|
|
|
|
| 42 |
RUN pip install --no-cache-dir \
|
| 43 |
gymnasium>=0.28 \
|
| 44 |
pettingzoo>=1.24.3 \
|
|
@@ -48,6 +33,16 @@ RUN pip install --no-cache-dir \
|
|
| 48 |
traci>=1.14.0 \
|
| 49 |
sumo-rl>=1.4.5
|
| 50 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 51 |
# SUMO environment variables (can be overridden at runtime)
|
| 52 |
ENV SUMO_NET_FILE=/app/nets/single-intersection/single-intersection.net.xml
|
| 53 |
ENV SUMO_ROUTE_FILE=/app/nets/single-intersection/single-intersection.rou.xml
|
|
@@ -59,10 +54,8 @@ ENV SUMO_MAX_GREEN=50
|
|
| 59 |
ENV SUMO_REWARD_FN=diff-waiting-time
|
| 60 |
ENV SUMO_SEED=42
|
| 61 |
|
| 62 |
-
|
| 63 |
-
|
| 64 |
-
COPY src/core/ /app/src/core/
|
| 65 |
-
COPY src/envs/sumo_rl_env/ /app/src/envs/sumo_rl_env/
|
| 66 |
|
| 67 |
# Health check
|
| 68 |
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
|
@@ -70,4 +63,6 @@ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
|
| 70 |
|
| 71 |
# Run the FastAPI server
|
| 72 |
CMD ["uvicorn", "envs.sumo_rl_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
|
|
|
|
|
|
| 73 |
ENV ENABLE_WEB_INTERFACE=true
|
|
|
|
| 1 |
+
# Dockerfile for SUMO-RL Environment
|
| 2 |
+
# This image provides traffic signal control via SUMO (Simulation of Urban MObility)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 3 |
|
| 4 |
+
# Configurable base image - defaults to local build, can be overridden for CI/CD
|
| 5 |
+
# Base image provides: fastapi, uvicorn, requests, curl, PYTHONPATH=/app/src
|
| 6 |
+
#
|
| 7 |
+
# Local build: docker build -t envtorch-base:latest -f src/core/containers/images/Dockerfile .
|
| 8 |
+
# docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
|
| 9 |
+
#
|
| 10 |
+
# CI/CD build: docker build --build-arg BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest \
|
| 11 |
+
# -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
|
| 12 |
+
ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
|
| 13 |
+
FROM ghcr.io/meta-pytorch/openenv-base:latest
|
| 14 |
|
| 15 |
# Install SUMO system dependencies
|
| 16 |
+
# SUMO is available in Debian repositories
|
| 17 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 18 |
sumo \
|
| 19 |
sumo-tools \
|
|
|
|
| 23 |
ENV SUMO_HOME=/usr/share/sumo
|
| 24 |
|
| 25 |
# Install SUMO-RL and Python dependencies
|
| 26 |
+
# sumo-rl includes: gymnasium, pettingzoo, numpy, pandas, sumolib, traci
|
| 27 |
RUN pip install --no-cache-dir \
|
| 28 |
gymnasium>=0.28 \
|
| 29 |
pettingzoo>=1.24.3 \
|
|
|
|
| 33 |
traci>=1.14.0 \
|
| 34 |
sumo-rl>=1.4.5
|
| 35 |
|
| 36 |
+
# Copy OpenEnv core (base image already set WORKDIR=/app)
|
| 37 |
+
COPY src/core/ /app/src/core/
|
| 38 |
+
|
| 39 |
+
# Copy SUMO-RL environment code (includes nets/)
|
| 40 |
+
COPY envs/sumo_rl_env/ /app/envs/sumo_rl_env/
|
| 41 |
+
|
| 42 |
+
# Copy example network files to expected location
|
| 43 |
+
# Default: single-intersection (simple 4-way intersection)
|
| 44 |
+
COPY envs/sumo_rl_env/nets/single-intersection/ /app/nets/single-intersection/
|
| 45 |
+
|
| 46 |
# SUMO environment variables (can be overridden at runtime)
|
| 47 |
ENV SUMO_NET_FILE=/app/nets/single-intersection/single-intersection.net.xml
|
| 48 |
ENV SUMO_ROUTE_FILE=/app/nets/single-intersection/single-intersection.rou.xml
|
|
|
|
| 54 |
ENV SUMO_REWARD_FN=diff-waiting-time
|
| 55 |
ENV SUMO_SEED=42
|
| 56 |
|
| 57 |
+
# Expose port
|
| 58 |
+
EXPOSE 8000
|
|
|
|
|
|
|
| 59 |
|
| 60 |
# Health check
|
| 61 |
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
|
|
|
|
| 63 |
|
| 64 |
# Run the FastAPI server
|
| 65 |
CMD ["uvicorn", "envs.sumo_rl_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
| 66 |
+
ENV PYTHONPATH=/app/src/core:/app/src:${PYTHONPATH}
|
| 67 |
+
|
| 68 |
ENV ENABLE_WEB_INTERFACE=true
|
README.md
CHANGED
|
@@ -1,56 +1,357 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji: 🐳
|
| 4 |
-
colorFrom: blue
|
| 5 |
-
colorTo: green
|
| 6 |
sdk: docker
|
| 7 |
-
pinned: false
|
| 8 |
app_port: 8000
|
| 9 |
base_path: /web
|
|
|
|
|
|
|
|
|
|
| 10 |
---
|
| 11 |
|
| 12 |
-
#
|
| 13 |
|
| 14 |
-
|
| 15 |
|
| 16 |
-
|
| 17 |
|
| 18 |
-
|
| 19 |
-
Built with FastAPI and OpenEnv framework.
|
| 20 |
|
| 21 |
-
|
| 22 |
|
| 23 |
-
|
| 24 |
-
- **HumanAgent Interface**: Interact with the environment using a web form
|
| 25 |
-
- **State Observer**: Real-time view of environment state and action history
|
| 26 |
-
- **Live Updates**: WebSocket-based real-time updates
|
| 27 |
|
| 28 |
-
|
| 29 |
|
| 30 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 31 |
|
| 32 |
-
|
| 33 |
|
| 34 |
-
###
|
| 35 |
-
|
| 36 |
-
```
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
```
|
| 43 |
|
| 44 |
-
###
|
| 45 |
-
-
|
| 46 |
-
-
|
| 47 |
-
-
|
| 48 |
-
- **Configurable Rewards**: Waiting time, queue length, pressure metrics
|
| 49 |
|
| 50 |
-
##
|
| 51 |
|
| 52 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 53 |
|
| 54 |
-
##
|
| 55 |
|
| 56 |
-
|
|
|
|
| 1 |
---
|
| 2 |
+
title: sumo_rl_env Environment
|
|
|
|
|
|
|
|
|
|
| 3 |
sdk: docker
|
|
|
|
| 4 |
app_port: 8000
|
| 5 |
base_path: /web
|
| 6 |
+
tags:
|
| 7 |
+
- openenv
|
| 8 |
+
- openenv-main
|
| 9 |
---
|
| 10 |
|
| 11 |
+
# sumo_rl_env Environment
|
| 12 |
|
| 13 |
+
Space URL: `https://huggingface.co/spaces/openenv/sumo_rl_env`
|
| 14 |
|
| 15 |
+
OpenEnv pinned ref: `main`
|
| 16 |
|
| 17 |
+
# SUMO-RL Environment
|
|
|
|
| 18 |
|
| 19 |
+
Integration of traffic signal control with the OpenEnv framework via SUMO (Simulation of Urban MObility) and SUMO-RL.
|
| 20 |
|
| 21 |
+
## Overview
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
+
This environment enables reinforcement learning for **traffic signal control** using SUMO, a microscopic traffic simulation package. Train RL agents to optimize traffic light timing and minimize vehicle delays.
|
| 24 |
|
| 25 |
+
**Key Features**:
|
| 26 |
+
- **Realistic traffic simulation** via SUMO
|
| 27 |
+
- **Single-agent mode** for single intersection control
|
| 28 |
+
- **Configurable rewards** (waiting time, queue, pressure, speed)
|
| 29 |
+
- **Multiple networks** supported (custom .net.xml and .rou.xml files)
|
| 30 |
+
- **Docker-ready** with pre-bundled example network
|
| 31 |
|
| 32 |
+
## Quick Start
|
| 33 |
|
| 34 |
+
### Using Docker (Recommended)
|
| 35 |
+
|
| 36 |
+
```python
|
| 37 |
+
from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 38 |
+
|
| 39 |
+
# Automatically starts container
|
| 40 |
+
env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 41 |
+
|
| 42 |
+
# Reset environment
|
| 43 |
+
result = env.reset()
|
| 44 |
+
print(f"Observation shape: {result.observation.observation_shape}")
|
| 45 |
+
print(f"Available actions: {result.observation.action_mask}")
|
| 46 |
+
|
| 47 |
+
# Take action (select next green phase)
|
| 48 |
+
result = env.step(SumoAction(phase_id=1))
|
| 49 |
+
print(f"Reward: {result.reward}, Done: {result.done}")
|
| 50 |
+
|
| 51 |
+
# Get state
|
| 52 |
+
state = env.state()
|
| 53 |
+
print(f"Simulation time: {state.sim_time}")
|
| 54 |
+
print(f"Total vehicles: {state.total_vehicles}")
|
| 55 |
+
print(f"Mean waiting time: {state.mean_waiting_time}")
|
| 56 |
+
|
| 57 |
+
# Cleanup
|
| 58 |
+
env.close()
|
| 59 |
+
```
|
| 60 |
+
|
| 61 |
+
### Building the Docker Image
|
| 62 |
+
|
| 63 |
+
```bash
|
| 64 |
+
cd OpenEnv
|
| 65 |
+
|
| 66 |
+
# Build base image first (if not already built)
|
| 67 |
+
docker build -t envtorch-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 68 |
+
|
| 69 |
+
# Build SUMO-RL environment
|
| 70 |
+
docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
|
| 71 |
+
```
|
| 72 |
+
|
| 73 |
+
### Running with Different Configurations
|
| 74 |
+
|
| 75 |
+
```bash
|
| 76 |
+
# Default: single-intersection
|
| 77 |
+
docker run -p 8000:8000 sumo-rl-env:latest
|
| 78 |
+
|
| 79 |
+
# Longer simulation
|
| 80 |
+
docker run -p 8000:8000 \
|
| 81 |
+
-e SUMO_NUM_SECONDS=50000 \
|
| 82 |
+
sumo-rl-env:latest
|
| 83 |
+
|
| 84 |
+
# Different reward function
|
| 85 |
+
docker run -p 8000:8000 \
|
| 86 |
+
-e SUMO_REWARD_FN=queue \
|
| 87 |
+
sumo-rl-env:latest
|
| 88 |
+
|
| 89 |
+
# Custom seed for reproducibility
|
| 90 |
+
docker run -p 8000:8000 \
|
| 91 |
+
-e SUMO_SEED=123 \
|
| 92 |
+
sumo-rl-env:latest
|
| 93 |
+
```
|
| 94 |
+
|
| 95 |
+
## Observation
|
| 96 |
+
|
| 97 |
+
The observation is a vector containing:
|
| 98 |
+
- **Phase one-hot**: Current active green phase (one-hot encoded)
|
| 99 |
+
- **Min green flag**: Binary indicator if minimum green time has passed
|
| 100 |
+
- **Lane densities**: Number of vehicles / lane capacity for each incoming lane
|
| 101 |
+
- **Lane queues**: Number of queued vehicles / lane capacity for each incoming lane
|
| 102 |
+
|
| 103 |
+
Observation size varies by network topology (depends on number of phases and lanes).
|
| 104 |
+
|
| 105 |
+
**Default (single-intersection)**:
|
| 106 |
+
- 4 green phases
|
| 107 |
+
- 8 incoming lanes
|
| 108 |
+
- Observation size: ~21 elements
|
| 109 |
+
|
| 110 |
+
## Action Space
|
| 111 |
+
|
| 112 |
+
The action space is discrete and represents selecting the next green phase to activate.
|
| 113 |
+
|
| 114 |
+
- **Action type**: Discrete
|
| 115 |
+
- **Action range**: `[0, num_green_phases - 1]`
|
| 116 |
+
- **Default (single-intersection)**: 4 actions (one per green phase)
|
| 117 |
+
|
| 118 |
+
When a phase change is requested, SUMO automatically inserts a yellow phase before switching.
|
| 119 |
+
|
| 120 |
+
## Rewards
|
| 121 |
+
|
| 122 |
+
Default reward function is **change in cumulative waiting time**:
|
| 123 |
+
```
|
| 124 |
+
reward = -(total_waiting_time_now - total_waiting_time_previous)
|
| 125 |
+
```
|
| 126 |
+
|
| 127 |
+
Positive rewards indicate waiting time decreased (good).
|
| 128 |
+
|
| 129 |
+
### Available Reward Functions
|
| 130 |
+
|
| 131 |
+
Set via `SUMO_REWARD_FN` environment variable:
|
| 132 |
+
|
| 133 |
+
- **`diff-waiting-time`** (default): Change in cumulative waiting time
|
| 134 |
+
- **`average-speed`**: Average speed of all vehicles
|
| 135 |
+
- **`queue`**: Negative total queue length
|
| 136 |
+
- **`pressure`**: Pressure metric (incoming - outgoing vehicles)
|
| 137 |
+
|
| 138 |
+
## Configuration
|
| 139 |
+
|
| 140 |
+
### Environment Variables
|
| 141 |
+
|
| 142 |
+
| Variable | Default | Description |
|
| 143 |
+
|----------|---------|-------------|
|
| 144 |
+
| `SUMO_NET_FILE` | `/app/nets/single-intersection.net.xml` | Network topology file |
|
| 145 |
+
| `SUMO_ROUTE_FILE` | `/app/nets/single-intersection.rou.xml` | Vehicle routes file |
|
| 146 |
+
| `SUMO_NUM_SECONDS` | `20000` | Simulation duration (seconds) |
|
| 147 |
+
| `SUMO_DELTA_TIME` | `5` | Seconds between agent actions |
|
| 148 |
+
| `SUMO_YELLOW_TIME` | `2` | Yellow phase duration (seconds) |
|
| 149 |
+
| `SUMO_MIN_GREEN` | `5` | Minimum green time (seconds) |
|
| 150 |
+
| `SUMO_MAX_GREEN` | `50` | Maximum green time (seconds) |
|
| 151 |
+
| `SUMO_REWARD_FN` | `diff-waiting-time` | Reward function name |
|
| 152 |
+
| `SUMO_SEED` | `42` | Random seed (use for reproducibility) |
|
| 153 |
+
|
| 154 |
+
### Using Custom Networks
|
| 155 |
+
|
| 156 |
+
To use your own SUMO network:
|
| 157 |
+
|
| 158 |
+
```python
|
| 159 |
+
from envs.sumo_rl_env import SumoRLEnv
|
| 160 |
+
|
| 161 |
+
env = SumoRLEnv.from_docker_image(
|
| 162 |
+
"sumo-rl-env:latest",
|
| 163 |
+
volumes={
|
| 164 |
+
"/path/to/your/nets": {"bind": "/nets", "mode": "ro"}
|
| 165 |
+
},
|
| 166 |
+
environment={
|
| 167 |
+
"SUMO_NET_FILE": "/nets/my-network.net.xml",
|
| 168 |
+
"SUMO_ROUTE_FILE": "/nets/my-routes.rou.xml",
|
| 169 |
+
}
|
| 170 |
+
)
|
| 171 |
+
```
|
| 172 |
+
|
| 173 |
+
Your network directory should contain:
|
| 174 |
+
- `.net.xml` - Network topology (roads, junctions, traffic lights)
|
| 175 |
+
- `.rou.xml` - Vehicle routes (trip definitions, flow rates)
|
| 176 |
+
|
| 177 |
+
## API Reference
|
| 178 |
+
|
| 179 |
+
### SumoAction
|
| 180 |
+
|
| 181 |
+
```python
|
| 182 |
+
@dataclass
|
| 183 |
+
class SumoAction(Action):
|
| 184 |
+
phase_id: int # Green phase to activate (0 to num_phases-1)
|
| 185 |
+
ts_id: str = "0" # Traffic signal ID (for multi-agent)
|
| 186 |
+
```
|
| 187 |
+
|
| 188 |
+
### SumoObservation
|
| 189 |
+
|
| 190 |
+
```python
|
| 191 |
+
@dataclass
|
| 192 |
+
class SumoObservation(Observation):
|
| 193 |
+
observation: List[float] # Observation vector
|
| 194 |
+
observation_shape: List[int] # Shape for reshaping
|
| 195 |
+
action_mask: List[int] # Valid action indices
|
| 196 |
+
sim_time: float # Current simulation time
|
| 197 |
+
done: bool # Episode finished
|
| 198 |
+
reward: Optional[float] # Reward from last action
|
| 199 |
+
metadata: Dict # System metrics
|
| 200 |
+
```
|
| 201 |
+
|
| 202 |
+
### SumoState
|
| 203 |
+
|
| 204 |
+
```python
|
| 205 |
+
@dataclass
|
| 206 |
+
class SumoState(State):
|
| 207 |
+
episode_id: str # Unique episode ID
|
| 208 |
+
step_count: int # Steps taken
|
| 209 |
+
net_file: str # Network file path
|
| 210 |
+
route_file: str # Route file path
|
| 211 |
+
sim_time: float # Current simulation time
|
| 212 |
+
total_vehicles: int # Total vehicles in simulation
|
| 213 |
+
total_waiting_time: float # Cumulative waiting time
|
| 214 |
+
mean_waiting_time: float # Mean waiting time
|
| 215 |
+
mean_speed: float # Mean vehicle speed
|
| 216 |
+
# ... configuration parameters
|
| 217 |
+
```
|
| 218 |
+
|
| 219 |
+
## Example Training Loop
|
| 220 |
+
|
| 221 |
+
```python
|
| 222 |
+
from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 223 |
+
import numpy as np
|
| 224 |
+
|
| 225 |
+
# Start environment
|
| 226 |
+
env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 227 |
+
|
| 228 |
+
# Training loop
|
| 229 |
+
for episode in range(10):
|
| 230 |
+
result = env.reset()
|
| 231 |
+
episode_reward = 0
|
| 232 |
+
steps = 0
|
| 233 |
+
|
| 234 |
+
while not result.done and steps < 1000:
|
| 235 |
+
# Random policy (replace with your RL agent)
|
| 236 |
+
action_id = np.random.choice(result.observation.action_mask)
|
| 237 |
+
|
| 238 |
+
# Take action
|
| 239 |
+
result = env.step(SumoAction(phase_id=int(action_id)))
|
| 240 |
+
|
| 241 |
+
episode_reward += result.reward or 0
|
| 242 |
+
steps += 1
|
| 243 |
+
|
| 244 |
+
# Print progress every 100 steps
|
| 245 |
+
if steps % 100 == 0:
|
| 246 |
+
state = env.state()
|
| 247 |
+
print(f"Step {steps}: "
|
| 248 |
+
f"reward={result.reward:.2f}, "
|
| 249 |
+
f"vehicles={state.total_vehicles}, "
|
| 250 |
+
f"waiting={state.mean_waiting_time:.2f}")
|
| 251 |
+
|
| 252 |
+
print(f"Episode {episode}: total_reward={episode_reward:.2f}, steps={steps}")
|
| 253 |
+
|
| 254 |
+
env.close()
|
| 255 |
+
```
|
| 256 |
+
|
| 257 |
+
## Performance Notes
|
| 258 |
+
|
| 259 |
+
### Simulation Speed
|
| 260 |
+
|
| 261 |
+
- **Reset time**: 1-5 seconds (starts new SUMO simulation)
|
| 262 |
+
- **Step time**: ~50-200ms per step (depends on network size)
|
| 263 |
+
- **Episode duration**: Minutes (20,000 sim seconds with delta_time=5 → ~4,000 steps)
|
| 264 |
+
|
| 265 |
+
### Optimization
|
| 266 |
+
|
| 267 |
+
For faster simulation:
|
| 268 |
+
1. Reduce `SUMO_NUM_SECONDS` for shorter episodes
|
| 269 |
+
2. Increase `SUMO_DELTA_TIME` for fewer decisions
|
| 270 |
+
3. Use simpler networks with fewer vehicles
|
| 271 |
+
|
| 272 |
+
## Architecture
|
| 273 |
+
|
| 274 |
+
```
|
| 275 |
+
┌─────────────────────────────────┐
|
| 276 |
+
│ Client: SumoRLEnv │
|
| 277 |
+
│ .step(phase_id=1) │
|
| 278 |
+
└──────────────┬──────────────────┘
|
| 279 |
+
│ HTTP
|
| 280 |
+
┌──────────────▼──────────────────┐
|
| 281 |
+
│ FastAPI Server (Docker) │
|
| 282 |
+
│ SumoEnvironment │
|
| 283 |
+
│ ├─ Wraps sumo_rl │
|
| 284 |
+
│ ├─ Single-agent mode │
|
| 285 |
+
│ └─ No GUI │
|
| 286 |
+
└──────────────┬──────────────────┘
|
| 287 |
+
│
|
| 288 |
+
┌──────────────▼──────────────────┐
|
| 289 |
+
│ SUMO Simulator │
|
| 290 |
+
│ - Reads .net.xml (network) │
|
| 291 |
+
│ - Reads .rou.xml (routes) │
|
| 292 |
+
│ - Simulates traffic flow │
|
| 293 |
+
│ - Provides observations │
|
| 294 |
+
└─────────────────────────────────┘
|
| 295 |
+
```
|
| 296 |
+
|
| 297 |
+
## Bundled Network
|
| 298 |
+
|
| 299 |
+
The default `single-intersection` network is a simple 4-way intersection with:
|
| 300 |
+
- **4 incoming roads** (North, South, East, West)
|
| 301 |
+
- **4 green phases** (NS straight, NS left, EW straight, EW left)
|
| 302 |
+
- **Vehicle flow**: Continuous stream with varying rates
|
| 303 |
+
|
| 304 |
+
## Limitations
|
| 305 |
+
|
| 306 |
+
- **No GUI in Docker**: SUMO GUI requires X server (not available in containers)
|
| 307 |
+
- **Single-agent only**: Multi-agent (multiple intersections) coming in future version
|
| 308 |
+
- **Fixed network per container**: Each container uses one network topology
|
| 309 |
+
- **Memory usage**: ~500MB for small networks, 2-4GB for large city networks
|
| 310 |
+
|
| 311 |
+
## Troubleshooting
|
| 312 |
+
|
| 313 |
+
### Container won't start
|
| 314 |
+
```bash
|
| 315 |
+
# Check logs
|
| 316 |
+
docker logs <container-id>
|
| 317 |
+
|
| 318 |
+
# Verify network files exist
|
| 319 |
+
docker run sumo-rl-env:latest ls -la /app/nets/
|
| 320 |
+
```
|
| 321 |
+
|
| 322 |
+
### "SUMO_HOME not set" error
|
| 323 |
+
This should be automatic in Docker. If running locally:
|
| 324 |
+
```bash
|
| 325 |
+
export SUMO_HOME=/usr/share/sumo
|
| 326 |
```
|
| 327 |
|
| 328 |
+
### Slow performance
|
| 329 |
+
- Reduce simulation duration: `SUMO_NUM_SECONDS=5000`
|
| 330 |
+
- Increase action interval: `SUMO_DELTA_TIME=10`
|
| 331 |
+
- Use smaller networks with fewer vehicles
|
|
|
|
| 332 |
|
| 333 |
+
## References
|
| 334 |
|
| 335 |
+
- [SUMO Documentation](https://sumo.dlr.de/docs/)
|
| 336 |
+
- [SUMO-RL GitHub](https://github.com/LucasAlegre/sumo-rl)
|
| 337 |
+
- [SUMO-RL Paper](https://peerj.com/articles/cs-575/)
|
| 338 |
+
- [RESCO Benchmarks](https://github.com/jault/RESCO)
|
| 339 |
+
|
| 340 |
+
## Citation
|
| 341 |
+
|
| 342 |
+
If you use SUMO-RL in your research, please cite:
|
| 343 |
+
|
| 344 |
+
```bibtex
|
| 345 |
+
@misc{sumorl,
|
| 346 |
+
author = {Lucas N. Alegre},
|
| 347 |
+
title = {{SUMO-RL}},
|
| 348 |
+
year = {2019},
|
| 349 |
+
publisher = {GitHub},
|
| 350 |
+
journal = {GitHub repository},
|
| 351 |
+
howpublished = {\url{https://github.com/LucasAlegre/sumo-rl}},
|
| 352 |
+
}
|
| 353 |
+
```
|
| 354 |
|
| 355 |
+
## License
|
| 356 |
|
| 357 |
+
This integration is licensed under the BSD-style license. SUMO-RL and SUMO have their own licenses.
|
__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
SUMO-RL Environment for OpenEnv.
|
| 9 |
+
|
| 10 |
+
This module provides OpenEnv integration for traffic signal control using
|
| 11 |
+
SUMO (Simulation of Urban MObility) via the SUMO-RL library.
|
| 12 |
+
|
| 13 |
+
Example:
|
| 14 |
+
>>> from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 15 |
+
>>>
|
| 16 |
+
>>> # Connect to a running server or start via Docker
|
| 17 |
+
>>> env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 18 |
+
>>>
|
| 19 |
+
>>> # Reset and interact
|
| 20 |
+
>>> result = env.reset()
|
| 21 |
+
>>> result = env.step(SumoAction(phase_id=1))
|
| 22 |
+
>>> print(result.reward, result.done)
|
| 23 |
+
>>>
|
| 24 |
+
>>> # Cleanup
|
| 25 |
+
>>> env.close()
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
from .client import SumoRLEnv
|
| 29 |
+
from .models import SumoAction, SumoObservation, SumoState
|
| 30 |
+
|
| 31 |
+
__all__ = ["SumoRLEnv", "SumoAction", "SumoObservation", "SumoState"]
|
client.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Client for SUMO-RL environment.
|
| 9 |
+
|
| 10 |
+
This module provides a client to interact with the SUMO traffic signal
|
| 11 |
+
control environment via WebSocket for persistent sessions.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from typing import Any, Dict
|
| 15 |
+
|
| 16 |
+
from openenv.core.client_types import StepResult
|
| 17 |
+
from openenv.core.env_client import EnvClient
|
| 18 |
+
|
| 19 |
+
from .models import SumoAction, SumoObservation, SumoState
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SumoRLEnv(EnvClient[SumoAction, SumoObservation, SumoState]):
|
| 23 |
+
"""
|
| 24 |
+
Client for SUMO-RL traffic signal control environment.
|
| 25 |
+
|
| 26 |
+
This client maintains a persistent WebSocket connection to a SUMO
|
| 27 |
+
environment server to control traffic signals using reinforcement learning.
|
| 28 |
+
|
| 29 |
+
Example:
|
| 30 |
+
>>> # Start container and connect
|
| 31 |
+
>>> env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 32 |
+
>>> try:
|
| 33 |
+
... # Reset environment
|
| 34 |
+
... result = env.reset()
|
| 35 |
+
... print(f"Observation shape: {result.observation.observation_shape}")
|
| 36 |
+
... print(f"Action space: {result.observation.action_mask}")
|
| 37 |
+
...
|
| 38 |
+
... # Take action
|
| 39 |
+
... result = env.step(SumoAction(phase_id=1))
|
| 40 |
+
... print(f"Reward: {result.reward}, Done: {result.done}")
|
| 41 |
+
...
|
| 42 |
+
... # Get state
|
| 43 |
+
... state = env.state()
|
| 44 |
+
... print(f"Sim time: {state.sim_time}, Total vehicles: {state.total_vehicles}")
|
| 45 |
+
... finally:
|
| 46 |
+
... env.close()
|
| 47 |
+
|
| 48 |
+
Example with custom network:
|
| 49 |
+
>>> # Use custom SUMO network via volume mount
|
| 50 |
+
>>> env = SumoRLEnv.from_docker_image(
|
| 51 |
+
... "sumo-rl-env:latest",
|
| 52 |
+
... port=8000,
|
| 53 |
+
... volumes={
|
| 54 |
+
... "/path/to/my/nets": {"bind": "/nets", "mode": "ro"}
|
| 55 |
+
... },
|
| 56 |
+
... environment={
|
| 57 |
+
... "SUMO_NET_FILE": "/nets/my-network.net.xml",
|
| 58 |
+
... "SUMO_ROUTE_FILE": "/nets/my-routes.rou.xml",
|
| 59 |
+
... }
|
| 60 |
+
... )
|
| 61 |
+
|
| 62 |
+
Example with configuration:
|
| 63 |
+
>>> # Adjust simulation parameters
|
| 64 |
+
>>> env = SumoRLEnv.from_docker_image(
|
| 65 |
+
... "sumo-rl-env:latest",
|
| 66 |
+
... environment={
|
| 67 |
+
... "SUMO_NUM_SECONDS": "10000",
|
| 68 |
+
... "SUMO_DELTA_TIME": "10",
|
| 69 |
+
... "SUMO_REWARD_FN": "queue",
|
| 70 |
+
... "SUMO_SEED": "123",
|
| 71 |
+
... }
|
| 72 |
+
... )
|
| 73 |
+
"""
|
| 74 |
+
|
| 75 |
+
def _step_payload(self, action: SumoAction) -> Dict[str, Any]:
|
| 76 |
+
"""
|
| 77 |
+
Convert SumoAction to JSON payload for HTTP request.
|
| 78 |
+
|
| 79 |
+
Args:
|
| 80 |
+
action: SumoAction containing phase_id to execute.
|
| 81 |
+
|
| 82 |
+
Returns:
|
| 83 |
+
Dictionary payload for step endpoint.
|
| 84 |
+
"""
|
| 85 |
+
return {
|
| 86 |
+
"phase_id": action.phase_id,
|
| 87 |
+
"ts_id": action.ts_id,
|
| 88 |
+
}
|
| 89 |
+
|
| 90 |
+
def _parse_result(self, payload: Dict[str, Any]) -> StepResult[SumoObservation]:
|
| 91 |
+
"""
|
| 92 |
+
Parse step result from HTTP response JSON.
|
| 93 |
+
|
| 94 |
+
Args:
|
| 95 |
+
payload: JSON response from step endpoint.
|
| 96 |
+
|
| 97 |
+
Returns:
|
| 98 |
+
StepResult containing SumoObservation.
|
| 99 |
+
"""
|
| 100 |
+
obs_data = payload.get("observation", {})
|
| 101 |
+
|
| 102 |
+
observation = SumoObservation(
|
| 103 |
+
observation=obs_data.get("observation", []),
|
| 104 |
+
observation_shape=obs_data.get("observation_shape", []),
|
| 105 |
+
action_mask=obs_data.get("action_mask", []),
|
| 106 |
+
sim_time=obs_data.get("sim_time", 0.0),
|
| 107 |
+
done=obs_data.get("done", False),
|
| 108 |
+
reward=obs_data.get("reward"),
|
| 109 |
+
metadata=obs_data.get("metadata", {}),
|
| 110 |
+
)
|
| 111 |
+
|
| 112 |
+
return StepResult(
|
| 113 |
+
observation=observation,
|
| 114 |
+
reward=payload.get("reward"),
|
| 115 |
+
done=payload.get("done", False),
|
| 116 |
+
)
|
| 117 |
+
|
| 118 |
+
def _parse_state(self, payload: Dict[str, Any]) -> SumoState:
|
| 119 |
+
"""
|
| 120 |
+
Parse state from HTTP response JSON.
|
| 121 |
+
|
| 122 |
+
Args:
|
| 123 |
+
payload: JSON response from state endpoint.
|
| 124 |
+
|
| 125 |
+
Returns:
|
| 126 |
+
SumoState object.
|
| 127 |
+
"""
|
| 128 |
+
return SumoState(
|
| 129 |
+
episode_id=payload.get("episode_id", ""),
|
| 130 |
+
step_count=payload.get("step_count", 0),
|
| 131 |
+
net_file=payload.get("net_file", ""),
|
| 132 |
+
route_file=payload.get("route_file", ""),
|
| 133 |
+
num_seconds=payload.get("num_seconds", 20000),
|
| 134 |
+
delta_time=payload.get("delta_time", 5),
|
| 135 |
+
yellow_time=payload.get("yellow_time", 2),
|
| 136 |
+
min_green=payload.get("min_green", 5),
|
| 137 |
+
max_green=payload.get("max_green", 50),
|
| 138 |
+
reward_fn=payload.get("reward_fn", "diff-waiting-time"),
|
| 139 |
+
sim_time=payload.get("sim_time", 0.0),
|
| 140 |
+
total_vehicles=payload.get("total_vehicles", 0),
|
| 141 |
+
total_waiting_time=payload.get("total_waiting_time", 0.0),
|
| 142 |
+
mean_waiting_time=payload.get("mean_waiting_time", 0.0),
|
| 143 |
+
mean_speed=payload.get("mean_speed", 0.0),
|
| 144 |
+
)
|
envs/sumo_rl_env/README.md
ADDED
|
@@ -0,0 +1,341 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# SUMO-RL Environment
|
| 2 |
+
|
| 3 |
+
Integration of traffic signal control with the OpenEnv framework via SUMO (Simulation of Urban MObility) and SUMO-RL.
|
| 4 |
+
|
| 5 |
+
## Overview
|
| 6 |
+
|
| 7 |
+
This environment enables reinforcement learning for **traffic signal control** using SUMO, a microscopic traffic simulation package. Train RL agents to optimize traffic light timing and minimize vehicle delays.
|
| 8 |
+
|
| 9 |
+
**Key Features**:
|
| 10 |
+
- **Realistic traffic simulation** via SUMO
|
| 11 |
+
- **Single-agent mode** for single intersection control
|
| 12 |
+
- **Configurable rewards** (waiting time, queue, pressure, speed)
|
| 13 |
+
- **Multiple networks** supported (custom .net.xml and .rou.xml files)
|
| 14 |
+
- **Docker-ready** with pre-bundled example network
|
| 15 |
+
|
| 16 |
+
## Quick Start
|
| 17 |
+
|
| 18 |
+
### Using Docker (Recommended)
|
| 19 |
+
|
| 20 |
+
```python
|
| 21 |
+
from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 22 |
+
|
| 23 |
+
# Automatically starts container
|
| 24 |
+
env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 25 |
+
|
| 26 |
+
# Reset environment
|
| 27 |
+
result = env.reset()
|
| 28 |
+
print(f"Observation shape: {result.observation.observation_shape}")
|
| 29 |
+
print(f"Available actions: {result.observation.action_mask}")
|
| 30 |
+
|
| 31 |
+
# Take action (select next green phase)
|
| 32 |
+
result = env.step(SumoAction(phase_id=1))
|
| 33 |
+
print(f"Reward: {result.reward}, Done: {result.done}")
|
| 34 |
+
|
| 35 |
+
# Get state
|
| 36 |
+
state = env.state()
|
| 37 |
+
print(f"Simulation time: {state.sim_time}")
|
| 38 |
+
print(f"Total vehicles: {state.total_vehicles}")
|
| 39 |
+
print(f"Mean waiting time: {state.mean_waiting_time}")
|
| 40 |
+
|
| 41 |
+
# Cleanup
|
| 42 |
+
env.close()
|
| 43 |
+
```
|
| 44 |
+
|
| 45 |
+
### Building the Docker Image
|
| 46 |
+
|
| 47 |
+
```bash
|
| 48 |
+
cd OpenEnv
|
| 49 |
+
|
| 50 |
+
# Build base image first (if not already built)
|
| 51 |
+
docker build -t envtorch-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 52 |
+
|
| 53 |
+
# Build SUMO-RL environment
|
| 54 |
+
docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
|
| 55 |
+
```
|
| 56 |
+
|
| 57 |
+
### Running with Different Configurations
|
| 58 |
+
|
| 59 |
+
```bash
|
| 60 |
+
# Default: single-intersection
|
| 61 |
+
docker run -p 8000:8000 sumo-rl-env:latest
|
| 62 |
+
|
| 63 |
+
# Longer simulation
|
| 64 |
+
docker run -p 8000:8000 \
|
| 65 |
+
-e SUMO_NUM_SECONDS=50000 \
|
| 66 |
+
sumo-rl-env:latest
|
| 67 |
+
|
| 68 |
+
# Different reward function
|
| 69 |
+
docker run -p 8000:8000 \
|
| 70 |
+
-e SUMO_REWARD_FN=queue \
|
| 71 |
+
sumo-rl-env:latest
|
| 72 |
+
|
| 73 |
+
# Custom seed for reproducibility
|
| 74 |
+
docker run -p 8000:8000 \
|
| 75 |
+
-e SUMO_SEED=123 \
|
| 76 |
+
sumo-rl-env:latest
|
| 77 |
+
```
|
| 78 |
+
|
| 79 |
+
## Observation
|
| 80 |
+
|
| 81 |
+
The observation is a vector containing:
|
| 82 |
+
- **Phase one-hot**: Current active green phase (one-hot encoded)
|
| 83 |
+
- **Min green flag**: Binary indicator if minimum green time has passed
|
| 84 |
+
- **Lane densities**: Number of vehicles / lane capacity for each incoming lane
|
| 85 |
+
- **Lane queues**: Number of queued vehicles / lane capacity for each incoming lane
|
| 86 |
+
|
| 87 |
+
Observation size varies by network topology (depends on number of phases and lanes).
|
| 88 |
+
|
| 89 |
+
**Default (bundled single-intersection net)**:
- 2 green phases (North→South and West→East)
- 4 incoming lanes (2 per incoming road)
- Observation size: 11 elements (2 phase one-hot + 1 min-green flag + 4 densities + 4 queues)
|
| 93 |
+
|
| 94 |
+
## Action Space
|
| 95 |
+
|
| 96 |
+
The action space is discrete and represents selecting the next green phase to activate.
|
| 97 |
+
|
| 98 |
+
- **Action type**: Discrete
|
| 99 |
+
- **Action range**: `[0, num_green_phases - 1]`
|
| 100 |
+
- **Default (single-intersection)**: 2 actions (one per green phase)
|
| 101 |
+
|
| 102 |
+
When a phase change is requested, SUMO automatically inserts a yellow phase before switching.
|
| 103 |
+
|
| 104 |
+
## Rewards
|
| 105 |
+
|
| 106 |
+
Default reward function is **change in cumulative waiting time**:
|
| 107 |
+
```
|
| 108 |
+
reward = -(total_waiting_time_now - total_waiting_time_previous)
|
| 109 |
+
```
|
| 110 |
+
|
| 111 |
+
Positive rewards indicate waiting time decreased (good).
|
| 112 |
+
|
| 113 |
+
### Available Reward Functions
|
| 114 |
+
|
| 115 |
+
Set via `SUMO_REWARD_FN` environment variable:
|
| 116 |
+
|
| 117 |
+
- **`diff-waiting-time`** (default): Change in cumulative waiting time
|
| 118 |
+
- **`average-speed`**: Average speed of all vehicles
|
| 119 |
+
- **`queue`**: Negative total queue length
|
| 120 |
+
- **`pressure`**: Pressure metric (incoming - outgoing vehicles)
|
| 121 |
+
|
| 122 |
+
## Configuration
|
| 123 |
+
|
| 124 |
+
### Environment Variables
|
| 125 |
+
|
| 126 |
+
| Variable | Default | Description |
|
| 127 |
+
|----------|---------|-------------|
|
| 128 |
+
| `SUMO_NET_FILE` | `/app/nets/single-intersection.net.xml` | Network topology file |
|
| 129 |
+
| `SUMO_ROUTE_FILE` | `/app/nets/single-intersection.rou.xml` | Vehicle routes file |
|
| 130 |
+
| `SUMO_NUM_SECONDS` | `20000` | Simulation duration (seconds) |
|
| 131 |
+
| `SUMO_DELTA_TIME` | `5` | Seconds between agent actions |
|
| 132 |
+
| `SUMO_YELLOW_TIME` | `2` | Yellow phase duration (seconds) |
|
| 133 |
+
| `SUMO_MIN_GREEN` | `5` | Minimum green time (seconds) |
|
| 134 |
+
| `SUMO_MAX_GREEN` | `50` | Maximum green time (seconds) |
|
| 135 |
+
| `SUMO_REWARD_FN` | `diff-waiting-time` | Reward function name |
|
| 136 |
+
| `SUMO_SEED` | `42` | Random seed (use for reproducibility) |
|
| 137 |
+
|
| 138 |
+
### Using Custom Networks
|
| 139 |
+
|
| 140 |
+
To use your own SUMO network:
|
| 141 |
+
|
| 142 |
+
```python
|
| 143 |
+
from envs.sumo_rl_env import SumoRLEnv
|
| 144 |
+
|
| 145 |
+
env = SumoRLEnv.from_docker_image(
|
| 146 |
+
"sumo-rl-env:latest",
|
| 147 |
+
volumes={
|
| 148 |
+
"/path/to/your/nets": {"bind": "/nets", "mode": "ro"}
|
| 149 |
+
},
|
| 150 |
+
environment={
|
| 151 |
+
"SUMO_NET_FILE": "/nets/my-network.net.xml",
|
| 152 |
+
"SUMO_ROUTE_FILE": "/nets/my-routes.rou.xml",
|
| 153 |
+
}
|
| 154 |
+
)
|
| 155 |
+
```
|
| 156 |
+
|
| 157 |
+
Your network directory should contain:
|
| 158 |
+
- `.net.xml` - Network topology (roads, junctions, traffic lights)
|
| 159 |
+
- `.rou.xml` - Vehicle routes (trip definitions, flow rates)
|
| 160 |
+
|
| 161 |
+
## API Reference
|
| 162 |
+
|
| 163 |
+
### SumoAction
|
| 164 |
+
|
| 165 |
+
```python
|
| 166 |
+
@dataclass
|
| 167 |
+
class SumoAction(Action):
|
| 168 |
+
phase_id: int # Green phase to activate (0 to num_phases-1)
|
| 169 |
+
ts_id: str = "0" # Traffic signal ID (for multi-agent)
|
| 170 |
+
```
|
| 171 |
+
|
| 172 |
+
### SumoObservation
|
| 173 |
+
|
| 174 |
+
```python
|
| 175 |
+
@dataclass
|
| 176 |
+
class SumoObservation(Observation):
|
| 177 |
+
observation: List[float] # Observation vector
|
| 178 |
+
observation_shape: List[int] # Shape for reshaping
|
| 179 |
+
action_mask: List[int] # Valid action indices
|
| 180 |
+
sim_time: float # Current simulation time
|
| 181 |
+
done: bool # Episode finished
|
| 182 |
+
reward: Optional[float] # Reward from last action
|
| 183 |
+
metadata: Dict # System metrics
|
| 184 |
+
```
|
| 185 |
+
|
| 186 |
+
### SumoState
|
| 187 |
+
|
| 188 |
+
```python
|
| 189 |
+
@dataclass
|
| 190 |
+
class SumoState(State):
|
| 191 |
+
episode_id: str # Unique episode ID
|
| 192 |
+
step_count: int # Steps taken
|
| 193 |
+
net_file: str # Network file path
|
| 194 |
+
route_file: str # Route file path
|
| 195 |
+
sim_time: float # Current simulation time
|
| 196 |
+
total_vehicles: int # Total vehicles in simulation
|
| 197 |
+
total_waiting_time: float # Cumulative waiting time
|
| 198 |
+
mean_waiting_time: float # Mean waiting time
|
| 199 |
+
mean_speed: float # Mean vehicle speed
|
| 200 |
+
# ... configuration parameters
|
| 201 |
+
```
|
| 202 |
+
|
| 203 |
+
## Example Training Loop
|
| 204 |
+
|
| 205 |
+
```python
|
| 206 |
+
from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 207 |
+
import numpy as np
|
| 208 |
+
|
| 209 |
+
# Start environment
|
| 210 |
+
env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 211 |
+
|
| 212 |
+
# Training loop
|
| 213 |
+
for episode in range(10):
|
| 214 |
+
result = env.reset()
|
| 215 |
+
episode_reward = 0
|
| 216 |
+
steps = 0
|
| 217 |
+
|
| 218 |
+
while not result.done and steps < 1000:
|
| 219 |
+
# Random policy (replace with your RL agent)
|
| 220 |
+
action_id = np.random.choice(result.observation.action_mask)
|
| 221 |
+
|
| 222 |
+
# Take action
|
| 223 |
+
result = env.step(SumoAction(phase_id=int(action_id)))
|
| 224 |
+
|
| 225 |
+
episode_reward += result.reward or 0
|
| 226 |
+
steps += 1
|
| 227 |
+
|
| 228 |
+
# Print progress every 100 steps
|
| 229 |
+
if steps % 100 == 0:
|
| 230 |
+
state = env.state()
|
| 231 |
+
print(f"Step {steps}: "
|
| 232 |
+
f"reward={result.reward:.2f}, "
|
| 233 |
+
f"vehicles={state.total_vehicles}, "
|
| 234 |
+
f"waiting={state.mean_waiting_time:.2f}")
|
| 235 |
+
|
| 236 |
+
print(f"Episode {episode}: total_reward={episode_reward:.2f}, steps={steps}")
|
| 237 |
+
|
| 238 |
+
env.close()
|
| 239 |
+
```
|
| 240 |
+
|
| 241 |
+
## Performance Notes
|
| 242 |
+
|
| 243 |
+
### Simulation Speed
|
| 244 |
+
|
| 245 |
+
- **Reset time**: 1-5 seconds (starts new SUMO simulation)
|
| 246 |
+
- **Step time**: ~50-200ms per step (depends on network size)
|
| 247 |
+
- **Episode duration**: Minutes (20,000 sim seconds with delta_time=5 → ~4,000 steps)
|
| 248 |
+
|
| 249 |
+
### Optimization
|
| 250 |
+
|
| 251 |
+
For faster simulation:
|
| 252 |
+
1. Reduce `SUMO_NUM_SECONDS` for shorter episodes
|
| 253 |
+
2. Increase `SUMO_DELTA_TIME` for fewer decisions
|
| 254 |
+
3. Use simpler networks with fewer vehicles
|
| 255 |
+
|
| 256 |
+
## Architecture
|
| 257 |
+
|
| 258 |
+
```
|
| 259 |
+
┌─────────────────────────────────┐
|
| 260 |
+
│ Client: SumoRLEnv │
|
| 261 |
+
│ .step(phase_id=1) │
|
| 262 |
+
└──────────────┬──────────────────┘
|
| 263 |
+
│ HTTP
|
| 264 |
+
┌──────────────▼──────────────────┐
|
| 265 |
+
│ FastAPI Server (Docker) │
|
| 266 |
+
│ SumoEnvironment │
|
| 267 |
+
│ ├─ Wraps sumo_rl │
|
| 268 |
+
│ ├─ Single-agent mode │
|
| 269 |
+
│ └─ No GUI │
|
| 270 |
+
└──────────────┬──────────────────┘
|
| 271 |
+
│
|
| 272 |
+
┌──────────────▼──────────────────┐
|
| 273 |
+
│ SUMO Simulator │
|
| 274 |
+
│ - Reads .net.xml (network) │
|
| 275 |
+
│ - Reads .rou.xml (routes) │
|
| 276 |
+
│ - Simulates traffic flow │
|
| 277 |
+
│ - Provides observations │
|
| 278 |
+
└─────────────────────────────────┘
|
| 279 |
+
```
|
| 280 |
+
|
| 281 |
+
## Bundled Network
|
| 282 |
+
|
| 283 |
+
The default `single-intersection` network is a simple intersection with:
- **2 incoming roads** (North and West) feeding 2 outgoing roads (South and East)
- **2 green phases** (North→South and West→East), each followed by a yellow phase
- **Vehicle flow**: Continuous streams with different arrival probabilities per direction
|
| 287 |
+
|
| 288 |
+
## Limitations
|
| 289 |
+
|
| 290 |
+
- **No GUI in Docker**: SUMO GUI requires X server (not available in containers)
|
| 291 |
+
- **Single-agent only**: Multi-agent (multiple intersections) coming in future version
|
| 292 |
+
- **Fixed network per container**: Each container uses one network topology
|
| 293 |
+
- **Memory usage**: ~500MB for small networks, 2-4GB for large city networks
|
| 294 |
+
|
| 295 |
+
## Troubleshooting
|
| 296 |
+
|
| 297 |
+
### Container won't start
|
| 298 |
+
```bash
|
| 299 |
+
# Check logs
|
| 300 |
+
docker logs <container-id>
|
| 301 |
+
|
| 302 |
+
# Verify network files exist
|
| 303 |
+
docker run sumo-rl-env:latest ls -la /app/nets/
|
| 304 |
+
```
|
| 305 |
+
|
| 306 |
+
### "SUMO_HOME not set" error
|
| 307 |
+
This should be automatic in Docker. If running locally:
|
| 308 |
+
```bash
|
| 309 |
+
export SUMO_HOME=/usr/share/sumo
|
| 310 |
+
```
|
| 311 |
+
|
| 312 |
+
### Slow performance
|
| 313 |
+
- Reduce simulation duration: `SUMO_NUM_SECONDS=5000`
|
| 314 |
+
- Increase action interval: `SUMO_DELTA_TIME=10`
|
| 315 |
+
- Use smaller networks with fewer vehicles
|
| 316 |
+
|
| 317 |
+
## References
|
| 318 |
+
|
| 319 |
+
- [SUMO Documentation](https://sumo.dlr.de/docs/)
|
| 320 |
+
- [SUMO-RL GitHub](https://github.com/LucasAlegre/sumo-rl)
|
| 321 |
+
- [SUMO-RL Paper](https://peerj.com/articles/cs-575/)
|
| 322 |
+
- [RESCO Benchmarks](https://github.com/jault/RESCO)
|
| 323 |
+
|
| 324 |
+
## Citation
|
| 325 |
+
|
| 326 |
+
If you use SUMO-RL in your research, please cite:
|
| 327 |
+
|
| 328 |
+
```bibtex
|
| 329 |
+
@misc{sumorl,
|
| 330 |
+
author = {Lucas N. Alegre},
|
| 331 |
+
title = {{SUMO-RL}},
|
| 332 |
+
year = {2019},
|
| 333 |
+
publisher = {GitHub},
|
| 334 |
+
journal = {GitHub repository},
|
| 335 |
+
howpublished = {\url{https://github.com/LucasAlegre/sumo-rl}},
|
| 336 |
+
}
|
| 337 |
+
```
|
| 338 |
+
|
| 339 |
+
## License
|
| 340 |
+
|
| 341 |
+
This integration is licensed under the BSD-style license. SUMO-RL and SUMO have their own licenses.
|
envs/sumo_rl_env/__init__.py
ADDED
|
@@ -0,0 +1,31 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
SUMO-RL Environment for OpenEnv.
|
| 9 |
+
|
| 10 |
+
This module provides OpenEnv integration for traffic signal control using
|
| 11 |
+
SUMO (Simulation of Urban MObility) via the SUMO-RL library.
|
| 12 |
+
|
| 13 |
+
Example:
|
| 14 |
+
>>> from envs.sumo_rl_env import SumoRLEnv, SumoAction
|
| 15 |
+
>>>
|
| 16 |
+
>>> # Connect to a running server or start via Docker
|
| 17 |
+
>>> env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
|
| 18 |
+
>>>
|
| 19 |
+
>>> # Reset and interact
|
| 20 |
+
>>> result = env.reset()
|
| 21 |
+
>>> result = env.step(SumoAction(phase_id=1))
|
| 22 |
+
>>> print(result.reward, result.done)
|
| 23 |
+
>>>
|
| 24 |
+
>>> # Cleanup
|
| 25 |
+
>>> env.close()
|
| 26 |
+
"""
|
| 27 |
+
|
| 28 |
+
from .client import SumoRLEnv
|
| 29 |
+
from .models import SumoAction, SumoObservation, SumoState
|
| 30 |
+
|
| 31 |
+
__all__ = ["SumoRLEnv", "SumoAction", "SumoObservation", "SumoState"]
|
envs/sumo_rl_env/client.py
ADDED
|
@@ -0,0 +1,144 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Client for SUMO-RL environment.
|
| 9 |
+
|
| 10 |
+
This module provides a client to interact with the SUMO traffic signal
|
| 11 |
+
control environment via WebSocket for persistent sessions.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from typing import Any, Dict
|
| 15 |
+
|
| 16 |
+
from openenv.core.client_types import StepResult
|
| 17 |
+
from openenv.core.env_client import EnvClient
|
| 18 |
+
|
| 19 |
+
from .models import SumoAction, SumoObservation, SumoState
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
class SumoRLEnv(EnvClient[SumoAction, SumoObservation, SumoState]):
    """
    Client for the SUMO-RL traffic signal control environment.

    Maintains a persistent WebSocket session with a SUMO environment server
    and translates between the typed models (SumoAction / SumoObservation /
    SumoState) and the server's JSON wire format.

    Example:
        >>> env = SumoRLEnv.from_docker_image("sumo-rl-env:latest")
        >>> try:
        ...     result = env.reset()
        ...     print(f"Observation shape: {result.observation.observation_shape}")
        ...     print(f"Action space: {result.observation.action_mask}")
        ...     result = env.step(SumoAction(phase_id=1))
        ...     print(f"Reward: {result.reward}, Done: {result.done}")
        ...     state = env.state()
        ...     print(f"Sim time: {state.sim_time}, Total vehicles: {state.total_vehicles}")
        ... finally:
        ...     env.close()

    A custom SUMO network can be supplied through Docker volume mounts
    together with the ``SUMO_NET_FILE`` / ``SUMO_ROUTE_FILE`` environment
    variables, and simulation behaviour tuned via ``SUMO_NUM_SECONDS``,
    ``SUMO_DELTA_TIME``, ``SUMO_REWARD_FN`` and ``SUMO_SEED``, e.g.:

        >>> env = SumoRLEnv.from_docker_image(
        ...     "sumo-rl-env:latest",
        ...     volumes={"/path/to/my/nets": {"bind": "/nets", "mode": "ro"}},
        ...     environment={
        ...         "SUMO_NET_FILE": "/nets/my-network.net.xml",
        ...         "SUMO_ROUTE_FILE": "/nets/my-routes.rou.xml",
        ...         "SUMO_REWARD_FN": "queue",
        ...     },
        ... )
    """

    def _step_payload(self, action: SumoAction) -> Dict[str, Any]:
        """
        Serialize a SumoAction into the JSON body sent to the step endpoint.

        Args:
            action: Action holding the green phase to activate and the
                traffic-signal id it targets.

        Returns:
            JSON-serializable dictionary for the step request.
        """
        return {"phase_id": action.phase_id, "ts_id": action.ts_id}

    def _parse_result(self, payload: Dict[str, Any]) -> StepResult[SumoObservation]:
        """
        Deserialize a step/reset response into a StepResult.

        Args:
            payload: JSON response from the step endpoint.

        Returns:
            StepResult wrapping the parsed SumoObservation.
        """
        raw = payload.get("observation", {})
        obs_kwargs = {
            "observation": raw.get("observation", []),
            "observation_shape": raw.get("observation_shape", []),
            "action_mask": raw.get("action_mask", []),
            "sim_time": raw.get("sim_time", 0.0),
            "done": raw.get("done", False),
            "reward": raw.get("reward"),
            "metadata": raw.get("metadata", {}),
        }
        return StepResult(
            observation=SumoObservation(**obs_kwargs),
            reward=payload.get("reward"),
            done=payload.get("done", False),
        )

    def _parse_state(self, payload: Dict[str, Any]) -> SumoState:
        """
        Deserialize a state response into a SumoState.

        Missing keys fall back to the environment's documented defaults.

        Args:
            payload: JSON response from the state endpoint.

        Returns:
            Populated SumoState.
        """
        defaults: Dict[str, Any] = {
            "episode_id": "",
            "step_count": 0,
            "net_file": "",
            "route_file": "",
            "num_seconds": 20000,
            "delta_time": 5,
            "yellow_time": 2,
            "min_green": 5,
            "max_green": 50,
            "reward_fn": "diff-waiting-time",
            "sim_time": 0.0,
            "total_vehicles": 0,
            "total_waiting_time": 0.0,
            "mean_waiting_time": 0.0,
            "mean_speed": 0.0,
        }
        return SumoState(**{key: payload.get(key, fb) for key, fb in defaults.items()})
envs/sumo_rl_env/models.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Data models for SUMO-RL Environment.
|
| 9 |
+
|
| 10 |
+
This module defines the Action, Observation, and State types for traffic
|
| 11 |
+
signal control using SUMO (Simulation of Urban MObility).
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from typing import Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
from openenv.core.env_server import Action, Observation, State
|
| 17 |
+
from pydantic import Field
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SumoAction(Action):
    """
    Action for the SUMO traffic signal control environment.

    Taking an action means choosing which green phase the controlled
    traffic light should switch to next.

    Attributes:
        phase_id: Index of the green phase to activate
            (valid range: 0 to num_phases - 1).
        ts_id: Identifier of the target traffic signal; defaults to "0"
            (kept for future multi-agent support).
    """

    phase_id: int
    ts_id: str = "0"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class SumoObservation(Observation):
    """
    Observation returned by the SUMO traffic signal environment.

    Bundles the traffic metrics an agent needs for phase selection.

    Attributes:
        observation: Flattened feature vector made of the one-hot encoded
            current phase, a binary min-green flag, and normalized per-lane
            densities and queues.
        observation_shape: Shape to use when reshaping ``observation``.
        action_mask: Indices of the actions that are currently valid.
        sim_time: Simulation clock, in seconds.
        done: True once the episode has finished.
        reward: Reward earned by the previous action (None after a reset).
        metadata: Free-form extras such as system metrics.
    """

    observation: List[float] = Field(default_factory=list)
    observation_shape: List[int] = Field(default_factory=list)
    action_mask: List[int] = Field(default_factory=list)
    sim_time: float = 0.0
    done: bool = False
    reward: Optional[float] = None
    metadata: Dict = Field(default_factory=dict)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class SumoState(State):
    """
    Snapshot of the SUMO traffic signal environment.

    Combines the static simulation configuration with per-episode
    runtime metrics.

    Configuration attributes:
        net_file: Path to the SUMO network file (.net.xml).
        route_file: Path to the SUMO route file (.rou.xml).
        num_seconds: Total simulation duration, in seconds.
        delta_time: Seconds of simulation between agent actions.
        yellow_time: Yellow-phase duration, in seconds.
        min_green: Minimum green time per phase, in seconds.
        max_green: Maximum green time per phase, in seconds.
        reward_fn: Name of the reward function in use.

    Runtime attributes:
        episode_id: Unique identifier for the current episode.
        step_count: Number of agent steps taken this episode.
        sim_time: Current simulation clock, in seconds.
        total_vehicles: Number of vehicles in the simulation.
        total_waiting_time: Cumulative waiting time over all vehicles.
        mean_waiting_time: Average waiting time per vehicle.
        mean_speed: Average vehicle speed.
    """

    # Episode tracking
    episode_id: str = ""
    step_count: int = 0

    # SUMO configuration
    net_file: str = ""
    route_file: str = ""
    num_seconds: int = 20000
    delta_time: int = 5
    yellow_time: int = 2
    min_green: int = 5
    max_green: int = 50
    reward_fn: str = "diff-waiting-time"

    # Runtime metrics
    sim_time: float = 0.0
    total_vehicles: int = 0
    total_waiting_time: float = 0.0
    mean_waiting_time: float = 0.0
    mean_speed: float = 0.0
|
envs/sumo_rl_env/nets/single-intersection/single-intersection.edg.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<edges>
|
| 2 |
+
<edge from="n" id="n_t" to="t" numLanes="2"/>
|
| 3 |
+
<edge from="w" id="w_t" to="t" numLanes="2"/>
|
| 4 |
+
<edge from="t" id="t_s" to="s" numLanes="2"/>
|
| 5 |
+
<edge from="t" id="t_e" to="e" numLanes="2"/>
|
| 6 |
+
</edges>
|
envs/sumo_rl_env/nets/single-intersection/single-intersection.net.xml
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
|
| 3 |
+
<!-- generated on seg 17 dez 2018 17:22:14 -02 by Netedit Version 0.32.0
|
| 4 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 5 |
+
|
| 6 |
+
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/netconvertConfiguration.xsd">
|
| 7 |
+
|
| 8 |
+
<input>
|
| 9 |
+
<sumo-net-file value="nets/single-intersection/single-intersection.net.xml"/>
|
| 10 |
+
</input>
|
| 11 |
+
|
| 12 |
+
<output>
|
| 13 |
+
<output-file value="/home/lucas/Documents/sumo-rl/nets/single-intersection/single-intersection2.net.xml"/>
|
| 14 |
+
</output>
|
| 15 |
+
|
| 16 |
+
<processing>
|
| 17 |
+
<no-turnarounds value="true"/>
|
| 18 |
+
<offset.disable-normalization value="true"/>
|
| 19 |
+
<lefthand value="false"/>
|
| 20 |
+
<junctions.corner-detail value="0"/>
|
| 21 |
+
<rectangular-lane-cut value="false"/>
|
| 22 |
+
<walkingareas value="false"/>
|
| 23 |
+
</processing>
|
| 24 |
+
|
| 25 |
+
</configuration>
|
| 26 |
+
-->
|
| 27 |
+
|
| 28 |
+
<net version="0.27" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/net_file.xsd">
|
| 29 |
+
|
| 30 |
+
<location netOffset="150.00,150.00" convBoundary="0.00,0.00,300.00,300.00" origBoundary="-150.00,-150.00,150.00,150.00" projParameter="!"/>
|
| 31 |
+
|
| 32 |
+
<edge id=":t_0" function="internal">
|
| 33 |
+
<lane id=":t_0_0" index="0" speed="13.90" length="9.50" shape="145.05,151.45 145.05,141.95"/>
|
| 34 |
+
<lane id=":t_0_1" index="1" speed="13.90" length="9.50" shape="148.35,151.45 148.35,141.95"/>
|
| 35 |
+
</edge>
|
| 36 |
+
<edge id=":t_2" function="internal">
|
| 37 |
+
<lane id=":t_2_0" index="0" speed="13.90" length="9.50" shape="141.95,145.05 151.45,145.05"/>
|
| 38 |
+
<lane id=":t_2_1" index="1" speed="13.90" length="9.50" shape="141.95,148.35 151.45,148.35"/>
|
| 39 |
+
</edge>
|
| 40 |
+
|
| 41 |
+
<edge id="n_t" from="n" to="t" priority="-1">
|
| 42 |
+
<lane id="n_t_0" index="0" speed="13.90" length="148.55" shape="145.05,300.00 145.05,151.45"/>
|
| 43 |
+
<lane id="n_t_1" index="1" speed="13.90" length="148.55" shape="148.35,300.00 148.35,151.45"/>
|
| 44 |
+
</edge>
|
| 45 |
+
<edge id="t_e" from="t" to="e" priority="-1">
|
| 46 |
+
<lane id="t_e_0" index="0" speed="13.90" length="148.55" shape="151.45,145.05 300.00,145.05"/>
|
| 47 |
+
<lane id="t_e_1" index="1" speed="13.90" length="148.55" shape="151.45,148.35 300.00,148.35"/>
|
| 48 |
+
</edge>
|
| 49 |
+
<edge id="t_s" from="t" to="s" priority="-1">
|
| 50 |
+
<lane id="t_s_0" index="0" speed="13.90" length="141.95" shape="145.05,141.95 145.05,0.00"/>
|
| 51 |
+
<lane id="t_s_1" index="1" speed="13.90" length="141.95" shape="148.35,141.95 148.35,0.00"/>
|
| 52 |
+
</edge>
|
| 53 |
+
<edge id="w_t" from="w" to="t" priority="-1">
|
| 54 |
+
<lane id="w_t_0" index="0" speed="13.90" length="141.95" shape="0.00,145.05 141.95,145.05"/>
|
| 55 |
+
<lane id="w_t_1" index="1" speed="13.90" length="141.95" shape="0.00,148.35 141.95,148.35"/>
|
| 56 |
+
</edge>
|
| 57 |
+
|
| 58 |
+
<tlLogic id="t" type="static" programID="0" offset="0">
|
| 59 |
+
<phase duration="42" state="GGrr"/>
|
| 60 |
+
<phase duration="2" state="yyrr"/>
|
| 61 |
+
<phase duration="42" state="rrGG"/>
|
| 62 |
+
<phase duration="2" state="rryy"/>
|
| 63 |
+
</tlLogic>
|
| 64 |
+
|
| 65 |
+
<junction id="e" type="dead_end" x="300.00" y="150.00" incLanes="t_e_0 t_e_1" intLanes="" shape="300.00,143.45 300.00,149.95"/>
|
| 66 |
+
<junction id="n" type="dead_end" x="150.00" y="300.00" incLanes="" intLanes="" shape="149.95,300.00 143.45,300.00"/>
|
| 67 |
+
<junction id="s" type="dead_end" x="150.00" y="0.00" incLanes="t_s_0 t_s_1" intLanes="" shape="143.45,0.00 149.95,0.00"/>
|
| 68 |
+
<junction id="t" type="traffic_light" x="150.00" y="150.00" incLanes="n_t_0 n_t_1 w_t_0 w_t_1" intLanes=":t_0_0 :t_0_1 :t_2_0 :t_2_1" shape="143.45,151.45 149.95,151.45 151.45,149.95 151.45,143.45 149.95,141.95 143.45,141.95 141.95,143.45 141.95,149.95">
|
| 69 |
+
<request index="0" response="1100" foes="1100" cont="0"/>
|
| 70 |
+
<request index="1" response="1100" foes="1100" cont="0"/>
|
| 71 |
+
<request index="2" response="0000" foes="0011" cont="0"/>
|
| 72 |
+
<request index="3" response="0000" foes="0011" cont="0"/>
|
| 73 |
+
</junction>
|
| 74 |
+
<junction id="w" type="dead_end" x="0.00" y="150.00" incLanes="" intLanes="" shape="0.00,149.95 0.00,143.45"/>
|
| 75 |
+
|
| 76 |
+
<connection from="n_t" to="t_s" fromLane="0" toLane="0" via=":t_0_0" tl="t" linkIndex="0" dir="s" state="o"/>
|
| 77 |
+
<connection from="n_t" to="t_s" fromLane="1" toLane="1" via=":t_0_1" tl="t" linkIndex="1" dir="s" state="o"/>
|
| 78 |
+
<connection from="w_t" to="t_e" fromLane="0" toLane="0" via=":t_2_0" tl="t" linkIndex="2" dir="s" state="o"/>
|
| 79 |
+
<connection from="w_t" to="t_e" fromLane="1" toLane="1" via=":t_2_1" tl="t" linkIndex="3" dir="s" state="o"/>
|
| 80 |
+
|
| 81 |
+
<connection from=":t_0" to="t_s" fromLane="0" toLane="0" dir="s" state="M"/>
|
| 82 |
+
<connection from=":t_0" to="t_s" fromLane="1" toLane="1" dir="s" state="M"/>
|
| 83 |
+
<connection from=":t_2" to="t_e" fromLane="0" toLane="0" dir="s" state="M"/>
|
| 84 |
+
<connection from=":t_2" to="t_e" fromLane="1" toLane="1" dir="s" state="M"/>
|
| 85 |
+
|
| 86 |
+
</net>
|
envs/sumo_rl_env/nets/single-intersection/single-intersection.nod.xml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<nodes>
|
| 2 |
+
<node id="n" x="0.0" y="150.0" type="priority"/>
|
| 3 |
+
<node id="s" x="0.0" y="-150.0" type="priority"/>
|
| 4 |
+
<node id="e" x="150.0" y="0.0" type="priority"/>
|
| 5 |
+
<node id="w" x="-150.0" y="0.0" type="priority"/>
|
| 6 |
+
<node id="t" x="0.0" y="0.0" type="priority"/>
|
| 7 |
+
</nodes>
|
envs/sumo_rl_env/nets/single-intersection/single-intersection.rou.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<routes>
|
| 2 |
+
<route id="route_ns" edges="n_t t_s"/>
|
| 3 |
+
<route id="route_we" edges="w_t t_e"/>
|
| 4 |
+
<flow id="flow_ns" route="route_ns" begin="0" end="100000" probability="0.2" departSpeed="max" departPos="base" departLane="best"/>
|
| 5 |
+
<flow id="flow_we" route="route_we" begin="0" end="100000" probability="0.5" departSpeed="max" departPos="base" departLane="best"/>
|
| 6 |
+
</routes>
|
envs/sumo_rl_env/nets/single-intersection/single-intersection.sumocfg
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<configuration>
|
| 2 |
+
<input>
|
| 3 |
+
<net-file value="single-intersection.net.xml"/>
|
| 4 |
+
<route-files value="single-intersection.rou.xml"/>
|
| 5 |
+
</input>
|
| 6 |
+
<time>
|
| 7 |
+
<begin value="0"/>
|
| 8 |
+
<end value="100000"/>
|
| 9 |
+
</time>
|
| 10 |
+
</configuration>
|
envs/sumo_rl_env/server/Dockerfile
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dockerfile for SUMO-RL Environment
# This image provides traffic signal control via SUMO (Simulation of Urban MObility)

# Configurable base image - defaults to local build, can be overridden for CI/CD
# Base image provides: fastapi, uvicorn, requests, curl, PYTHONPATH=/app/src
#
# Local build: docker build -t envtorch-base:latest -f src/core/containers/images/Dockerfile .
#              docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
#
# CI/CD build: docker build --build-arg BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest \
#              -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
ARG BASE_IMAGE=envtorch-base:latest
FROM ${BASE_IMAGE}

# Install SUMO system dependencies
# SUMO is available in Debian repositories
RUN apt-get update && apt-get install -y --no-install-recommends \
    sumo \
    sumo-tools \
    && rm -rf /var/lib/apt/lists/*

# Set SUMO_HOME environment variable (required by sumolib/traci at runtime)
ENV SUMO_HOME=/usr/share/sumo

# Install SUMO-RL and Python dependencies
# sumo-rl includes: gymnasium, pettingzoo, numpy, pandas, sumolib, traci
#
# NOTE: version specifiers MUST be quoted. RUN executes through a shell, so an
# unquoted ">" (as in gymnasium>=0.28) is parsed as output redirection: pip
# would install an UNPINNED "gymnasium" and the shell would create a stray
# file named "=0.28" in the image.
RUN pip install --no-cache-dir \
    "gymnasium>=0.28" \
    "pettingzoo>=1.24.3" \
    "numpy>=1.24.0" \
    "pandas>=2.0.0" \
    "sumolib>=1.14.0" \
    "traci>=1.14.0" \
    "sumo-rl>=1.4.5"

# Copy OpenEnv core (base image already set WORKDIR=/app)
COPY src/core/ /app/src/core/

# Copy SUMO-RL environment code (includes nets/)
COPY envs/sumo_rl_env/ /app/envs/sumo_rl_env/

# Copy example network files to expected location
# Default: single-intersection (simple 4-way intersection)
COPY envs/sumo_rl_env/nets/single-intersection/ /app/nets/single-intersection/

# SUMO environment variables (can be overridden at runtime)
ENV SUMO_NET_FILE=/app/nets/single-intersection/single-intersection.net.xml
ENV SUMO_ROUTE_FILE=/app/nets/single-intersection/single-intersection.rou.xml
ENV SUMO_NUM_SECONDS=20000
ENV SUMO_DELTA_TIME=5
ENV SUMO_YELLOW_TIME=2
ENV SUMO_MIN_GREEN=5
ENV SUMO_MAX_GREEN=50
ENV SUMO_REWARD_FN=diff-waiting-time
ENV SUMO_SEED=42

# Expose port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Run the FastAPI server
CMD ["uvicorn", "envs.sumo_rl_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
envs/sumo_rl_env/server/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""SUMO-RL environment server package."""
|
envs/sumo_rl_env/server/app.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
FastAPI application for SUMO-RL environment server.

This module creates an HTTP server that exposes traffic signal control
via the OpenEnv API using SUMO (Simulation of Urban MObility).
"""

import os

from openenv.core.env_server import create_app

from ..models import SumoAction, SumoObservation
from .sumo_environment import SumoEnvironment

# Get configuration from environment variables.
# The fallback paths match where the Dockerfile copies the example network
# (/app/nets/single-intersection/...); the previous flat /app/nets/ defaults
# pointed at files that do not exist in the image, so running outside Docker
# (or without the SUMO_* env vars) would fail at startup.
net_file = os.getenv(
    "SUMO_NET_FILE", "/app/nets/single-intersection/single-intersection.net.xml"
)
route_file = os.getenv(
    "SUMO_ROUTE_FILE", "/app/nets/single-intersection/single-intersection.rou.xml"
)
num_seconds = int(os.getenv("SUMO_NUM_SECONDS", "20000"))
delta_time = int(os.getenv("SUMO_DELTA_TIME", "5"))
yellow_time = int(os.getenv("SUMO_YELLOW_TIME", "2"))
min_green = int(os.getenv("SUMO_MIN_GREEN", "5"))
max_green = int(os.getenv("SUMO_MAX_GREEN", "50"))
reward_fn = os.getenv("SUMO_REWARD_FN", "diff-waiting-time")
sumo_seed = int(os.getenv("SUMO_SEED", "42"))


def create_sumo_environment():
    """Factory function that creates a SumoEnvironment from the env-var config."""
    return SumoEnvironment(
        net_file=net_file,
        route_file=route_file,
        num_seconds=num_seconds,
        delta_time=delta_time,
        yellow_time=yellow_time,
        min_green=min_green,
        max_green=max_green,
        reward_fn=reward_fn,
        sumo_seed=sumo_seed,
    )


# Create FastAPI app.
# Pass the factory function instead of an instance for WebSocket session support.
app = create_app(
    create_sumo_environment, SumoAction, SumoObservation, env_name="sumo_rl_env"
)
|
envs/sumo_rl_env/server/sumo_environment.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
SUMO-RL Environment Server Implementation.
|
| 9 |
+
|
| 10 |
+
This module wraps the SUMO-RL SumoEnvironment and exposes it
|
| 11 |
+
via the OpenEnv Environment interface for traffic signal control.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
import uuid
|
| 16 |
+
from typing import Any, Dict
|
| 17 |
+
|
| 18 |
+
# Set SUMO_HOME before importing sumo_rl
|
| 19 |
+
os.environ.setdefault("SUMO_HOME", "/usr/share/sumo")
|
| 20 |
+
|
| 21 |
+
from openenv.core.env_server import Action, Environment, Observation
|
| 22 |
+
|
| 23 |
+
from ..models import SumoAction, SumoObservation, SumoState
|
| 24 |
+
|
| 25 |
+
# Import SUMO-RL
|
| 26 |
+
try:
|
| 27 |
+
from sumo_rl import SumoEnvironment as BaseSumoEnv
|
| 28 |
+
except ImportError as e:
|
| 29 |
+
raise ImportError(
|
| 30 |
+
"sumo-rl is not installed. Please install it with: pip install sumo-rl"
|
| 31 |
+
) from e
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class SumoEnvironment(Environment):
    """
    SUMO-RL Environment wrapper for OpenEnv.

    This environment wraps the SUMO traffic signal control environment
    for single-agent reinforcement learning.

    Args:
        net_file: Path to SUMO network file (.net.xml)
        route_file: Path to SUMO route file (.rou.xml)
        num_seconds: Simulation duration in seconds (default: 20000)
        delta_time: Seconds between agent actions (default: 5)
        yellow_time: Yellow phase duration in seconds (default: 2)
        min_green: Minimum green time in seconds (default: 5)
        max_green: Maximum green time in seconds (default: 50)
        reward_fn: Reward function name (default: "diff-waiting-time")
        sumo_seed: Random seed for reproducibility (default: 42)

    Example:
        >>> env = SumoEnvironment(
        ...     net_file="/app/nets/single-intersection.net.xml",
        ...     route_file="/app/nets/single-intersection.rou.xml"
        ... )
        >>> obs = env.reset()
        >>> print(obs.observation_shape)
        >>> obs = env.step(SumoAction(phase_id=1))
        >>> print(obs.reward, obs.done)
    """

    def __init__(
        self,
        net_file: str,
        route_file: str,
        num_seconds: int = 20000,
        delta_time: int = 5,
        yellow_time: int = 2,
        min_green: int = 5,
        max_green: int = 50,
        reward_fn: str = "diff-waiting-time",
        sumo_seed: int = 42,
    ):
        """Initialize SUMO traffic signal environment."""
        super().__init__()

        # Store configuration
        self.net_file = net_file
        self.route_file = route_file
        self.num_seconds = num_seconds
        self.delta_time = delta_time
        self.yellow_time = yellow_time
        self.min_green = min_green
        self.max_green = max_green
        self.reward_fn = reward_fn
        self.sumo_seed = sumo_seed

        # Create SUMO environment (single-agent mode)
        # Key settings:
        #   - use_gui=False: No GUI in Docker
        #   - single_agent=True: Returns single obs/reward (not dict)
        #   - sumo_warnings=False: Suppress SUMO warnings
        #   - out_csv_name=None: Don't write CSV files
        self.env = BaseSumoEnv(
            net_file=net_file,
            route_file=route_file,
            use_gui=False,
            single_agent=True,
            num_seconds=num_seconds,
            delta_time=delta_time,
            yellow_time=yellow_time,
            min_green=min_green,
            max_green=max_green,
            reward_fn=reward_fn,
            sumo_seed=sumo_seed,
            sumo_warnings=False,
            out_csv_name=None,  # Disable CSV output
            add_system_info=True,
            add_per_agent_info=False,
        )

        # Initialize state (mirrors the configuration for /state introspection)
        self._state = SumoState(
            net_file=net_file,
            route_file=route_file,
            num_seconds=num_seconds,
            delta_time=delta_time,
            yellow_time=yellow_time,
            min_green=min_green,
            max_green=max_green,
            reward_fn=reward_fn,
        )

        # Last info dict returned by the underlying env (used for metadata)
        self._last_info = {}

    def reset(self) -> Observation:
        """
        Reset the environment and return initial observation.

        Returns:
            Initial SumoObservation for the agent (reward is None on reset).
        """
        # Reset SUMO simulation
        obs, info = self.env.reset()

        # Start a fresh episode in the state tracker
        self._state.episode_id = str(uuid.uuid4())
        self._state.step_count = 0
        self._state.sim_time = 0.0

        # Store info for metadata
        self._last_info = info

        return self._make_observation(obs, reward=None, done=False, info=info)

    def step(self, action: Action) -> Observation:
        """
        Execute agent's action and return resulting observation.

        Args:
            action: SumoAction containing the phase_id to execute.

        Returns:
            SumoObservation after action execution.

        Raises:
            ValueError: If action is not a SumoAction or phase_id is out of range.
        """
        if not isinstance(action, SumoAction):
            raise ValueError(f"Expected SumoAction, got {type(action)}")

        # Validate phase_id against the discrete action space
        num_phases = self.env.action_space.n
        if action.phase_id < 0 or action.phase_id >= num_phases:
            raise ValueError(
                f"Invalid phase_id: {action.phase_id}. "
                f"Valid range: [0, {num_phases - 1}]"
            )

        # Execute action in SUMO
        # Returns: (obs, reward, terminated, truncated, info)
        obs, reward, terminated, truncated, info = self.env.step(action.phase_id)
        done = terminated or truncated

        # Update runtime metrics from the info dict (keys provided by sumo-rl
        # when add_system_info=True)
        self._state.step_count += 1
        self._state.sim_time = info.get("step", 0.0)
        self._state.total_vehicles = info.get("system_total_running", 0)
        self._state.total_waiting_time = info.get("system_total_waiting_time", 0.0)
        self._state.mean_waiting_time = info.get("system_mean_waiting_time", 0.0)
        self._state.mean_speed = info.get("system_mean_speed", 0.0)

        # Store info for metadata
        self._last_info = info

        return self._make_observation(obs, reward=reward, done=done, info=info)

    def close(self) -> None:
        """
        Shut down the underlying SUMO simulation.

        Closes the TraCI connection held by the wrapped sumo-rl environment.
        Without this, the SUMO subprocess/connection is never released when
        the server shuts down or the environment is replaced.
        """
        self.env.close()

    @property
    def state(self) -> SumoState:
        """Get current environment state."""
        return self._state

    def _make_observation(
        self, obs: Any, reward: float, done: bool, info: Dict
    ) -> SumoObservation:
        """
        Create SumoObservation from SUMO environment output.

        Args:
            obs: Observation array from SUMO environment
            reward: Reward value (None on reset)
            done: Whether episode is complete
            info: Info dictionary from SUMO environment

        Returns:
            SumoObservation for the agent.
        """
        # Convert observation to a plain list (handles numpy arrays)
        if hasattr(obs, "tolist"):
            obs_list = obs.tolist()
        else:
            obs_list = list(obs)

        # Get action mask (all actions valid in SUMO-RL)
        num_phases = self.env.action_space.n
        action_mask = list(range(num_phases))

        # Extract system metrics for metadata
        system_info = {k: v for k, v in info.items() if k.startswith("system_")}

        # Create observation
        return SumoObservation(
            observation=obs_list,
            observation_shape=[len(obs_list)],
            action_mask=action_mask,
            sim_time=info.get("step", 0.0),
            done=done,
            reward=reward,
            metadata={
                "num_green_phases": num_phases,
                "system_info": system_info,
            },
        )
|
envs/sumo_rl_env/test_sumo_rl.sh
ADDED
|
@@ -0,0 +1,220 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/bin/bash
# Complete SUMO-RL Integration Test Script
# Run this to verify everything works!

set -e  # Exit on error

echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🚀 SUMO-RL Environment Test Script"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""

# Navigate to repo root, derived from this script's own location
# (envs/sumo_rl_env/ -> two levels up). A hard-coded absolute path would
# only work on the original author's machine.
cd "$(cd "$(dirname "${BASH_SOURCE[0]}")/../.." && pwd)"

echo "📁 Working directory: $(pwd)"
echo ""

# Step 1: Check if base image exists
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 1: Checking for base image..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

if docker images | grep -q "envtorch-base.*latest"; then
    echo "✅ envtorch-base:latest found"
else
    echo "⚠️  envtorch-base:latest not found - building it now..."
    echo ""
    docker build -t envtorch-base:latest -f src/core/containers/images/Dockerfile .
    echo ""
    echo "✅ Base image built successfully"
fi
echo ""

# Step 2: Build SUMO-RL environment
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 2: Building SUMO-RL environment image..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "⏳ This will take 5-10 minutes (installing SUMO)..."
echo ""

docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .

echo ""
echo "✅ SUMO-RL environment built successfully"
echo ""

# Check image size
IMAGE_SIZE=$(docker images sumo-rl-env:latest --format "{{.Size}}")
echo "📦 Image size: $IMAGE_SIZE"
echo ""

# Step 3: Start container
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 3: Starting SUMO-RL container..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

# Stop any existing container
docker stop sumo-rl-test 2>/dev/null || true
docker rm sumo-rl-test 2>/dev/null || true

# Start new container
docker run -d -p 8000:8000 --name sumo-rl-test sumo-rl-env:latest

echo "⏳ Waiting for container to start..."
sleep 5

# Check if container is running
if docker ps | grep -q sumo-rl-test; then
    echo "✅ Container is running"
else
    echo "❌ Container failed to start!"
    echo "Logs:"
    docker logs sumo-rl-test
    exit 1
fi
echo ""

# Step 4: Test health endpoint
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 4: Testing health endpoint..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

HEALTH_RESPONSE=$(curl -s http://localhost:8000/health)
echo "Response: $HEALTH_RESPONSE"

if echo "$HEALTH_RESPONSE" | grep -q "healthy"; then
    echo "✅ Health check passed"
else
    echo "❌ Health check failed!"
    exit 1
fi
echo ""

# Step 5: Test reset endpoint
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 5: Testing reset endpoint..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "⏳ This may take 3-5 seconds (SUMO simulation starting)..."

RESET_RESPONSE=$(curl -s -X POST http://localhost:8000/reset)

if echo "$RESET_RESPONSE" | jq -e '.observation.observation' > /dev/null 2>&1; then
    echo "✅ Reset successful"

    # Extract observation details
    OBS_SHAPE=$(echo "$RESET_RESPONSE" | jq '.observation.observation_shape')
    ACTION_MASK=$(echo "$RESET_RESPONSE" | jq '.observation.action_mask')

    echo "   📊 Observation shape: $OBS_SHAPE"
    echo "   🎮 Available actions: $ACTION_MASK"
else
    echo "❌ Reset failed!"
    echo "Response: $RESET_RESPONSE"
    exit 1
fi
echo ""

# Step 6: Test step endpoint
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 6: Testing step endpoint (taking 5 actions)..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

for i in {1..5}; do
    # Take action (cycle through phases 0-1)
    PHASE_ID=$((i % 2))

    STEP_RESPONSE=$(curl -s -X POST http://localhost:8000/step \
      -H "Content-Type: application/json" \
      -d "{\"action\": {\"phase_id\": $PHASE_ID, \"ts_id\": \"0\"}}")

    if echo "$STEP_RESPONSE" | jq -e '.reward' > /dev/null 2>&1; then
        REWARD=$(echo "$STEP_RESPONSE" | jq '.reward')
        DONE=$(echo "$STEP_RESPONSE" | jq '.done')
        echo "   Step $i: phase=$PHASE_ID, reward=$REWARD, done=$DONE"
    else
        echo "❌ Step $i failed!"
        echo "Response: $STEP_RESPONSE"
        exit 1
    fi
done

echo "✅ All steps successful"
echo ""

# Step 7: Test state endpoint
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 7: Testing state endpoint..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

STATE_RESPONSE=$(curl -s http://localhost:8000/state)

if echo "$STATE_RESPONSE" | jq -e '.episode_id' > /dev/null 2>&1; then
    echo "✅ State endpoint working"

    # Extract state details
    EPISODE_ID=$(echo "$STATE_RESPONSE" | jq -r '.episode_id')
    STEP_COUNT=$(echo "$STATE_RESPONSE" | jq '.step_count')
    SIM_TIME=$(echo "$STATE_RESPONSE" | jq '.sim_time')
    TOTAL_VEHICLES=$(echo "$STATE_RESPONSE" | jq '.total_vehicles')

    echo "   📝 Episode ID: ${EPISODE_ID:0:8}..."
    echo "   🔢 Step count: $STEP_COUNT"
    echo "   ⏱️  Simulation time: $SIM_TIME seconds"
    echo "   🚗 Total vehicles: $TOTAL_VEHICLES"
else
    echo "❌ State endpoint failed!"
    echo "Response: $STATE_RESPONSE"
    exit 1
fi
echo ""

# Step 8: Check logs for errors
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 8: Checking container logs for errors..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

LOGS=$(docker logs sumo-rl-test 2>&1)

# Check for Python errors (but ignore LoggerMode.Error which is expected)
if echo "$LOGS" | grep -i "error\|exception\|traceback" | grep -v "LoggerMode.Error"; then
    echo "⚠️  Found errors in logs:"
    echo "$LOGS" | grep -i "error\|exception\|traceback" | grep -v "LoggerMode.Error"
else
    echo "✅ No errors found in logs"
fi
echo ""

# Step 9: Cleanup
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "Step 9: Cleanup..."
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"

echo "🧹 Stopping and removing test container..."
docker stop sumo-rl-test
docker rm sumo-rl-test

echo "✅ Cleanup complete"
echo ""

# Final summary
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo "🎉 ALL TESTS PASSED!"
echo "━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━"
echo ""
echo "Summary:"
echo "  ✅ Docker image built successfully ($IMAGE_SIZE)"
echo "  ✅ Container started and ran"
echo "  ✅ Health endpoint working"
echo "  ✅ Reset endpoint working"
echo "  ✅ Step endpoint working (5 actions executed)"
echo "  ✅ State endpoint working"
echo "  ✅ No errors in logs"
echo ""
echo "🎯 SUMO-RL integration is working perfectly!"
echo ""
echo "Next steps:"
echo "  1. Test Python client: python examples/sumo_rl_simple.py"
echo "  2. Push to GitHub to trigger CI/CD"
echo "  3. Use for RL training!"
echo ""
|
models.py
ADDED
|
@@ -0,0 +1,107 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Data models for SUMO-RL Environment.
|
| 9 |
+
|
| 10 |
+
This module defines the Action, Observation, and State types for traffic
|
| 11 |
+
signal control using SUMO (Simulation of Urban MObility).
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from typing import Dict, List, Optional
|
| 15 |
+
|
| 16 |
+
from openenv.core.env_server import Action, Observation, State
|
| 17 |
+
from pydantic import Field
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class SumoAction(Action):
    """
    Agent action for the SUMO traffic-signal environment.

    Choosing an action means selecting which green phase the controlled
    traffic light should switch to next.

    Attributes:
        phase_id: Index of the green phase to activate (0 to num_phases-1)
        ts_id: Traffic signal ID (for multi-agent support, default "0")
    """

    phase_id: int
    ts_id: str = "0"
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
class SumoObservation(Observation):
    """
    Observation emitted by the SUMO traffic-signal environment.

    Bundles the raw feature vector with the bookkeeping an agent needs
    to choose the next phase.

    Attributes:
        observation: Flattened feature vector: one-hot encoded current
            phase, a binary min-green flag, then normalized lane
            densities and lane queues.
        observation_shape: Shape of ``observation`` for reshaping.
        action_mask: Indices of the currently valid actions.
        sim_time: Simulation clock in seconds.
        done: True once the episode has finished.
        reward: Reward for the previous action; None right after reset.
        metadata: Extra info such as aggregate system metrics.
    """

    observation: List[float] = Field(default_factory=list)
    observation_shape: List[int] = Field(default_factory=list)
    action_mask: List[int] = Field(default_factory=list)
    sim_time: float = 0.0
    done: bool = False
    reward: Optional[float] = None
    metadata: Dict = Field(default_factory=dict)
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class SumoState(State):
    """State of the SUMO traffic signal environment.

    Bundles the static SUMO configuration together with per-episode
    runtime metrics.

    Configuration attributes:
        net_file: Path to the SUMO network file (.net.xml).
        route_file: Path to the SUMO route file (.rou.xml).
        num_seconds: Total simulation duration in seconds.
        delta_time: Seconds between consecutive agent actions.
        yellow_time: Duration of the yellow phase in seconds.
        min_green: Minimum green time per phase in seconds.
        max_green: Maximum green time per phase in seconds.
        reward_fn: Name of the reward function in use.

    Runtime attributes:
        episode_id: Unique identifier for the current episode.
        step_count: Number of steps taken this episode.
        sim_time: Current simulation time in seconds.
        total_vehicles: Number of vehicles currently in the simulation.
        total_waiting_time: Cumulative waiting time across all vehicles.
        mean_waiting_time: Mean waiting time per vehicle.
        mean_speed: Mean vehicle speed.
    """

    # Episode tracking
    episode_id: str = ""
    step_count: int = 0

    # SUMO configuration
    net_file: str = ""
    route_file: str = ""
    num_seconds: int = 20000
    delta_time: int = 5
    yellow_time: int = 2
    min_green: int = 5
    max_green: int = 50
    reward_fn: str = "diff-waiting-time"

    # Runtime metrics
    sim_time: float = 0.0
    total_vehicles: int = 0
    total_waiting_time: float = 0.0
    mean_waiting_time: float = 0.0
    mean_speed: float = 0.0
|
nets/single-intersection/single-intersection.edg.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<edges>
|
| 2 |
+
<edge from="n" id="n_t" to="t" numLanes="2"/>
|
| 3 |
+
<edge from="w" id="w_t" to="t" numLanes="2"/>
|
| 4 |
+
<edge from="t" id="t_s" to="s" numLanes="2"/>
|
| 5 |
+
<edge from="t" id="t_e" to="e" numLanes="2"/>
|
| 6 |
+
</edges>
|
nets/single-intersection/single-intersection.net.xml
ADDED
|
@@ -0,0 +1,86 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 2 |
+
|
| 3 |
+
<!-- generated on seg 17 dez 2018 17:22:14 -02 by Netedit Version 0.32.0
|
| 4 |
+
<?xml version="1.0" encoding="UTF-8"?>
|
| 5 |
+
|
| 6 |
+
<configuration xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/netconvertConfiguration.xsd">
|
| 7 |
+
|
| 8 |
+
<input>
|
| 9 |
+
<sumo-net-file value="nets/single-intersection/single-intersection.net.xml"/>
|
| 10 |
+
</input>
|
| 11 |
+
|
| 12 |
+
<output>
|
| 13 |
+
<output-file value="/home/lucas/Documents/sumo-rl/nets/single-intersection/single-intersection2.net.xml"/>
|
| 14 |
+
</output>
|
| 15 |
+
|
| 16 |
+
<processing>
|
| 17 |
+
<no-turnarounds value="true"/>
|
| 18 |
+
<offset.disable-normalization value="true"/>
|
| 19 |
+
<lefthand value="false"/>
|
| 20 |
+
<junctions.corner-detail value="0"/>
|
| 21 |
+
<rectangular-lane-cut value="false"/>
|
| 22 |
+
<walkingareas value="false"/>
|
| 23 |
+
</processing>
|
| 24 |
+
|
| 25 |
+
</configuration>
|
| 26 |
+
-->
|
| 27 |
+
|
| 28 |
+
<net version="0.27" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:noNamespaceSchemaLocation="http://sumo.dlr.de/xsd/net_file.xsd">
|
| 29 |
+
|
| 30 |
+
<location netOffset="150.00,150.00" convBoundary="0.00,0.00,300.00,300.00" origBoundary="-150.00,-150.00,150.00,150.00" projParameter="!"/>
|
| 31 |
+
|
| 32 |
+
<edge id=":t_0" function="internal">
|
| 33 |
+
<lane id=":t_0_0" index="0" speed="13.90" length="9.50" shape="145.05,151.45 145.05,141.95"/>
|
| 34 |
+
<lane id=":t_0_1" index="1" speed="13.90" length="9.50" shape="148.35,151.45 148.35,141.95"/>
|
| 35 |
+
</edge>
|
| 36 |
+
<edge id=":t_2" function="internal">
|
| 37 |
+
<lane id=":t_2_0" index="0" speed="13.90" length="9.50" shape="141.95,145.05 151.45,145.05"/>
|
| 38 |
+
<lane id=":t_2_1" index="1" speed="13.90" length="9.50" shape="141.95,148.35 151.45,148.35"/>
|
| 39 |
+
</edge>
|
| 40 |
+
|
| 41 |
+
<edge id="n_t" from="n" to="t" priority="-1">
|
| 42 |
+
<lane id="n_t_0" index="0" speed="13.90" length="148.55" shape="145.05,300.00 145.05,151.45"/>
|
| 43 |
+
<lane id="n_t_1" index="1" speed="13.90" length="148.55" shape="148.35,300.00 148.35,151.45"/>
|
| 44 |
+
</edge>
|
| 45 |
+
<edge id="t_e" from="t" to="e" priority="-1">
|
| 46 |
+
<lane id="t_e_0" index="0" speed="13.90" length="148.55" shape="151.45,145.05 300.00,145.05"/>
|
| 47 |
+
<lane id="t_e_1" index="1" speed="13.90" length="148.55" shape="151.45,148.35 300.00,148.35"/>
|
| 48 |
+
</edge>
|
| 49 |
+
<edge id="t_s" from="t" to="s" priority="-1">
|
| 50 |
+
<lane id="t_s_0" index="0" speed="13.90" length="141.95" shape="145.05,141.95 145.05,0.00"/>
|
| 51 |
+
<lane id="t_s_1" index="1" speed="13.90" length="141.95" shape="148.35,141.95 148.35,0.00"/>
|
| 52 |
+
</edge>
|
| 53 |
+
<edge id="w_t" from="w" to="t" priority="-1">
|
| 54 |
+
<lane id="w_t_0" index="0" speed="13.90" length="141.95" shape="0.00,145.05 141.95,145.05"/>
|
| 55 |
+
<lane id="w_t_1" index="1" speed="13.90" length="141.95" shape="0.00,148.35 141.95,148.35"/>
|
| 56 |
+
</edge>
|
| 57 |
+
|
| 58 |
+
<tlLogic id="t" type="static" programID="0" offset="0">
|
| 59 |
+
<phase duration="42" state="GGrr"/>
|
| 60 |
+
<phase duration="2" state="yyrr"/>
|
| 61 |
+
<phase duration="42" state="rrGG"/>
|
| 62 |
+
<phase duration="2" state="rryy"/>
|
| 63 |
+
</tlLogic>
|
| 64 |
+
|
| 65 |
+
<junction id="e" type="dead_end" x="300.00" y="150.00" incLanes="t_e_0 t_e_1" intLanes="" shape="300.00,143.45 300.00,149.95"/>
|
| 66 |
+
<junction id="n" type="dead_end" x="150.00" y="300.00" incLanes="" intLanes="" shape="149.95,300.00 143.45,300.00"/>
|
| 67 |
+
<junction id="s" type="dead_end" x="150.00" y="0.00" incLanes="t_s_0 t_s_1" intLanes="" shape="143.45,0.00 149.95,0.00"/>
|
| 68 |
+
<junction id="t" type="traffic_light" x="150.00" y="150.00" incLanes="n_t_0 n_t_1 w_t_0 w_t_1" intLanes=":t_0_0 :t_0_1 :t_2_0 :t_2_1" shape="143.45,151.45 149.95,151.45 151.45,149.95 151.45,143.45 149.95,141.95 143.45,141.95 141.95,143.45 141.95,149.95">
|
| 69 |
+
<request index="0" response="1100" foes="1100" cont="0"/>
|
| 70 |
+
<request index="1" response="1100" foes="1100" cont="0"/>
|
| 71 |
+
<request index="2" response="0000" foes="0011" cont="0"/>
|
| 72 |
+
<request index="3" response="0000" foes="0011" cont="0"/>
|
| 73 |
+
</junction>
|
| 74 |
+
<junction id="w" type="dead_end" x="0.00" y="150.00" incLanes="" intLanes="" shape="0.00,149.95 0.00,143.45"/>
|
| 75 |
+
|
| 76 |
+
<connection from="n_t" to="t_s" fromLane="0" toLane="0" via=":t_0_0" tl="t" linkIndex="0" dir="s" state="o"/>
|
| 77 |
+
<connection from="n_t" to="t_s" fromLane="1" toLane="1" via=":t_0_1" tl="t" linkIndex="1" dir="s" state="o"/>
|
| 78 |
+
<connection from="w_t" to="t_e" fromLane="0" toLane="0" via=":t_2_0" tl="t" linkIndex="2" dir="s" state="o"/>
|
| 79 |
+
<connection from="w_t" to="t_e" fromLane="1" toLane="1" via=":t_2_1" tl="t" linkIndex="3" dir="s" state="o"/>
|
| 80 |
+
|
| 81 |
+
<connection from=":t_0" to="t_s" fromLane="0" toLane="0" dir="s" state="M"/>
|
| 82 |
+
<connection from=":t_0" to="t_s" fromLane="1" toLane="1" dir="s" state="M"/>
|
| 83 |
+
<connection from=":t_2" to="t_e" fromLane="0" toLane="0" dir="s" state="M"/>
|
| 84 |
+
<connection from=":t_2" to="t_e" fromLane="1" toLane="1" dir="s" state="M"/>
|
| 85 |
+
|
| 86 |
+
</net>
|
nets/single-intersection/single-intersection.nod.xml
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<nodes>
|
| 2 |
+
<node id="n" x="0.0" y="150.0" type="priority"/>
|
| 3 |
+
<node id="s" x="0.0" y="-150.0" type="priority"/>
|
| 4 |
+
<node id="e" x="150.0" y="0.0" type="priority"/>
|
| 5 |
+
<node id="w" x="-150.0" y="0.0" type="priority"/>
|
| 6 |
+
<node id="t" x="0.0" y="0.0" type="priority"/>
|
| 7 |
+
</nodes>
|
nets/single-intersection/single-intersection.rou.xml
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<routes>
|
| 2 |
+
<route id="route_ns" edges="n_t t_s"/>
|
| 3 |
+
<route id="route_we" edges="w_t t_e"/>
|
| 4 |
+
<flow id="flow_ns" route="route_ns" begin="0" end="100000" probability="0.2" departSpeed="max" departPos="base" departLane="best"/>
|
| 5 |
+
<flow id="flow_we" route="route_we" begin="0" end="100000" probability="0.5" departSpeed="max" departPos="base" departLane="best"/>
|
| 6 |
+
</routes>
|
nets/single-intersection/single-intersection.sumocfg
ADDED
|
@@ -0,0 +1,10 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
<configuration>
|
| 2 |
+
<input>
|
| 3 |
+
<net-file value="single-intersection.net.xml"/>
|
| 4 |
+
<route-files value="single-intersection.rou.xml"/>
|
| 5 |
+
</input>
|
| 6 |
+
<time>
|
| 7 |
+
<begin value="0"/>
|
| 8 |
+
<end value="100000"/>
|
| 9 |
+
</time>
|
| 10 |
+
</configuration>
|
pyproject.toml
ADDED
|
@@ -0,0 +1,147 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[build-system]
|
| 2 |
+
requires = ["setuptools>=45", "wheel"]
|
| 3 |
+
build-backend = "setuptools.build_meta"
|
| 4 |
+
|
| 5 |
+
[project]
|
| 6 |
+
name = "openenv-core"
|
| 7 |
+
version = "0.2.2.dev0"
|
| 8 |
+
description = "A unified framework for reinforcement learning environments"
|
| 9 |
+
readme = "README.md"
|
| 10 |
+
requires-python = ">=3.10"
|
| 11 |
+
dependencies = [
|
| 12 |
+
# Core shared dependencies - minimal set required for all environments
|
| 13 |
+
# Heavy dependencies (torch, numpy, smolagents, etc.) should be in
|
| 14 |
+
# individual environment pyproject.toml files
|
| 15 |
+
"fastapi>=0.104.0",
|
| 16 |
+
"pydantic>=2.0.0",
|
| 17 |
+
"uvicorn>=0.24.0",
|
| 18 |
+
"requests>=2.25.0",
|
| 19 |
+
# CLI dependencies
|
| 20 |
+
"typer>=0.9.0",
|
| 21 |
+
"rich>=13.0.0",
|
| 22 |
+
"pyyaml>=6.0",
|
| 23 |
+
"huggingface_hub>=0.20.0",
|
| 24 |
+
"openai>=2.7.2",
|
| 25 |
+
"tomli>=2.3.0",
|
| 26 |
+
"tomli-w>=1.2.0",
|
| 27 |
+
"websockets>=15.0.1",
|
| 28 |
+
# MCP support
|
| 29 |
+
"fastmcp>=3.0.0",
|
| 30 |
+
# Web UI dependencies
|
| 31 |
+
"gradio>=4.0.0",
|
| 32 |
+
]
|
| 33 |
+
|
| 34 |
+
[project.optional-dependencies]
|
| 35 |
+
core = [
|
| 36 |
+
"fastapi>=0.104.0",
|
| 37 |
+
"pydantic>=2.0.0",
|
| 38 |
+
"uvicorn>=0.24.0",
|
| 39 |
+
"requests>=2.25.0",
|
| 40 |
+
"websockets>=15.0.1",
|
| 41 |
+
]
|
| 42 |
+
cli = [
|
| 43 |
+
"typer>=0.9.0",
|
| 44 |
+
"rich>=13.0.0",
|
| 45 |
+
"pyyaml>=6.0",
|
| 46 |
+
"huggingface_hub>=0.20.0",
|
| 47 |
+
"openai>=2.7.2",
|
| 48 |
+
"tomli>=2.3.0",
|
| 49 |
+
"tomli-w>=1.2.0",
|
| 50 |
+
]
|
| 51 |
+
docs = [
|
| 52 |
+
"sphinx==7.2.6",
|
| 53 |
+
"pytorch-sphinx-theme2",
|
| 54 |
+
"sphinxcontrib.katex==0.9.10",
|
| 55 |
+
"docutils>=0.18.1,<0.21",
|
| 56 |
+
"sphinx-design==0.6.1",
|
| 57 |
+
"sphinxcontrib-mermaid==1.0.0",
|
| 58 |
+
"myst-parser",
|
| 59 |
+
"sphinxext-opengraph",
|
| 60 |
+
"sphinx-sitemap==2.7.1",
|
| 61 |
+
"sphinx-gallery>=0.14.0",
|
| 62 |
+
"matplotlib",
|
| 63 |
+
"nest-asyncio",
|
| 64 |
+
"smolagents",
|
| 65 |
+
]
|
| 66 |
+
all = [
|
| 67 |
+
"openenv-core[core] @ git+https://github.com/meta-pytorch/OpenEnv.git@main",
|
| 68 |
+
"openenv-core[cli]",
|
| 69 |
+
]
|
| 70 |
+
daytona = [
|
| 71 |
+
"daytona>=0.136.0",
|
| 72 |
+
"pyyaml>=6.0",
|
| 73 |
+
]
|
| 74 |
+
inspect = [
|
| 75 |
+
"inspect-ai>=0.3.0",
|
| 76 |
+
]
|
| 77 |
+
|
| 78 |
+
[project.scripts]
|
| 79 |
+
openenv = "openenv.cli.__main__:main"
|
| 80 |
+
|
| 81 |
+
[tool.setuptools]
|
| 82 |
+
package-dir = {"" = "src"}
|
| 83 |
+
include-package-data = true
|
| 84 |
+
|
| 85 |
+
[tool.setuptools.package-data]
|
| 86 |
+
"openenv.cli" = ["templates/**/*"]
|
| 87 |
+
|
| 88 |
+
[tool.setuptools.packages.find]
|
| 89 |
+
where = ["src"]
|
| 90 |
+
|
| 91 |
+
[tool.coverage.run]
|
| 92 |
+
omit = [
|
| 93 |
+
"openenv/cli/templates/**",
|
| 94 |
+
"**/templates/**",
|
| 95 |
+
"openenv/cli/__main__.py",
|
| 96 |
+
]
|
| 97 |
+
|
| 98 |
+
[tool.coverage.report]
|
| 99 |
+
exclude_lines = [
|
| 100 |
+
"pragma: no cover",
|
| 101 |
+
"def __repr__",
|
| 102 |
+
"raise AssertionError",
|
| 103 |
+
"raise NotImplementedError",
|
| 104 |
+
"if __name__ == .__main__.:",
|
| 105 |
+
"if TYPE_CHECKING:",
|
| 106 |
+
]
|
| 107 |
+
|
| 108 |
+
[tool.pytest.ini_options]
|
| 109 |
+
asyncio_mode = "auto"
|
| 110 |
+
asyncio_default_fixture_loop_scope = "function"
|
| 111 |
+
markers = [
|
| 112 |
+
"docker: Tests that require Docker to be running",
|
| 113 |
+
"network: Tests that require network access (HuggingFace, etc.)",
|
| 114 |
+
"integration: Integration tests with external resources",
|
| 115 |
+
]
|
| 116 |
+
|
| 117 |
+
[dependency-groups]
|
| 118 |
+
dev = [
|
| 119 |
+
"ruff>=0.14.0",
|
| 120 |
+
"usort>=1.1.0",
|
| 121 |
+
"pytest>=7.0",
|
| 122 |
+
"pytest-asyncio>=0.21",
|
| 123 |
+
]
|
| 124 |
+
|
| 125 |
+
[tool.usort]
|
| 126 |
+
# Disable first_party auto-detection so all non-stdlib imports land in
|
| 127 |
+
# the same "third_party" bucket (the default_category). This matches
|
| 128 |
+
# pyfmt's usort behavior inside arc f, which groups openenv.* and env
|
| 129 |
+
# package imports together without blank-line separators.
|
| 130 |
+
first_party_detection = false
|
| 131 |
+
|
| 132 |
+
[tool.ruff]
|
| 133 |
+
line-length = 88
|
| 134 |
+
|
| 135 |
+
[tool.ruff.lint]
|
| 136 |
+
select = ["E", "F", "W"]
|
| 137 |
+
ignore = [
|
| 138 |
+
"E402", # Module level import not at top of file (needed for pytest.importorskip patterns)
|
| 139 |
+
"E501", # Line too long (not enforced previously, would require large refactor)
|
| 140 |
+
]
|
| 141 |
+
|
| 142 |
+
[tool.ruff.lint.per-file-ignores]
|
| 143 |
+
# Context manager variables that are intentionally unused
|
| 144 |
+
"tests/envs/test_websockets.py" = ["F841"]
|
| 145 |
+
"tests/test_cli/test_push.py" = ["F841"]
|
| 146 |
+
# Compatibility shim module
|
| 147 |
+
"src/openenv_core/__init__.py" = ["F401"]
|
server/Dockerfile
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Dockerfile for SUMO-RL Environment
# This image provides traffic signal control via SUMO (Simulation of Urban MObility)

# Configurable base image - defaults to local build, can be overridden for CI/CD
# Base image provides: fastapi, uvicorn, requests, curl, PYTHONPATH=/app/src
#
# Local build: docker build -t envtorch-base:latest -f src/core/containers/images/Dockerfile .
#              docker build -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
#
# CI/CD build: docker build --build-arg BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest \
#              -f envs/sumo_rl_env/server/Dockerfile -t sumo-rl-env:latest .
ARG BASE_IMAGE=envtorch-base:latest
FROM ${BASE_IMAGE}

# Install SUMO system dependencies
# SUMO is available in Debian repositories
RUN apt-get update && apt-get install -y --no-install-recommends \
    sumo \
    sumo-tools \
    && rm -rf /var/lib/apt/lists/*

# Set SUMO_HOME environment variable
ENV SUMO_HOME=/usr/share/sumo

# Install SUMO-RL and Python dependencies
# sumo-rl includes: gymnasium, pettingzoo, numpy, pandas, sumolib, traci
# NOTE: the version specifiers MUST be quoted — an unquoted ">=" is parsed
# by the shell as output redirection, which silently drops every version
# constraint (pip would install unpinned latest versions).
RUN pip install --no-cache-dir \
    "gymnasium>=0.28" \
    "pettingzoo>=1.24.3" \
    "numpy>=1.24.0" \
    "pandas>=2.0.0" \
    "sumolib>=1.14.0" \
    "traci>=1.14.0" \
    "sumo-rl>=1.4.5"

# Copy OpenEnv core (base image already set WORKDIR=/app)
COPY src/core/ /app/src/core/

# Copy SUMO-RL environment code (includes nets/)
COPY envs/sumo_rl_env/ /app/envs/sumo_rl_env/

# Copy example network files to expected location
# Default: single-intersection (simple 4-way intersection)
COPY envs/sumo_rl_env/nets/single-intersection/ /app/nets/single-intersection/

# SUMO environment variables (can be overridden at runtime)
ENV SUMO_NET_FILE=/app/nets/single-intersection/single-intersection.net.xml
ENV SUMO_ROUTE_FILE=/app/nets/single-intersection/single-intersection.rou.xml
ENV SUMO_NUM_SECONDS=20000
ENV SUMO_DELTA_TIME=5
ENV SUMO_YELLOW_TIME=2
ENV SUMO_MIN_GREEN=5
ENV SUMO_MAX_GREEN=50
ENV SUMO_REWARD_FN=diff-waiting-time
ENV SUMO_SEED=42

# Expose port
EXPOSE 8000

# Health check
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
    CMD curl -f http://localhost:8000/health || exit 1

# Run the FastAPI server
CMD ["uvicorn", "envs.sumo_rl_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
server/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""SUMO-RL environment server package."""
|
server/app.py
ADDED
|
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
FastAPI application for SUMO-RL environment server.
|
| 9 |
+
|
| 10 |
+
This module creates an HTTP server that exposes traffic signal control
|
| 11 |
+
via the OpenEnv API using SUMO (Simulation of Urban MObility).
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
|
| 16 |
+
from openenv.core.env_server import create_app
|
| 17 |
+
|
| 18 |
+
from ..models import SumoAction, SumoObservation
|
| 19 |
+
from .sumo_environment import SumoEnvironment
|
| 20 |
+
|
| 21 |
+
# Get configuration from environment variables.
# Defaults now match the paths the Dockerfile actually populates
# (/app/nets/single-intersection/...), so the server still boots when the
# SUMO_* variables are not set; previously the fallbacks pointed at
# /app/nets/single-intersection.net.xml, which does not exist in the image.
net_file = os.getenv(
    "SUMO_NET_FILE", "/app/nets/single-intersection/single-intersection.net.xml"
)
route_file = os.getenv(
    "SUMO_ROUTE_FILE", "/app/nets/single-intersection/single-intersection.rou.xml"
)
num_seconds = int(os.getenv("SUMO_NUM_SECONDS", "20000"))
delta_time = int(os.getenv("SUMO_DELTA_TIME", "5"))
yellow_time = int(os.getenv("SUMO_YELLOW_TIME", "2"))
min_green = int(os.getenv("SUMO_MIN_GREEN", "5"))
max_green = int(os.getenv("SUMO_MAX_GREEN", "50"))
reward_fn = os.getenv("SUMO_REWARD_FN", "diff-waiting-time")
sumo_seed = int(os.getenv("SUMO_SEED", "42"))
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
# Factory function to create SumoEnvironment instances
def create_sumo_environment():
    """Build a fresh SumoEnvironment from the module-level configuration.

    A factory (rather than a singleton instance) lets the server construct
    one environment per session.
    """
    settings = {
        "net_file": net_file,
        "route_file": route_file,
        "num_seconds": num_seconds,
        "delta_time": delta_time,
        "yellow_time": yellow_time,
        "min_green": min_green,
        "max_green": max_green,
        "reward_fn": reward_fn,
        "sumo_seed": sumo_seed,
    }
    return SumoEnvironment(**settings)
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
# Create the FastAPI app.
# A factory function (not a pre-built instance) is passed so the server can
# construct a separate environment per WebSocket session.
app = create_app(
    create_sumo_environment, SumoAction, SumoObservation, env_name="sumo_rl_env"
)
|
server/sumo_environment.py
ADDED
|
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
SUMO-RL Environment Server Implementation.
|
| 9 |
+
|
| 10 |
+
This module wraps the SUMO-RL SumoEnvironment and exposes it
|
| 11 |
+
via the OpenEnv Environment interface for traffic signal control.
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
import os
|
| 15 |
+
import uuid
|
| 16 |
+
from typing import Any, Dict
|
| 17 |
+
|
| 18 |
+
# Set SUMO_HOME before importing sumo_rl
|
| 19 |
+
os.environ.setdefault("SUMO_HOME", "/usr/share/sumo")
|
| 20 |
+
|
| 21 |
+
from openenv.core.env_server import Action, Environment, Observation
|
| 22 |
+
|
| 23 |
+
from ..models import SumoAction, SumoObservation, SumoState
|
| 24 |
+
|
| 25 |
+
# Import SUMO-RL
|
| 26 |
+
try:
|
| 27 |
+
from sumo_rl import SumoEnvironment as BaseSumoEnv
|
| 28 |
+
except ImportError as e:
|
| 29 |
+
raise ImportError(
|
| 30 |
+
"sumo-rl is not installed. Please install it with: pip install sumo-rl"
|
| 31 |
+
) from e
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
class SumoEnvironment(Environment):
    """
    SUMO-RL Environment wrapper for OpenEnv.

    Wraps the upstream ``sumo_rl.SumoEnvironment`` (imported as
    ``BaseSumoEnv``) in single-agent mode and exposes it through the
    OpenEnv ``Environment`` interface for traffic signal control.

    Args:
        net_file: Path to SUMO network file (.net.xml)
        route_file: Path to SUMO route file (.rou.xml)
        num_seconds: Simulation duration in seconds (default: 20000)
        delta_time: Seconds between agent actions (default: 5)
        yellow_time: Yellow phase duration in seconds (default: 2)
        min_green: Minimum green time in seconds (default: 5)
        max_green: Maximum green time in seconds (default: 50)
        reward_fn: Reward function name (default: "diff-waiting-time")
        sumo_seed: Random seed for reproducibility (default: 42)

    Example:
        >>> env = SumoEnvironment(
        ...     net_file="/app/nets/single-intersection.net.xml",
        ...     route_file="/app/nets/single-intersection.rou.xml"
        ... )
        >>> obs = env.reset()
        >>> print(obs.observation_shape)
        >>> obs = env.step(SumoAction(phase_id=1))
        >>> print(obs.reward, obs.done)
    """

    def __init__(
        self,
        net_file: str,
        route_file: str,
        num_seconds: int = 20000,
        delta_time: int = 5,
        yellow_time: int = 2,
        min_green: int = 5,
        max_green: int = 50,
        reward_fn: str = "diff-waiting-time",
        sumo_seed: int = 42,
    ):
        """Initialize SUMO traffic signal environment."""
        super().__init__()

        # Store configuration (mirrored into SumoState for reporting).
        self.net_file = net_file
        self.route_file = route_file
        self.num_seconds = num_seconds
        self.delta_time = delta_time
        self.yellow_time = yellow_time
        self.min_green = min_green
        self.max_green = max_green
        self.reward_fn = reward_fn
        self.sumo_seed = sumo_seed

        # Create SUMO environment (single-agent mode).
        # Key settings:
        # - use_gui=False: No GUI in Docker
        # - single_agent=True: Returns single obs/reward (not dict)
        # - sumo_warnings=False: Suppress SUMO warnings
        # - out_csv_name=None: Don't write CSV files
        self.env = BaseSumoEnv(
            net_file=net_file,
            route_file=route_file,
            use_gui=False,
            single_agent=True,
            num_seconds=num_seconds,
            delta_time=delta_time,
            yellow_time=yellow_time,
            min_green=min_green,
            max_green=max_green,
            reward_fn=reward_fn,
            sumo_seed=sumo_seed,
            sumo_warnings=False,
            out_csv_name=None,  # Disable CSV output
            add_system_info=True,
            add_per_agent_info=False,
        )

        # Initialize OpenEnv state tracking.
        self._state = SumoState(
            net_file=net_file,
            route_file=route_file,
            num_seconds=num_seconds,
            delta_time=delta_time,
            yellow_time=yellow_time,
            min_green=min_green,
            max_green=max_green,
            reward_fn=reward_fn,
        )

        # Last info dict returned by the underlying env (reset or step).
        self._last_info: Dict = {}

    def reset(self) -> Observation:
        """
        Reset the environment and return the initial observation.

        Returns:
            Initial SumoObservation (reward is None, done is False).
        """
        # Reset SUMO simulation.
        obs, info = self.env.reset()

        # Start a fresh episode.
        self._state.episode_id = str(uuid.uuid4())
        self._state.step_count = 0
        self._state.sim_time = 0.0

        # Store info for metadata.
        self._last_info = info

        return self._make_observation(obs, reward=None, done=False, info=info)

    def step(self, action: Action) -> Observation:
        """
        Execute agent's action and return resulting observation.

        Args:
            action: SumoAction containing the phase_id to execute.

        Returns:
            SumoObservation after action execution.

        Raises:
            ValueError: If action is not a SumoAction, or if phase_id is
                outside the valid range.
        """
        if not isinstance(action, SumoAction):
            raise ValueError(f"Expected SumoAction, got {type(action)}")

        # Validate phase_id against the discrete action space.
        num_phases = self.env.action_space.n
        if action.phase_id < 0 or action.phase_id >= num_phases:
            raise ValueError(
                f"Invalid phase_id: {action.phase_id}. "
                f"Valid range: [0, {num_phases - 1}]"
            )

        # Execute action in SUMO.
        # Gymnasium-style return: (obs, reward, terminated, truncated, info)
        obs, reward, terminated, truncated, info = self.env.step(action.phase_id)
        done = terminated or truncated

        # Update runtime metrics from SUMO's aggregate system info.
        self._state.step_count += 1
        self._state.sim_time = info.get("step", 0.0)
        self._state.total_vehicles = info.get("system_total_running", 0)
        self._state.total_waiting_time = info.get("system_total_waiting_time", 0.0)
        self._state.mean_waiting_time = info.get("system_mean_waiting_time", 0.0)
        self._state.mean_speed = info.get("system_mean_speed", 0.0)

        # Store info for metadata.
        self._last_info = info

        return self._make_observation(obs, reward=reward, done=done, info=info)

    @property
    def state(self) -> SumoState:
        """Get current environment state (configuration + runtime metrics)."""
        return self._state

    def _make_observation(
        self, obs: Any, reward: Optional[float], done: bool, info: Dict
    ) -> SumoObservation:
        """
        Create SumoObservation from SUMO environment output.

        Args:
            obs: Observation array from SUMO environment
            reward: Reward value (None on reset — hence Optional)
            done: Whether episode is complete
            info: Info dictionary from SUMO environment

        Returns:
            SumoObservation for the agent.
        """
        # numpy arrays expose tolist(); fall back to list() for sequences.
        if hasattr(obs, "tolist"):
            obs_list = obs.tolist()
        else:
            obs_list = list(obs)

        # Get action mask (all actions valid in SUMO-RL).
        num_phases = self.env.action_space.n
        action_mask = list(range(num_phases))

        # Extract system metrics for metadata.
        system_info = {k: v for k, v in info.items() if k.startswith("system_")}

        return SumoObservation(
            observation=obs_list,
            observation_shape=[len(obs_list)],
            action_mask=action_mask,
            sim_time=info.get("step", 0.0),
            done=done,
            reward=reward,
            metadata={
                "num_green_phases": num_phases,
                "system_info": system_info,
            },
        )
|
src/__init__.py
ADDED
|
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""EnvTorch: Standardized agentic execution environments."""
|
src/core/README.md
ADDED
|
@@ -0,0 +1,212 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# <img width="35" height="35" alt="image" src="https://github.com/user-attachments/assets/2700a971-e5d6-4036-b03f-2f89c9791609" /> OpenEnv: Agentic Execution Environments
|
| 2 |
+
|
| 3 |
+
An e2e framework for creating, deploying and using isolated execution environments for agentic RL training, built using Gymnasium style simple APIs. OpenEnv provides a standard for interacting with agentic execution environments via simple Gymnasium style APIs - step(), reset(), state(). Users of agentic execution environments can interact with the environment during RL training loops using these simple APIs.
|
| 4 |
+
|
| 5 |
+
In addition to making it easier for researchers and RL framework writers, we also provide tools for environment creators making it easier for them to create richer environments and make them available over familiar protocols like HTTP and packaged using canonical technologies like docker. Environment creators can use the OpenEnv framework to create environments that are isolated, secure, and easy to deploy and use.
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
## Overview
|
| 9 |
+
`openenv.core` provides the foundational building blocks for creating and interacting with containerized environments over HTTP. It enables you to build agent environments that can be deployed as Docker containers and accessed via a simple HTTP API.
|
| 10 |
+
|
| 11 |
+
> ⚠️ **Early Development Warning** OpenEnv is currently in an experimental
|
| 12 |
+
> stage. You should expect bugs, incomplete features, and APIs that may change
|
| 13 |
+
> in future versions. The project welcomes bugfixes, but to make sure things are
|
| 14 |
+
> well coordinated you should discuss any significant change before starting the
|
| 15 |
+
> work. It's recommended that you signal your intention to contribute in the
|
| 16 |
+
> issue tracker, either by filing a new issue or by claiming an existing one.
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# OpenEnv Core
|
| 20 |
+
|
| 21 |
+
Core components for OpenEnv - a framework for building HTTP-based agentic environments.
|
| 22 |
+
|
| 23 |
+
## Features
|
| 24 |
+
|
| 25 |
+
- **EnvClient**: Async-first client for interacting with remote environments
|
| 26 |
+
- **SyncEnvClient**: Synchronous wrapper via `.sync()` for sync codebases
|
| 27 |
+
- **HTTPEnvServer**: FastAPI-based server wrapper for exposing environments over HTTP/WebSocket
|
| 28 |
+
- **Container Providers**: Pluggable architecture for running containers (Docker, Kubernetes, etc.)
|
| 29 |
+
- **Type System**: Strongly-typed Action/Observation/State interfaces
|
| 30 |
+
- **Web Interface**: Optional web UI for interacting with environments
|
| 31 |
+
|
| 32 |
+
## Installation
|
| 33 |
+
|
| 34 |
+
```bash
|
| 35 |
+
pip install "openenv[core]"
|
| 36 |
+
```
|
| 37 |
+
|
| 38 |
+
For development:
|
| 39 |
+
```bash
|
| 40 |
+
pip install "openenv[core]"
|
| 41 |
+
```
|
| 42 |
+
|
| 43 |
+
## Quick Start
|
| 44 |
+
|
| 45 |
+
### Creating an Environment Client
|
| 46 |
+
|
| 47 |
+
EnvClient is **async by default**. Use `async with` and `await` for all operations:
|
| 48 |
+
|
| 49 |
+
```python
|
| 50 |
+
import asyncio
|
| 51 |
+
from openenv.core import EnvClient, StepResult
|
| 52 |
+
from dataclasses import dataclass
|
| 53 |
+
from typing import Any
|
| 54 |
+
|
| 55 |
+
@dataclass
|
| 56 |
+
class MyAction:
|
| 57 |
+
text: str
|
| 58 |
+
|
| 59 |
+
@dataclass
|
| 60 |
+
class MyObservation:
|
| 61 |
+
response: str
|
| 62 |
+
|
| 63 |
+
class MyEnvClient(EnvClient[MyAction, MyObservation, Any]):
|
| 64 |
+
def _step_payload(self, action: MyAction) -> dict:
|
| 65 |
+
return {"text": action.text}
|
| 66 |
+
|
| 67 |
+
def _parse_result(self, payload: dict) -> StepResult[MyObservation]:
|
| 68 |
+
obs_data = payload["observation"]
|
| 69 |
+
return StepResult(
|
| 70 |
+
observation=MyObservation(**obs_data),
|
| 71 |
+
reward=payload.get("reward"),
|
| 72 |
+
done=payload.get("done", False)
|
| 73 |
+
)
|
| 74 |
+
|
| 75 |
+
def _parse_state(self, payload: dict) -> Any:
|
| 76 |
+
return payload
|
| 77 |
+
|
| 78 |
+
# Async usage (recommended)
|
| 79 |
+
async def main():
|
| 80 |
+
client = await MyEnvClient.from_docker_image("my-env:latest")
|
| 81 |
+
async with client:
|
| 82 |
+
result = await client.reset()
|
| 83 |
+
step_result = await client.step(MyAction(text="hello"))
|
| 84 |
+
|
| 85 |
+
asyncio.run(main())
|
| 86 |
+
|
| 87 |
+
# Sync usage (via .sync() wrapper)
|
| 88 |
+
with MyEnvClient(base_url="http://localhost:8000").sync() as client:
|
| 89 |
+
result = client.reset()
|
| 90 |
+
step_result = client.step(MyAction(text="hello"))
|
| 91 |
+
```
|
| 92 |
+
|
| 93 |
+
### Creating an Environment Server
|
| 94 |
+
|
| 95 |
+
```python
|
| 96 |
+
from openenv.core.env_server import Environment, HTTPEnvServer, create_app
|
| 97 |
+
from dataclasses import dataclass
|
| 98 |
+
|
| 99 |
+
@dataclass
|
| 100 |
+
class MyAction:
|
| 101 |
+
text: str
|
| 102 |
+
|
| 103 |
+
@dataclass
|
| 104 |
+
class MyObservation:
|
| 105 |
+
response: str
|
| 106 |
+
reward: float = 0.0
|
| 107 |
+
done: bool = False
|
| 108 |
+
|
| 109 |
+
class MyEnvironment(Environment):
|
| 110 |
+
def reset(self) -> MyObservation:
|
| 111 |
+
return MyObservation(response="Ready")
|
| 112 |
+
|
| 113 |
+
def step(self, action: MyAction) -> MyObservation:
|
| 114 |
+
return MyObservation(
|
| 115 |
+
response=f"Echo: {action.text}",
|
| 116 |
+
reward=1.0,
|
| 117 |
+
done=False
|
| 118 |
+
)
|
| 119 |
+
|
| 120 |
+
# Create FastAPI app
|
| 121 |
+
env = MyEnvironment()
|
| 122 |
+
app = create_app(env, MyAction, MyObservation)
|
| 123 |
+
|
| 124 |
+
# Run with: uvicorn module:app --host 0.0.0.0 --port 8000
|
| 125 |
+
```
|
| 126 |
+
|
| 127 |
+
## Container Providers
|
| 128 |
+
|
| 129 |
+
OpenEnv Core supports multiple container providers:
|
| 130 |
+
|
| 131 |
+
### Local Docker Provider
|
| 132 |
+
|
| 133 |
+
```python
|
| 134 |
+
from openenv.core.containers.runtime import LocalDockerProvider
|
| 135 |
+
|
| 136 |
+
provider = LocalDockerProvider()
|
| 137 |
+
base_url = provider.start_container("my-env:latest")
|
| 138 |
+
provider.wait_for_ready(base_url)
|
| 139 |
+
# Use environment...
|
| 140 |
+
provider.stop_container()
|
| 141 |
+
```
|
| 142 |
+
|
| 143 |
+
### Kubernetes Provider (Coming Soon)
|
| 144 |
+
|
| 145 |
+
```python
|
| 146 |
+
from openenv.core.containers.runtime import KubernetesProvider
|
| 147 |
+
|
| 148 |
+
provider = KubernetesProvider(namespace="envs")
|
| 149 |
+
base_url = provider.start_container("my-env:latest")
|
| 150 |
+
# Use environment...
|
| 151 |
+
provider.stop_container()
|
| 152 |
+
```
|
| 153 |
+
|
| 154 |
+
|
| 155 |
+
## API Reference
|
| 156 |
+
|
| 157 |
+
### EnvClient
|
| 158 |
+
|
| 159 |
+
Async base class for environment clients. Key methods:
|
| 160 |
+
|
| 161 |
+
- `async connect()`: Establish WebSocket connection
|
| 162 |
+
- `async reset(**kwargs)`: Reset environment
|
| 163 |
+
- `async step(action)`: Execute action
|
| 164 |
+
- `async state()`: Get current state
|
| 165 |
+
- `async close()`: Close connection and cleanup
|
| 166 |
+
- `sync()`: Return a SyncEnvClient wrapper for synchronous usage
|
| 167 |
+
|
| 168 |
+
Abstract methods to implement:
|
| 169 |
+
- `_step_payload(action)`: Convert action to JSON
|
| 170 |
+
- `_parse_result(payload)`: Parse response to StepResult
|
| 171 |
+
- `_parse_state(payload)`: Parse state response
|
| 172 |
+
|
| 173 |
+
### SyncEnvClient
|
| 174 |
+
|
| 175 |
+
Synchronous wrapper around EnvClient. Use `client.sync()` to get one:
|
| 176 |
+
|
| 177 |
+
```python
|
| 178 |
+
sync_client = async_client.sync()
|
| 179 |
+
with sync_client:
|
| 180 |
+
result = sync_client.reset()
|
| 181 |
+
result = sync_client.step(action)
|
| 182 |
+
```
|
| 183 |
+
|
| 184 |
+
### HTTPEnvServer
|
| 185 |
+
|
| 186 |
+
Server wrapper with these methods:
|
| 187 |
+
|
| 188 |
+
- `register_routes(app)`: Register endpoints on FastAPI app
|
| 189 |
+
- `_deserialize_action(data)`: Convert JSON to Action
|
| 190 |
+
- `_serialize_observation(obs)`: Convert Observation to JSON
|
| 191 |
+
|
| 192 |
+
### Environment Interface
|
| 193 |
+
|
| 194 |
+
Base interface for environment implementations:
|
| 195 |
+
|
| 196 |
+
- `reset()`: Reset environment and return initial observation
|
| 197 |
+
- `step(action)`: Execute action and return observation
|
| 198 |
+
- `state`: Property returning current environment state
|
| 199 |
+
|
| 200 |
+
## License
|
| 201 |
+
|
| 202 |
+
This project is licensed under the BSD-3-Clause License - see the LICENSE file for details.
|
| 203 |
+
|
| 204 |
+
## Contributing
|
| 205 |
+
|
| 206 |
+
Contributions are welcome! Please see the main OpenEnv repository for contribution guidelines.
|
| 207 |
+
|
| 208 |
+
## Links
|
| 209 |
+
|
| 210 |
+
- **Homepage**: https://github.com/meta-pytorch/OpenEnv
|
| 211 |
+
- **Documentation**: https://github.com/meta-pytorch/OpenEnv/blob/main/README.md
|
| 212 |
+
- **Bug Tracker**: https://github.com/meta-pytorch/OpenEnv/issues
|
src/core/__init__.py
CHANGED
|
@@ -6,14 +6,76 @@
|
|
| 6 |
|
| 7 |
"""Core components for agentic environments."""
|
| 8 |
|
| 9 |
-
|
| 10 |
-
from .env_server import *
|
| 11 |
-
from .http_env_client import HTTPEnvClient
|
| 12 |
-
from .types import StepResult
|
| 13 |
|
| 14 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 15 |
|
| 16 |
__all__ = [
|
| 17 |
-
"
|
| 18 |
-
"
|
| 19 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
"""Core components for agentic environments."""
|
| 8 |
|
| 9 |
+
from __future__ import annotations
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
+
from importlib import import_module
|
| 12 |
+
from typing import TYPE_CHECKING
|
| 13 |
+
|
| 14 |
+
from . import env_server
|
| 15 |
+
from .env_server import * # noqa: F403
|
| 16 |
+
|
| 17 |
+
if TYPE_CHECKING:
|
| 18 |
+
from .env_client import EnvClient
|
| 19 |
+
from .generic_client import GenericAction, GenericEnvClient
|
| 20 |
+
from .llm_client import (
|
| 21 |
+
AnthropicClient,
|
| 22 |
+
create_llm_client,
|
| 23 |
+
LLMClient,
|
| 24 |
+
LLMResponse,
|
| 25 |
+
OpenAIClient,
|
| 26 |
+
ToolCall,
|
| 27 |
+
)
|
| 28 |
+
from .mcp_client import MCPClientBase, MCPToolClient
|
| 29 |
+
from .sync_client import SyncEnvClient
|
| 30 |
|
| 31 |
__all__ = [
|
| 32 |
+
"EnvClient",
|
| 33 |
+
"SyncEnvClient",
|
| 34 |
+
"GenericEnvClient",
|
| 35 |
+
"GenericAction",
|
| 36 |
+
"MCPClientBase",
|
| 37 |
+
"MCPToolClient",
|
| 38 |
+
"AnthropicClient",
|
| 39 |
+
"LLMClient",
|
| 40 |
+
"LLMResponse",
|
| 41 |
+
"OpenAIClient",
|
| 42 |
+
"ToolCall",
|
| 43 |
+
"create_llm_client",
|
| 44 |
+
] + env_server.__all__ # type: ignore
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
_LAZY_ATTRS = {
|
| 48 |
+
"EnvClient": (".env_client", "EnvClient"),
|
| 49 |
+
"SyncEnvClient": (".sync_client", "SyncEnvClient"),
|
| 50 |
+
"GenericEnvClient": (".generic_client", "GenericEnvClient"),
|
| 51 |
+
"GenericAction": (".generic_client", "GenericAction"),
|
| 52 |
+
"MCPClientBase": (".mcp_client", "MCPClientBase"),
|
| 53 |
+
"MCPToolClient": (".mcp_client", "MCPToolClient"),
|
| 54 |
+
"AnthropicClient": (".llm_client", "AnthropicClient"),
|
| 55 |
+
"LLMClient": (".llm_client", "LLMClient"),
|
| 56 |
+
"LLMResponse": (".llm_client", "LLMResponse"),
|
| 57 |
+
"OpenAIClient": (".llm_client", "OpenAIClient"),
|
| 58 |
+
"ToolCall": (".llm_client", "ToolCall"),
|
| 59 |
+
"create_llm_client": (".llm_client", "create_llm_client"),
|
| 60 |
+
}
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def __getattr__(name: str):
|
| 64 |
+
if name in _LAZY_ATTRS:
|
| 65 |
+
module_path, attr_name = _LAZY_ATTRS[name]
|
| 66 |
+
module = import_module(module_path, __name__)
|
| 67 |
+
value = getattr(module, attr_name)
|
| 68 |
+
globals()[name] = value
|
| 69 |
+
return value
|
| 70 |
+
|
| 71 |
+
try:
|
| 72 |
+
value = getattr(env_server, name)
|
| 73 |
+
except AttributeError as exc:
|
| 74 |
+
raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from exc
|
| 75 |
+
|
| 76 |
+
globals()[name] = value
|
| 77 |
+
return value
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
def __dir__() -> list[str]:
|
| 81 |
+
return sorted(set(globals().keys()) | set(__all__))
|
src/core/client_types.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Type definitions for EnvTorch
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import Generic, Optional, TypeVar
|
| 4 |
+
|
| 5 |
+
# Generic type for observations
|
| 6 |
+
ObsT = TypeVar("ObsT")
|
| 7 |
+
StateT = TypeVar("StateT")
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
@dataclass
|
| 11 |
+
class StepResult(Generic[ObsT]):
|
| 12 |
+
"""
|
| 13 |
+
Represents the result of one environment step.
|
| 14 |
+
|
| 15 |
+
Attributes:
|
| 16 |
+
observation: The environment's observation after the action.
|
| 17 |
+
reward: Scalar reward for this step (optional).
|
| 18 |
+
done: Whether the episode is finished.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
observation: ObsT
|
| 22 |
+
reward: Optional[float] = None
|
| 23 |
+
done: bool = False
|
src/core/containers/__init__.py
CHANGED
|
@@ -4,4 +4,4 @@
|
|
| 4 |
# This source code is licensed under the BSD-style license found in the
|
| 5 |
# LICENSE file in the root directory of this source tree.
|
| 6 |
|
| 7 |
-
"""Container management for environment servers."""
|
|
|
|
| 4 |
# This source code is licensed under the BSD-style license found in the
|
| 5 |
# LICENSE file in the root directory of this source tree.
|
| 6 |
|
| 7 |
+
"""Container management for environment servers."""
|
src/core/containers/images/Dockerfile
CHANGED
|
@@ -8,30 +8,47 @@
|
|
| 8 |
# OpenEnv Base Image
|
| 9 |
#
|
| 10 |
# This is the standard base image for all OpenEnv environment servers.
|
| 11 |
-
# It includes the minimal dependencies needed to run HTTP environment servers
|
|
|
|
| 12 |
#
|
| 13 |
-
# Build: docker build -t openenv-base:latest -f src/core/containers/images/Dockerfile .
|
| 14 |
-
# Tag: docker tag openenv-base:latest openenv-base:0.
|
| 15 |
#
|
| 16 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 17 |
FROM python:3.11-slim
|
| 18 |
|
| 19 |
# Set metadata
|
| 20 |
LABEL maintainer="OpenEnv Team"
|
| 21 |
-
LABEL description="Base image for OpenEnv based environment servers"
|
| 22 |
-
LABEL version="0.
|
| 23 |
|
| 24 |
# Install system dependencies
|
| 25 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 26 |
curl \
|
|
|
|
| 27 |
&& rm -rf /var/lib/apt/lists/*
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
|
| 31 |
-
|
| 32 |
-
|
| 33 |
-
|
| 34 |
-
|
|
|
|
|
|
|
| 35 |
|
| 36 |
# Set working directory
|
| 37 |
WORKDIR /app
|
|
@@ -39,6 +56,7 @@ WORKDIR /app
|
|
| 39 |
# Default environment variables
|
| 40 |
ENV PYTHONPATH=/app/src
|
| 41 |
ENV PYTHONUNBUFFERED=1
|
|
|
|
| 42 |
|
| 43 |
# Default expose port (can be overridden)
|
| 44 |
EXPOSE 8000
|
|
|
|
| 8 |
# OpenEnv Base Image
|
| 9 |
#
|
| 10 |
# This is the standard base image for all OpenEnv environment servers.
|
| 11 |
+
# It includes the minimal dependencies needed to run HTTP environment servers
|
| 12 |
+
# and uv for fast dependency management.
|
| 13 |
#
|
| 14 |
+
# Build from repo root: docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 15 |
+
# Tag: docker tag openenv-base:latest openenv-base:0.2.0
|
| 16 |
#
|
| 17 |
|
| 18 |
+
FROM ghcr.io/astral-sh/uv:0.5.27-python3.11-bookworm-slim AS builder
|
| 19 |
+
|
| 20 |
+
# Set working directory
|
| 21 |
+
WORKDIR /app
|
| 22 |
+
|
| 23 |
+
# Copy core pyproject.toml and lockfile for dependency installation
|
| 24 |
+
COPY pyproject.toml uv.lock* ./
|
| 25 |
+
|
| 26 |
+
# Install core dependencies using uv with cache mount
|
| 27 |
+
RUN --mount=type=cache,target=/root/.cache/uv \
|
| 28 |
+
uv pip install --system -r pyproject.toml
|
| 29 |
+
|
| 30 |
+
# Final runtime stage
|
| 31 |
FROM python:3.11-slim
|
| 32 |
|
| 33 |
# Set metadata
|
| 34 |
LABEL maintainer="OpenEnv Team"
|
| 35 |
+
LABEL description="Base image for OpenEnv based environment servers with uv"
|
| 36 |
+
LABEL version="0.2.0"
|
| 37 |
|
| 38 |
# Install system dependencies
|
| 39 |
RUN apt-get update && apt-get install -y --no-install-recommends \
|
| 40 |
curl \
|
| 41 |
+
ca-certificates \
|
| 42 |
&& rm -rf /var/lib/apt/lists/*
|
| 43 |
|
| 44 |
+
# Copy uv from builder
|
| 45 |
+
COPY --from=builder /usr/local/bin/uv /usr/local/bin/uvx /usr/local/bin/
|
| 46 |
+
|
| 47 |
+
# Copy installed Python packages from builder
|
| 48 |
+
COPY --from=builder /usr/local/lib/python3.11/site-packages /usr/local/lib/python3.11/site-packages
|
| 49 |
+
|
| 50 |
+
# Copy console scripts installed by pip (uvicorn, fastapi, etc.)
|
| 51 |
+
COPY --from=builder /usr/local/bin/uvicorn /usr/local/bin/fastapi /usr/local/bin/
|
| 52 |
|
| 53 |
# Set working directory
|
| 54 |
WORKDIR /app
|
|
|
|
| 56 |
# Default environment variables
|
| 57 |
ENV PYTHONPATH=/app/src
|
| 58 |
ENV PYTHONUNBUFFERED=1
|
| 59 |
+
ENV UV_SYSTEM_PYTHON=1
|
| 60 |
|
| 61 |
# Default expose port (can be overridden)
|
| 62 |
EXPOSE 8000
|
src/core/containers/images/README.md
CHANGED
|
@@ -36,7 +36,7 @@ Total: 465 MB (base shared, minimal duplication)
|
|
| 36 |
|
| 37 |
```bash
|
| 38 |
# From project root
|
| 39 |
-
docker build -t openenv-base:latest -f src/core/containers/images/Dockerfile .
|
| 40 |
```
|
| 41 |
|
| 42 |
## Usage in Environment Dockerfiles
|
|
@@ -47,8 +47,8 @@ Each environment Dockerfile should start with:
|
|
| 47 |
FROM openenv-base:latest
|
| 48 |
|
| 49 |
# Copy only environment-specific files
|
| 50 |
-
COPY src/core/ /app/src/core/
|
| 51 |
-
COPY
|
| 52 |
|
| 53 |
# Run the server
|
| 54 |
CMD ["uvicorn", "envs.my_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
|
@@ -66,10 +66,10 @@ CMD ["uvicorn", "envs.my_env.server.app:app", "--host", "0.0.0.0", "--port", "80
|
|
| 66 |
|
| 67 |
```bash
|
| 68 |
# Step 1: Build base image (do this once)
|
| 69 |
-
docker build -t openenv-base:latest -f src/core/containers/images/Dockerfile .
|
| 70 |
|
| 71 |
# Step 2: Build echo environment (uses base)
|
| 72 |
-
docker build -t echo-env:latest -f
|
| 73 |
|
| 74 |
# Step 3: Run echo environment
|
| 75 |
docker run -p 8000:8000 echo-env:latest
|
|
@@ -79,14 +79,14 @@ docker run -p 8000:8000 echo-env:latest
|
|
| 79 |
|
| 80 |
When dependencies need updating:
|
| 81 |
|
| 82 |
-
1. Update `src/core/containers/images/Dockerfile`
|
| 83 |
2. Rebuild base image
|
| 84 |
3. Rebuild all environment images (they'll use new base)
|
| 85 |
|
| 86 |
```bash
|
| 87 |
# Update base
|
| 88 |
-
docker build -t openenv-base:latest -f src/core/containers/images/Dockerfile .
|
| 89 |
|
| 90 |
# Rebuild environments (they automatically use new base)
|
| 91 |
-
docker build -t echo-env:latest -f
|
| 92 |
```
|
|
|
|
| 36 |
|
| 37 |
```bash
|
| 38 |
# From project root
|
| 39 |
+
docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 40 |
```
|
| 41 |
|
| 42 |
## Usage in Environment Dockerfiles
|
|
|
|
| 47 |
FROM openenv-base:latest
|
| 48 |
|
| 49 |
# Copy only environment-specific files
|
| 50 |
+
COPY src/openenv/core/ /app/src/openenv/core/
|
| 51 |
+
COPY envs/my_env/ /app/envs/my_env/
|
| 52 |
|
| 53 |
# Run the server
|
| 54 |
CMD ["uvicorn", "envs.my_env.server.app:app", "--host", "0.0.0.0", "--port", "8000"]
|
|
|
|
| 66 |
|
| 67 |
```bash
|
| 68 |
# Step 1: Build base image (do this once)
|
| 69 |
+
docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 70 |
|
| 71 |
# Step 2: Build echo environment (uses base)
|
| 72 |
+
docker build -t echo-env:latest -f envs/echo_env/server/Dockerfile .
|
| 73 |
|
| 74 |
# Step 3: Run echo environment
|
| 75 |
docker run -p 8000:8000 echo-env:latest
|
|
|
|
| 79 |
|
| 80 |
When dependencies need updating:
|
| 81 |
|
| 82 |
+
1. Update `src/openenv/core/containers/images/Dockerfile`
|
| 83 |
2. Rebuild base image
|
| 84 |
3. Rebuild all environment images (they'll use new base)
|
| 85 |
|
| 86 |
```bash
|
| 87 |
# Update base
|
| 88 |
+
docker build -t openenv-base:latest -f src/openenv/core/containers/images/Dockerfile .
|
| 89 |
|
| 90 |
# Rebuild environments (they automatically use new base)
|
| 91 |
+
docker build -t echo-env:latest -f envs/echo_env/server/Dockerfile .
|
| 92 |
```
|
src/core/containers/runtime/__init__.py
CHANGED
|
@@ -6,10 +6,20 @@
|
|
| 6 |
|
| 7 |
"""Container runtime providers."""
|
| 8 |
|
| 9 |
-
from .providers import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 10 |
|
| 11 |
__all__ = [
|
| 12 |
"ContainerProvider",
|
|
|
|
| 13 |
"LocalDockerProvider",
|
| 14 |
"KubernetesProvider",
|
| 15 |
-
|
|
|
|
|
|
|
|
|
| 6 |
|
| 7 |
"""Container runtime providers."""
|
| 8 |
|
| 9 |
+
from .providers import (
|
| 10 |
+
ContainerProvider,
|
| 11 |
+
DockerSwarmProvider,
|
| 12 |
+
KubernetesProvider,
|
| 13 |
+
LocalDockerProvider,
|
| 14 |
+
RuntimeProvider,
|
| 15 |
+
)
|
| 16 |
+
from .uv_provider import UVProvider
|
| 17 |
|
| 18 |
__all__ = [
|
| 19 |
"ContainerProvider",
|
| 20 |
+
"DockerSwarmProvider",
|
| 21 |
"LocalDockerProvider",
|
| 22 |
"KubernetesProvider",
|
| 23 |
+
"RuntimeProvider",
|
| 24 |
+
"UVProvider",
|
| 25 |
+
]
|
src/core/containers/runtime/daytona_provider.py
ADDED
|
@@ -0,0 +1,572 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Daytona container provider for running OpenEnv environments in Daytona cloud sandboxes.
|
| 9 |
+
|
| 10 |
+
Requires the ``daytona`` SDK: ``pip install daytona>=0.10``
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
from __future__ import annotations
|
| 14 |
+
|
| 15 |
+
import json
|
| 16 |
+
import os
|
| 17 |
+
import shlex
|
| 18 |
+
import time
|
| 19 |
+
from typing import Any, Callable, Dict, Optional
|
| 20 |
+
|
| 21 |
+
import yaml
|
| 22 |
+
|
| 23 |
+
from .providers import ContainerProvider
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class DaytonaProvider(ContainerProvider):
|
| 27 |
+
"""
|
| 28 |
+
Container provider that runs environments in Daytona cloud sandboxes.
|
| 29 |
+
|
| 30 |
+
Example:
|
| 31 |
+
>>> provider = DaytonaProvider(api_key="your-key")
|
| 32 |
+
>>> image = DaytonaProvider.image_from_dockerfile("envs/echo_env/server/Dockerfile")
|
| 33 |
+
>>> base_url = provider.start_container(image)
|
| 34 |
+
>>> provider.wait_for_ready(base_url)
|
| 35 |
+
>>> provider.stop_container()
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
_dockerfile_registry: Dict[str, Dict[str, Any]] = {}
|
| 39 |
+
|
| 40 |
+
def __init__(
|
| 41 |
+
self,
|
| 42 |
+
*,
|
| 43 |
+
api_key: Optional[str] = None,
|
| 44 |
+
public: bool = False,
|
| 45 |
+
resources: Optional[Any] = None,
|
| 46 |
+
auto_stop_interval: int = 15,
|
| 47 |
+
target: Optional[str] = None,
|
| 48 |
+
on_snapshot_create_logs: Optional[Callable[[str], None]] = None,
|
| 49 |
+
cmd: Optional[str] = None,
|
| 50 |
+
create_timeout: float = 300,
|
| 51 |
+
):
|
| 52 |
+
"""
|
| 53 |
+
Args:
|
| 54 |
+
api_key: Daytona API key. Falls back to ``DAYTONA_API_KEY`` env var.
|
| 55 |
+
public: If True, the sandbox preview is publicly accessible.
|
| 56 |
+
resources: Optional ``daytona.Resources`` instance for CPU/memory.
|
| 57 |
+
auto_stop_interval: Minutes of inactivity before auto-stop (0 disables).
|
| 58 |
+
target: Daytona target region (e.g. "us").
|
| 59 |
+
on_snapshot_create_logs: Callback for snapshot build log lines.
|
| 60 |
+
cmd: Shell command to start the server inside the sandbox.
|
| 61 |
+
create_timeout: Seconds to wait for sandbox creation (default 300).
|
| 62 |
+
Heavy images (e.g. with Playwright/Chromium) may need more.
|
| 63 |
+
"""
|
| 64 |
+
from daytona import Daytona, DaytonaConfig
|
| 65 |
+
|
| 66 |
+
config_kwargs: Dict[str, Any] = {}
|
| 67 |
+
resolved_key = api_key or os.environ.get("DAYTONA_API_KEY")
|
| 68 |
+
if resolved_key:
|
| 69 |
+
config_kwargs["api_key"] = resolved_key
|
| 70 |
+
if target:
|
| 71 |
+
config_kwargs["target"] = target
|
| 72 |
+
|
| 73 |
+
self._daytona = Daytona(DaytonaConfig(**config_kwargs))
|
| 74 |
+
self._public = public
|
| 75 |
+
self._resources = resources
|
| 76 |
+
self._auto_stop_interval = auto_stop_interval
|
| 77 |
+
self._on_snapshot_create_logs = on_snapshot_create_logs
|
| 78 |
+
self._cmd = cmd
|
| 79 |
+
self._create_timeout = create_timeout
|
| 80 |
+
self._sandbox: Any = None
|
| 81 |
+
self._preview_url: Optional[str] = None
|
| 82 |
+
|
| 83 |
+
def _discover_server_cmd(self, sandbox: Any, port: int = 8000) -> str:
|
| 84 |
+
"""Discover the server command from ``openenv.yaml`` inside *sandbox*.
|
| 85 |
+
|
| 86 |
+
Finds the file, reads the ``app`` field, and constructs a command
|
| 87 |
+
of the form ``cd <env_root> && python -m uvicorn <app> --host 0.0.0.0 --port <port>``.
|
| 88 |
+
|
| 89 |
+
Raises:
|
| 90 |
+
ValueError: If ``openenv.yaml`` is not found or lacks an ``app`` field.
|
| 91 |
+
"""
|
| 92 |
+
yaml_path = self._find_openenv_yaml(sandbox)
|
| 93 |
+
if yaml_path is None:
|
| 94 |
+
raise ValueError(
|
| 95 |
+
"Could not find openenv.yaml inside the sandbox. "
|
| 96 |
+
"Pass an explicit cmd= to DaytonaProvider or start_container()."
|
| 97 |
+
)
|
| 98 |
+
|
| 99 |
+
cat_resp = sandbox.process.exec(f"cat {shlex.quote(yaml_path)}", timeout=10)
|
| 100 |
+
content = cat_resp.result if hasattr(cat_resp, "result") else str(cat_resp)
|
| 101 |
+
app = self._parse_app_field(content)
|
| 102 |
+
if app is None:
|
| 103 |
+
raise ValueError(
|
| 104 |
+
f"openenv.yaml at {yaml_path} does not contain an 'app' field. "
|
| 105 |
+
"Pass an explicit cmd= to DaytonaProvider or start_container()."
|
| 106 |
+
)
|
| 107 |
+
|
| 108 |
+
# The directory containing openenv.yaml is the env root
|
| 109 |
+
env_root = yaml_path.rsplit("/", 1)[0]
|
| 110 |
+
return (
|
| 111 |
+
f"cd {shlex.quote(env_root)} && "
|
| 112 |
+
f"python -m uvicorn {shlex.quote(app)} --host 0.0.0.0 --port {port}"
|
| 113 |
+
)
|
| 114 |
+
|
| 115 |
+
def _find_openenv_yaml(self, sandbox: Any) -> Optional[str]:
    """Locate ``openenv.yaml`` inside the sandbox.

    Tries the modern layout path ``/app/env/openenv.yaml`` first,
    then falls back to a ``find`` command for the old layout.

    Returns:
        Absolute path to the manifest, or ``None`` if it cannot be found.
    """
    # Fast path: modern Dockerfile layout
    resp = sandbox.process.exec(
        "test -f /app/env/openenv.yaml && echo found", timeout=10
    )
    out = resp.result if hasattr(resp, "result") else str(resp)
    if "found" in (out or ""):
        return "/app/env/openenv.yaml"

    # Fallback: search for it (redirect stderr so error messages
    # like "No such file or directory" don't get mistaken for paths).
    resp = sandbox.process.exec(
        "find /app -maxdepth 4 -name openenv.yaml -print -quit 2>/dev/null",
        timeout=10,
    )
    # Normalize before stripping. The previous expression was
    # `(resp.result if hasattr(...) else str(resp) or "").strip()`:
    # the `or ""` bound only to `str(resp)`, so a None `resp.result`
    # reached `.strip()` and raised AttributeError.
    raw = resp.result if hasattr(resp, "result") else str(resp)
    path = (raw or "").strip()
    if path and path.startswith("/"):
        return path

    return None
|
| 140 |
+
|
| 141 |
+
@staticmethod
def _parse_app_field(yaml_content: str) -> Optional[str]:
    """Extract the ``app`` value from raw openenv.yaml content.

    Uses PyYAML to handle comments, quotes, and nested keys correctly.
    Returns ``None`` when the content is unparseable, is not a mapping,
    or the ``app`` value is missing/blank/non-string.
    """
    try:
        parsed = yaml.safe_load(yaml_content) or {}
    except Exception:
        # Malformed YAML: caller falls back to other discovery methods.
        return None

    if not isinstance(parsed, dict):
        return None

    app = parsed.get("app")
    if not isinstance(app, str):
        return None
    app = app.strip()
    return app or None
|
| 160 |
+
|
| 161 |
+
@staticmethod
|
| 162 |
+
def _parse_dockerfile_cmd(dockerfile_content: str) -> Optional[str]:
|
| 163 |
+
"""Extract the server command from the last ``CMD`` in a Dockerfile.
|
| 164 |
+
|
| 165 |
+
Handles exec form (``CMD ["prog", "arg"]``) and shell form
|
| 166 |
+
(``CMD prog arg``). When a Dockerfile has multiple ``CMD``
|
| 167 |
+
instructions (e.g. multi-stage builds), the last one wins - same
|
| 168 |
+
semantics as Docker itself. Lines where ``CMD`` appears inside a
|
| 169 |
+
comment are ignored.
|
| 170 |
+
|
| 171 |
+
Returns:
|
| 172 |
+
The command as a single string, or ``None`` if no ``CMD`` found.
|
| 173 |
+
"""
|
| 174 |
+
import re
|
| 175 |
+
|
| 176 |
+
last_cmd: Optional[str] = None
|
| 177 |
+
for line in dockerfile_content.splitlines():
|
| 178 |
+
stripped = line.strip()
|
| 179 |
+
# Skip comments
|
| 180 |
+
if stripped.startswith("#"):
|
| 181 |
+
continue
|
| 182 |
+
match = re.match(r"CMD\s+(.+)", stripped, flags=re.IGNORECASE)
|
| 183 |
+
if match:
|
| 184 |
+
last_cmd = match.group(1).strip()
|
| 185 |
+
|
| 186 |
+
if last_cmd is None:
|
| 187 |
+
return None
|
| 188 |
+
|
| 189 |
+
# Exec form: CMD ["executable", "param1", ...]
|
| 190 |
+
if last_cmd.startswith("["):
|
| 191 |
+
try:
|
| 192 |
+
parts = json.loads(last_cmd)
|
| 193 |
+
if isinstance(parts, list) and all(isinstance(p, str) for p in parts):
|
| 194 |
+
return " ".join(parts)
|
| 195 |
+
except (json.JSONDecodeError, TypeError):
|
| 196 |
+
pass
|
| 197 |
+
|
| 198 |
+
# Shell form: CMD executable param1 ...
|
| 199 |
+
return last_cmd if last_cmd else None
|
| 200 |
+
|
| 201 |
+
@staticmethod
def strip_buildkit_syntax(dockerfile_content: str) -> str:
    """Remove BuildKit ``--mount=...`` flags from ``RUN`` instructions.

    Handles single-line flags, multi-line continuations, and multiple
    ``--mount`` flags spread across continuation lines. Only leading
    ``--mount`` flags are removed (before the actual command starts).

    Daytona's ``Image.from_dockerfile`` does not support BuildKit
    ``--mount`` syntax. This helper strips the flags so that standard
    Dockerfiles (like the ones generated by ``openenv build``) can
    be used directly.
    """
    import re

    def strip_leading_mounts(text: str) -> str:
        # Peel off consecutive leading "--mount=..." tokens until the
        # real command text (or an empty string) remains.
        remaining = text
        while True:
            match = re.match(r"\s*--mount=\S+\s*", remaining)
            if not match:
                return remaining
            remaining = remaining[match.end() :]

    lines = dockerfile_content.split("\n")
    result: list[str] = []
    # Two-flag state machine: in_run means we are inside a (possibly
    # backslash-continued) RUN instruction; in_mount_prefix means we are
    # still in the flag region that precedes the actual command.
    in_run = False
    in_mount_prefix = False

    for line in lines:
        line_out = line
        run_start = False
        if re.match(r"\s*RUN(\s+|$)", line, flags=re.IGNORECASE):
            in_run = True
            in_mount_prefix = True
            run_start = True

        if in_run and in_mount_prefix:
            # Remember whether this physical line continued onto the next
            # one, so the continuation backslash can be restored below if
            # stripping happens to remove it.
            original_ends_with_slash = line_out.rstrip().endswith("\\")
            if run_start:
                # Separate the "RUN " keyword from the remainder so only
                # the text after the keyword is scanned for flags.
                match = re.match(r"(\s*RUN\s+)(.*)$", line_out, flags=re.IGNORECASE)
                if match:
                    run_prefix, remainder = match.group(1), match.group(2)
                else:
                    # Bare "RUN" with nothing after it on this line.
                    run_prefix, remainder = line_out, ""
                new_remainder = strip_leading_mounts(remainder)
                line_out = run_prefix + new_remainder
                content_for_check = new_remainder
            else:
                # Continuation line: the whole line may still be flags.
                new_remainder = strip_leading_mounts(line_out)
                line_out = new_remainder
                content_for_check = new_remainder

            # Restore the continuation backslash if stripping ate it.
            if original_ends_with_slash and not line_out.rstrip().endswith("\\"):
                line_out = line_out.rstrip() + " \\"

            # Once real command text appears, subsequent lines belong to
            # the command itself and must be left untouched.
            if content_for_check.strip() not in ("", "\\"):
                in_mount_prefix = False

        # A RUN instruction ends on the first line that lacks a trailing
        # backslash continuation.
        if in_run and not line_out.rstrip().endswith("\\"):
            in_run = False
            in_mount_prefix = False

        result.append(line_out)

    return "\n".join(result)
|
| 266 |
+
|
| 267 |
+
@classmethod
def image_from_dockerfile(
    cls,
    dockerfile_path: str,
    context_dir: str | None = None,
) -> str:
    """Validate a Dockerfile and return a ``dockerfile:`` URI for
    :meth:`start_container`.

    Eagerly validates the Dockerfile (existence, COPY sources,
    BuildKit stripping) and stores the processed content in an
    internal registry. The actual ``daytona.Image`` is created
    later inside ``start_container``.

    Args:
        dockerfile_path: Path to the Dockerfile on disk.
        context_dir: Build context directory. Defaults to the
            Dockerfile's grandparent directory, matching the
            ``openenv init`` convention where Dockerfiles live in
            ``<env>/server/Dockerfile`` and the build context is
            ``<env>/``. Pass explicitly for non-standard layouts
            (e.g. ``context_dir="."`` for repo-root contexts).

    Returns:
        A ``"dockerfile:<abs_path>"`` string to pass to
        ``start_container``.

    Raises:
        FileNotFoundError: If *dockerfile_path* does not exist.
        ValueError: If *context_dir* is given but does not exist,
            or if COPY sources in the Dockerfile cannot be found
            under the resolved context directory.
    """
    import pathlib
    import re

    src = pathlib.Path(dockerfile_path).resolve()
    if not src.is_file():
        raise FileNotFoundError(f"Dockerfile not found: {dockerfile_path}")

    if context_dir is not None:
        ctx = pathlib.Path(context_dir)
        if not ctx.is_dir():
            raise ValueError(f"context_dir does not exist: {context_dir}")
    else:
        # Default: grandparent of the Dockerfile, matching the
        # openenv init layout (<env>/server/Dockerfile -> <env>/).
        ctx = src.parent.parent

    content = src.read_text()
    stripped = cls.strip_buildkit_syntax(content)

    # Validate that COPY sources exist under the context directory.
    # This catches mismatches early (e.g. a Dockerfile expecting repo
    # root as context when we defaulted to the env directory).
    for line in stripped.splitlines():
        # Capture leading flags (--chown=, --chmod=, --from=, ...)
        # separately from the first source path. The previous pattern
        # `COPY\s+(?!--from=)(\S+)` treated any non---from flag as the
        # source itself, raising spurious ValueErrors on valid lines
        # like `COPY --chown=app:app src dst`.
        m = re.match(r"^\s*COPY\s+((?:--\S+\s+)*)(\S+)\s+", line, re.IGNORECASE)
        if not m:
            continue
        flags, copy_src = m.group(1), m.group(2)
        if "--from=" in flags:
            # Source lives in another build stage, not in the context.
            continue
        if copy_src.startswith("/"):
            continue
        resolved = ctx / copy_src
        if not resolved.exists() and not any(ctx.glob(copy_src)):
            raise ValueError(
                f"Dockerfile COPY source '{copy_src}' not found "
                f"under context_dir '{ctx}'. This Dockerfile may "
                f"expect a different build context (e.g. the repo "
                f"root). Pass context_dir explicitly."
            )

    # Parse CMD from the original Dockerfile so start_container can
    # use it as a fallback when openenv.yaml is unavailable.
    parsed_cmd = cls._parse_dockerfile_cmd(content)

    cls._dockerfile_registry[str(src)] = {
        "stripped_content": stripped,
        "context_dir": str(ctx),
        "server_cmd": parsed_cmd,
    }

    return f"dockerfile:{src}"
|
| 349 |
+
|
| 350 |
+
def start_container(
    self,
    image: str,
    port: Optional[int] = None,
    env_vars: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> str:
    """
    Create a Daytona sandbox from a Docker image or snapshot.

    Daytona does not execute the image's CMD (known bug — ENTRYPOINT
    runs, CMD does not). The server command is resolved in order:

    1. Explicit ``cmd`` passed to the constructor.
    2. ``cmd`` key in ``**kwargs`` (popped before forwarding).
    3. Auto-discovered from ``openenv.yaml`` inside the sandbox.
    4. ``CMD`` parsed from the Dockerfile (when *image* came from
       ``image_from_dockerfile``).

    Args:
        image: Docker image name (e.g. ``"echo-env:latest"``),
            ``"snapshot:<name>"`` to create from a pre-built snapshot,
            or ``"dockerfile:<path>"`` returned by
            :meth:`image_from_dockerfile`.
        port: Must be ``None`` or ``8000``. Daytona exposes port 8000
            via its preview proxy; other ports raise ``ValueError``.
        env_vars: Environment variables forwarded to the sandbox.
        **kwargs: ``cmd`` (str) to override the server command;
            remaining kwargs passed through to ``Daytona.create()``.

    Returns:
        HTTPS preview URL for the sandbox (base_url).
    """
    if port is not None and port != 8000:
        raise ValueError(
            f"DaytonaProvider only supports port 8000 (got {port}). "
            "The Daytona preview proxy routes to port 8000 inside the sandbox."
        )

    # Resolve the server command (may be None; discovery happens after
    # sandbox creation when we can inspect the filesystem).
    cmd = kwargs.pop("cmd", None) or self._cmd

    # CMD parsed from Dockerfile (populated for "dockerfile:" images).
    parsed_cmd: Optional[str] = None

    # Build creation params
    create_kwargs: Dict[str, Any] = {}
    if env_vars:
        create_kwargs["env_vars"] = env_vars
    if self._public:
        create_kwargs["public"] = True
    # NOTE(review): 15 appears to be Daytona's default auto-stop
    # interval — only non-default values are forwarded. Confirm
    # against the SDK before changing.
    if self._auto_stop_interval != 15:
        create_kwargs["auto_stop_interval"] = self._auto_stop_interval

    if image.startswith("snapshot:"):
        from daytona import CreateSandboxFromSnapshotParams

        snapshot_name = image[len("snapshot:") :]
        params = CreateSandboxFromSnapshotParams(
            snapshot=snapshot_name, **create_kwargs
        )
    elif image.startswith("dockerfile:"):
        from daytona import CreateSandboxFromImageParams, Image

        dockerfile_path = image[len("dockerfile:") :]
        meta = self._dockerfile_registry.get(dockerfile_path)
        if meta is None:
            raise ValueError(
                f"No registered Dockerfile metadata for {dockerfile_path}. "
                "Call DaytonaProvider.image_from_dockerfile() first."
            )

        parsed_cmd = meta.get("server_cmd")

        # Build the daytona Image from the pre-stripped content.
        # The stripped Dockerfile is written to a throwaway file in the
        # build context so Image.from_dockerfile sees a real path, then
        # removed even if image construction fails.
        import pathlib
        import uuid

        ctx = pathlib.Path(meta["context_dir"])
        tmp_name = f".daytona-{uuid.uuid4().hex[:8]}.dockerfile"
        tmp_path = ctx / tmp_name
        try:
            tmp_path.write_text(meta["stripped_content"])
            daytona_image = Image.from_dockerfile(str(tmp_path))
        finally:
            tmp_path.unlink(missing_ok=True)

        img_kwargs: Dict[str, Any] = {
            "image": daytona_image,
            **create_kwargs,
        }
        if self._resources is not None:
            img_kwargs["resources"] = self._resources
        params = CreateSandboxFromImageParams(**img_kwargs)
    else:
        from daytona import CreateSandboxFromImageParams

        img_kwargs = {"image": image, **create_kwargs}
        if self._resources is not None:
            img_kwargs["resources"] = self._resources
        params = CreateSandboxFromImageParams(**img_kwargs)

    # Create sandbox
    extra: Dict[str, Any] = dict(kwargs)
    if self._on_snapshot_create_logs is not None:
        extra["on_snapshot_create_logs"] = self._on_snapshot_create_logs

    self._sandbox = self._daytona.create(
        params, timeout=self._create_timeout, **extra
    )

    # From here on, any failure tears the sandbox back down so a broken
    # launch doesn't leak a running (billable) sandbox.
    try:
        # Discover server command from openenv.yaml if not explicitly set.
        if cmd is None:
            try:
                cmd = self._discover_server_cmd(self._sandbox)
            except ValueError:
                # Fall back to CMD parsed from Dockerfile (if available).
                if parsed_cmd:
                    cmd = parsed_cmd
                else:
                    raise

        # Wrap in bash -c so compound commands (cd ... && uvicorn ...)
        # are handled correctly by nohup. Write PID so we can check
        # if the process crashed later in wait_for_ready().
        escaped_cmd = shlex.quote(cmd)
        self._sandbox.process.exec(
            f"nohup bash -c {escaped_cmd} > /tmp/openenv-server.log 2>&1 &"
            " echo $! > /tmp/openenv-server.pid",
            timeout=10,
        )

        # Get a signed preview URL for port 8000. The token is
        # embedded in the URL itself so no extra headers are needed.
        signed = self._sandbox.create_signed_preview_url(
            8000, expires_in_seconds=86400
        )
        self._preview_url = signed.url
    except Exception:
        self.stop_container()
        raise

    return self._preview_url
|
| 495 |
+
|
| 496 |
+
def refresh_preview_url(self) -> str:
    """Issue a fresh signed preview URL (valid for 24h).

    Daytona signed URLs expire after at most 24 hours. Call this to
    get a new one for long-running sessions. The returned URL points
    to the same sandbox — clients will need to reconnect using it.

    Raises:
        RuntimeError: If no sandbox is currently active.
    """
    if self._sandbox is None:
        raise RuntimeError("No active sandbox to refresh URL for.")
    fresh = self._sandbox.create_signed_preview_url(8000, expires_in_seconds=86400)
    self._preview_url = fresh.url
    return self._preview_url
|
| 508 |
+
|
| 509 |
+
def stop_container(self) -> None:
    """Delete the Daytona sandbox and clear local state."""
    sandbox = self._sandbox
    if sandbox is None:
        return
    try:
        self._daytona.delete(sandbox)
    finally:
        # Drop references even if deletion raised, so the provider
        # never points at a half-deleted sandbox.
        self._sandbox = None
        self._preview_url = None
|
| 519 |
+
|
| 520 |
+
def wait_for_ready(self, base_url: str, timeout_s: float = 120.0) -> None:
    """
    Poll the /health endpoint until the sandbox is ready.

    Uses a longer default timeout (120s) than Docker providers because
    Daytona sandboxes may have cold-start latency. Between polls the
    server PID file is checked so a crashed server surfaces immediately
    instead of after the full timeout.

    Args:
        base_url: Preview URL returned by ``start_container()``.
        timeout_s: Maximum seconds to wait.

    Raises:
        TimeoutError: If the sandbox doesn't become ready in time.
        RuntimeError: If the server process died (detected via PID check).
    """
    import requests

    health_url = f"{base_url}/health"
    deadline = time.time() + timeout_s

    while time.time() < deadline:
        try:
            if requests.get(health_url, timeout=5.0).status_code == 200:
                return
        except requests.RequestException:
            pass

        # Early exit: if the server process died, surface its log now
        # rather than burning the rest of the health-check timeout.
        if self._sandbox is not None:
            probe = self._sandbox.process.exec(
                "kill -0 $(cat /tmp/openenv-server.pid) 2>/dev/null"
                " && echo RUNNING || echo DEAD",
                timeout=10,
            )
            status = probe.result if hasattr(probe, "result") else str(probe)
            if "DEAD" in (status or ""):
                log_resp = self._sandbox.process.exec(
                    "cat /tmp/openenv-server.log 2>/dev/null", timeout=10
                )
                log = (
                    log_resp.result
                    if hasattr(log_resp, "result")
                    else str(log_resp)
                )
                raise RuntimeError(f"Server process died.\nLog:\n{log}")

        time.sleep(1.0)

    raise TimeoutError(
        f"Daytona sandbox at {base_url} did not become ready within {timeout_s}s"
    )
|
src/core/containers/runtime/providers.py
CHANGED
|
@@ -8,13 +8,13 @@
|
|
| 8 |
Container provider abstractions for running environment servers.
|
| 9 |
|
| 10 |
This module provides a pluggable architecture for different container providers
|
| 11 |
-
(local Docker, Kubernetes, cloud providers, etc.) to be used with
|
| 12 |
"""
|
| 13 |
|
| 14 |
from __future__ import annotations
|
| 15 |
|
| 16 |
from abc import ABC, abstractmethod
|
| 17 |
-
from typing import Any, Dict, Optional
|
| 18 |
|
| 19 |
|
| 20 |
class ContainerProvider(ABC):
|
|
@@ -118,7 +118,11 @@ class LocalDockerProvider(ContainerProvider):
|
|
| 118 |
capture_output=True,
|
| 119 |
timeout=5,
|
| 120 |
)
|
| 121 |
-
except (
|
|
|
|
|
|
|
|
|
|
|
|
|
| 122 |
raise RuntimeError(
|
| 123 |
"Docker is not available. Please install Docker Desktop or Docker Engine."
|
| 124 |
)
|
|
@@ -154,10 +158,13 @@ class LocalDockerProvider(ContainerProvider):
|
|
| 154 |
|
| 155 |
# Build docker run command
|
| 156 |
cmd = [
|
| 157 |
-
"docker",
|
|
|
|
| 158 |
"-d", # Detached
|
| 159 |
-
"--name",
|
| 160 |
-
|
|
|
|
|
|
|
| 161 |
]
|
| 162 |
|
| 163 |
# Add environment variables
|
|
@@ -169,8 +176,12 @@ class LocalDockerProvider(ContainerProvider):
|
|
| 169 |
cmd.append(image)
|
| 170 |
|
| 171 |
# Run container
|
| 172 |
-
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
# Wait a moment for container to start
|
| 176 |
time.sleep(1)
|
|
@@ -222,14 +233,18 @@ class LocalDockerProvider(ContainerProvider):
|
|
| 222 |
TimeoutError: If container doesn't become ready
|
| 223 |
"""
|
| 224 |
import time
|
|
|
|
| 225 |
import requests
|
| 226 |
|
| 227 |
start_time = time.time()
|
| 228 |
health_url = f"{base_url}/health"
|
| 229 |
|
|
|
|
|
|
|
|
|
|
| 230 |
while time.time() - start_time < timeout_s:
|
| 231 |
try:
|
| 232 |
-
response = requests.get(health_url, timeout=2.0)
|
| 233 |
if response.status_code == 200:
|
| 234 |
return
|
| 235 |
except requests.RequestException:
|
|
@@ -273,6 +288,308 @@ class LocalDockerProvider(ContainerProvider):
|
|
| 273 |
return f"{clean_image}-{timestamp}"
|
| 274 |
|
| 275 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 276 |
class KubernetesProvider(ContainerProvider):
|
| 277 |
"""
|
| 278 |
Container provider for Kubernetes clusters.
|
|
@@ -286,4 +603,67 @@ class KubernetesProvider(ContainerProvider):
|
|
| 286 |
>>> # Pod running in k8s, accessible via service or port-forward
|
| 287 |
>>> provider.stop_container()
|
| 288 |
"""
|
|
|
|
| 289 |
pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 8 |
Container provider abstractions for running environment servers.
|
| 9 |
|
| 10 |
This module provides a pluggable architecture for different container providers
|
| 11 |
+
(local Docker, Kubernetes, cloud providers, etc.) to be used with EnvClient.
|
| 12 |
"""
|
| 13 |
|
| 14 |
from __future__ import annotations
|
| 15 |
|
| 16 |
from abc import ABC, abstractmethod
|
| 17 |
+
from typing import Any, Dict, Optional, Sequence
|
| 18 |
|
| 19 |
|
| 20 |
class ContainerProvider(ABC):
|
|
|
|
| 118 |
capture_output=True,
|
| 119 |
timeout=5,
|
| 120 |
)
|
| 121 |
+
except (
|
| 122 |
+
subprocess.CalledProcessError,
|
| 123 |
+
FileNotFoundError,
|
| 124 |
+
subprocess.TimeoutExpired,
|
| 125 |
+
):
|
| 126 |
raise RuntimeError(
|
| 127 |
"Docker is not available. Please install Docker Desktop or Docker Engine."
|
| 128 |
)
|
|
|
|
| 158 |
|
| 159 |
# Build docker run command
|
| 160 |
cmd = [
|
| 161 |
+
"docker",
|
| 162 |
+
"run",
|
| 163 |
"-d", # Detached
|
| 164 |
+
"--name",
|
| 165 |
+
self._container_name,
|
| 166 |
+
"-p",
|
| 167 |
+
f"{port}:8000", # Map port
|
| 168 |
]
|
| 169 |
|
| 170 |
# Add environment variables
|
|
|
|
| 176 |
cmd.append(image)
|
| 177 |
|
| 178 |
# Run container
|
| 179 |
+
try:
|
| 180 |
+
result = subprocess.run(cmd, capture_output=True, text=True, check=True)
|
| 181 |
+
self._container_id = result.stdout.strip()
|
| 182 |
+
except subprocess.CalledProcessError as e:
|
| 183 |
+
error_msg = f"Failed to start Docker container.\nCommand: {' '.join(cmd)}\nExit code: {e.returncode}\nStderr: {e.stderr}\nStdout: {e.stdout}"
|
| 184 |
+
raise RuntimeError(error_msg) from e
|
| 185 |
|
| 186 |
# Wait a moment for container to start
|
| 187 |
time.sleep(1)
|
|
|
|
| 233 |
TimeoutError: If container doesn't become ready
|
| 234 |
"""
|
| 235 |
import time
|
| 236 |
+
|
| 237 |
import requests
|
| 238 |
|
| 239 |
start_time = time.time()
|
| 240 |
health_url = f"{base_url}/health"
|
| 241 |
|
| 242 |
+
# Bypass proxy for localhost to avoid proxy issues
|
| 243 |
+
proxies = {"http": None, "https": None}
|
| 244 |
+
|
| 245 |
while time.time() - start_time < timeout_s:
|
| 246 |
try:
|
| 247 |
+
response = requests.get(health_url, timeout=2.0, proxies=proxies)
|
| 248 |
if response.status_code == 200:
|
| 249 |
return
|
| 250 |
except requests.RequestException:
|
|
|
|
| 288 |
return f"{clean_image}-{timestamp}"
|
| 289 |
|
| 290 |
|
| 291 |
+
class DockerSwarmProvider(ContainerProvider):
|
| 292 |
+
"""
|
| 293 |
+
Container provider that uses Docker Swarm services for local concurrency.
|
| 294 |
+
|
| 295 |
+
This provider creates a replicated Swarm service backed by the local Docker
|
| 296 |
+
engine. The built-in load-balancer fans requests across the replicas,
|
| 297 |
+
allowing multiple container instances to run concurrently on the developer
|
| 298 |
+
workstation (mirroring the workflow described in the Docker stack docs).
|
| 299 |
+
"""
|
| 300 |
+
|
| 301 |
+
def __init__(
    self,
    *,
    auto_init_swarm: bool = True,
    overlay_network: Optional[str] = None,
):
    """Prepare the provider and verify Swarm is usable.

    Args:
        auto_init_swarm: Whether to call ``docker swarm init`` when Swarm
            is not active. Otherwise, user must manually initialize Swarm.
        overlay_network: Optional overlay network name for the service.
            When provided, the network is created with
            ``docker network create --driver overlay --attachable`` if it
            does not already exist.
    """
    # No service exists until start_container() is called.
    self._service_name: Optional[str] = None
    self._service_id: Optional[str] = None
    self._published_port: Optional[int] = None
    self._auto_init_swarm = auto_init_swarm
    self._overlay_network = overlay_network

    # Fail fast if the docker CLI or Swarm mode is unavailable.
    self._ensure_docker_available()
    self._ensure_swarm_initialized()
    if self._overlay_network:
        self._ensure_overlay_network(self._overlay_network)
|
| 326 |
+
|
| 327 |
+
def start_container(
    self,
    image: str,
    port: Optional[int] = None,
    env_vars: Optional[Dict[str, str]] = None,
    **kwargs: Any,
) -> str:
    """
    Start (or scale) a Swarm service for the given image.

    Builds a ``docker service create`` command line from the arguments,
    runs it, and returns the localhost URL for the published port.

    Supported kwargs:
        replicas (int): Number of container replicas (default: 2).
        cpu_limit (float | str): CPU limit passed to ``--limit-cpu``.
        memory_limit (str): Memory limit passed to ``--limit-memory``.
        constraints (Sequence[str]): Placement constraints.
        labels (Dict[str, str]): Service labels.
        command (Sequence[str] | str): Override container command.

    Raises:
        ValueError: On unrecognized kwargs.
        RuntimeError: If ``docker service create`` fails.
    """
    import shlex
    import subprocess
    import time

    # Reject typos early — unknown kwargs would otherwise be dropped
    # silently and the caller's intent lost.
    allowed_kwargs = {
        "replicas",
        "cpu_limit",
        "memory_limit",
        "constraints",
        "labels",
        "command",
    }
    unknown = set(kwargs) - allowed_kwargs
    if unknown:
        raise ValueError(f"Unsupported kwargs for DockerSwarmProvider: {unknown}")

    replicas = int(kwargs.get("replicas", 2))
    cpu_limit = kwargs.get("cpu_limit")
    memory_limit = kwargs.get("memory_limit")
    constraints: Optional[Sequence[str]] = kwargs.get("constraints")
    labels: Optional[Dict[str, str]] = kwargs.get("labels")
    command_override = kwargs.get("command")

    if port is None:
        port = self._find_available_port()

    self._service_name = self._generate_service_name(image)
    self._published_port = port

    # Base command: the service publishes host `port` -> container 8000.
    cmd = [
        "docker",
        "service",
        "create",
        "--detach",
        "--name",
        self._service_name,
        "--replicas",
        str(max(1, replicas)),
        "--publish",
        f"{port}:8000",
    ]

    if self._overlay_network:
        cmd.extend(["--network", self._overlay_network])

    if env_vars:
        for key, value in env_vars.items():
            cmd.extend(["--env", f"{key}={value}"])

    if cpu_limit is not None:
        cmd.extend(["--limit-cpu", str(cpu_limit)])

    if memory_limit is not None:
        cmd.extend(["--limit-memory", str(memory_limit)])

    if constraints:
        for constraint in constraints:
            cmd.extend(["--constraint", constraint])

    if labels:
        for key, value in labels.items():
            cmd.extend(["--label", f"{key}={value}"])

    cmd.append(image)

    # The container command must come after the image in the CLI; a
    # string override is tokenized shell-style, a sequence is passed
    # through as-is.
    if command_override:
        if isinstance(command_override, str):
            cmd.extend(shlex.split(command_override))
        else:
            cmd.extend(command_override)

    try:
        result = subprocess.run(
            cmd,
            capture_output=True,
            text=True,
            check=True,
        )
        self._service_id = result.stdout.strip()
    except subprocess.CalledProcessError as e:
        error_msg = (
            "Failed to start Docker Swarm service.\n"
            f"Command: {' '.join(cmd)}\n"
            f"Exit code: {e.returncode}\n"
            f"Stdout: {e.stdout}\n"
            f"Stderr: {e.stderr}"
        )
        raise RuntimeError(error_msg) from e

    # Give Swarm a brief moment to schedule the tasks.
    time.sleep(1.0)

    return f"http://localhost:{port}"
|
| 438 |
+
|
| 439 |
+
def stop_container(self) -> None:
    """
    Remove the Swarm service (and keep the Swarm manager running).

    Best-effort: removal failures (service already gone, docker CLI
    missing, or the command timing out) are swallowed, and the local
    service state is always cleared.
    """
    if not self._service_name:
        return

    import subprocess

    try:
        subprocess.run(
            ["docker", "service", "rm", self._service_name],
            capture_output=True,
            check=True,
            timeout=10,
        )
    except (
        subprocess.CalledProcessError,
        FileNotFoundError,
        subprocess.TimeoutExpired,
    ):
        # Service may already be gone, or docker may be unavailable;
        # ignore — cleanup must never raise. Previously only
        # CalledProcessError was caught, so a missing docker CLI
        # (FileNotFoundError) or a hung command (TimeoutExpired)
        # escaped this best-effort teardown.
        pass
    finally:
        self._service_name = None
        self._service_id = None
        self._published_port = None
|
| 462 |
+
|
| 463 |
+
def wait_for_ready(self, base_url: str, timeout_s: float = 30.0) -> None:
|
| 464 |
+
"""
|
| 465 |
+
Wait for at least one replica to become healthy by polling /health.
|
| 466 |
+
|
| 467 |
+
Note: With Swarm's load balancer, requests round-robin across replicas,
|
| 468 |
+
so this only verifies that at least one replica is responding. Some
|
| 469 |
+
replicas may still be starting when this returns.
|
| 470 |
+
"""
|
| 471 |
+
import time
|
| 472 |
+
|
| 473 |
+
import requests
|
| 474 |
+
|
| 475 |
+
deadline = time.time() + timeout_s
|
| 476 |
+
health_url = f"{base_url}/health"
|
| 477 |
+
|
| 478 |
+
# Bypass proxy for localhost to avoid proxy issues
|
| 479 |
+
proxies = {"http": None, "https": None}
|
| 480 |
+
|
| 481 |
+
while time.time() < deadline:
|
| 482 |
+
try:
|
| 483 |
+
response = requests.get(health_url, timeout=2.0, proxies=proxies)
|
| 484 |
+
if response.status_code == 200:
|
| 485 |
+
return
|
| 486 |
+
except requests.RequestException:
|
| 487 |
+
pass
|
| 488 |
+
|
| 489 |
+
time.sleep(0.5)
|
| 490 |
+
|
| 491 |
+
raise TimeoutError(
|
| 492 |
+
f"Swarm service at {base_url} did not become ready within {timeout_s}s"
|
| 493 |
+
)
|
| 494 |
+
|
| 495 |
+
def _ensure_docker_available(self) -> None:
|
| 496 |
+
import subprocess
|
| 497 |
+
|
| 498 |
+
try:
|
| 499 |
+
subprocess.run(
|
| 500 |
+
["docker", "version"],
|
| 501 |
+
check=True,
|
| 502 |
+
capture_output=True,
|
| 503 |
+
timeout=5,
|
| 504 |
+
)
|
| 505 |
+
except (
|
| 506 |
+
subprocess.CalledProcessError,
|
| 507 |
+
FileNotFoundError,
|
| 508 |
+
subprocess.TimeoutExpired,
|
| 509 |
+
) as exc:
|
| 510 |
+
raise RuntimeError(
|
| 511 |
+
"Docker is not available. Please install Docker Desktop or Docker Engine."
|
| 512 |
+
) from exc
|
| 513 |
+
|
| 514 |
+
def _ensure_swarm_initialized(self) -> None:
|
| 515 |
+
import subprocess
|
| 516 |
+
|
| 517 |
+
try:
|
| 518 |
+
result = subprocess.run(
|
| 519 |
+
["docker", "info", "--format", "{{.Swarm.LocalNodeState}}"],
|
| 520 |
+
capture_output=True,
|
| 521 |
+
text=True,
|
| 522 |
+
check=True,
|
| 523 |
+
timeout=5,
|
| 524 |
+
)
|
| 525 |
+
state = result.stdout.strip().lower()
|
| 526 |
+
if state == "active":
|
| 527 |
+
return
|
| 528 |
+
except subprocess.CalledProcessError:
|
| 529 |
+
state = "unknown"
|
| 530 |
+
|
| 531 |
+
if not self._auto_init_swarm:
|
| 532 |
+
raise RuntimeError(
|
| 533 |
+
f"Docker Swarm is not active (state={state}). Enable Swarm manually or pass auto_init_swarm=True."
|
| 534 |
+
)
|
| 535 |
+
|
| 536 |
+
try:
|
| 537 |
+
subprocess.run(
|
| 538 |
+
["docker", "swarm", "init"],
|
| 539 |
+
check=True,
|
| 540 |
+
capture_output=True,
|
| 541 |
+
timeout=10,
|
| 542 |
+
)
|
| 543 |
+
except subprocess.CalledProcessError as e:
|
| 544 |
+
raise RuntimeError("Failed to initialize Docker Swarm") from e
|
| 545 |
+
|
| 546 |
+
def _ensure_overlay_network(self, network: str) -> None:
|
| 547 |
+
import subprocess
|
| 548 |
+
|
| 549 |
+
inspect = subprocess.run(
|
| 550 |
+
["docker", "network", "inspect", network],
|
| 551 |
+
capture_output=True,
|
| 552 |
+
text=True,
|
| 553 |
+
check=False,
|
| 554 |
+
)
|
| 555 |
+
if inspect.returncode == 0:
|
| 556 |
+
return
|
| 557 |
+
|
| 558 |
+
try:
|
| 559 |
+
subprocess.run(
|
| 560 |
+
[
|
| 561 |
+
"docker",
|
| 562 |
+
"network",
|
| 563 |
+
"create",
|
| 564 |
+
"--driver",
|
| 565 |
+
"overlay",
|
| 566 |
+
"--attachable",
|
| 567 |
+
network,
|
| 568 |
+
],
|
| 569 |
+
check=True,
|
| 570 |
+
capture_output=True,
|
| 571 |
+
timeout=10,
|
| 572 |
+
)
|
| 573 |
+
except subprocess.CalledProcessError as e:
|
| 574 |
+
raise RuntimeError(f"Failed to create overlay network '{network}'") from e
|
| 575 |
+
|
| 576 |
+
def _find_available_port(self) -> int:
|
| 577 |
+
import socket
|
| 578 |
+
|
| 579 |
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
|
| 580 |
+
s.bind(("", 0))
|
| 581 |
+
s.listen(1)
|
| 582 |
+
port = s.getsockname()[1]
|
| 583 |
+
return port
|
| 584 |
+
|
| 585 |
+
def _generate_service_name(self, image: str) -> str:
|
| 586 |
+
import time
|
| 587 |
+
|
| 588 |
+
clean_image = image.split("/")[-1].split(":")[0]
|
| 589 |
+
timestamp = int(time.time() * 1000)
|
| 590 |
+
return f"{clean_image}-swarm-{timestamp}"
|
| 591 |
+
|
| 592 |
+
|
| 593 |
class KubernetesProvider(ContainerProvider):
|
| 594 |
"""
|
| 595 |
Container provider for Kubernetes clusters.
|
|
|
|
| 603 |
>>> # Pod running in k8s, accessible via service or port-forward
|
| 604 |
>>> provider.stop_container()
|
| 605 |
"""
|
| 606 |
+
|
| 607 |
pass
|
| 608 |
+
|
| 609 |
+
|
| 610 |
+
class RuntimeProvider(ABC):
    """
    Abstract base class for runtime providers that are not container providers.

    Providers implement this interface to support different runtime platforms:
    - UVProvider: Runs environments via `uv run`

    The provider manages a single runtime lifecycle and provides the base URL
    for connecting to it.

    Example:
        >>> provider = UVProvider(project_path="/path/to/env")
        >>> base_url = provider.start()
        >>> print(base_url)  # http://localhost:8000
        >>> provider.stop()
    """

    @abstractmethod
    def start(
        self,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        **kwargs: Any,
    ) -> str:
        """
        Start the runtime.

        Args:
            port: Port to expose (if None, provider chooses)
            env_vars: Environment variables for the runtime
            **kwargs: Additional runtime options

        Returns:
            Base URL for connecting to the runtime
            (e.g. ``http://localhost:8000``)
        """

    @abstractmethod
    def stop(self) -> None:
        """Stop the runtime."""

    @abstractmethod
    def wait_for_ready(self, timeout_s: float = 30.0) -> None:
        """Wait for the runtime to be ready to accept requests."""

    def __enter__(self) -> "RuntimeProvider":
        """Start the runtime and return the provider for `with` usage."""
        self.start()
        return self

    def __exit__(self, exc_type, exc, tb) -> None:
        """Stop the runtime on context exit; exceptions propagate (returns False)."""
        self.stop()
        return False
|
src/core/containers/runtime/uv_provider.py
ADDED
|
@@ -0,0 +1,224 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Providers for launching ASGI applications via ``uv run``."""
|
| 2 |
+
|
| 3 |
+
from __future__ import annotations
|
| 4 |
+
|
| 5 |
+
import os
|
| 6 |
+
import socket
|
| 7 |
+
import subprocess
|
| 8 |
+
import time
|
| 9 |
+
from typing import Dict, Optional
|
| 10 |
+
|
| 11 |
+
import requests
|
| 12 |
+
|
| 13 |
+
from .providers import RuntimeProvider
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def _check_uv_installed() -> None:
    """
    Verify that the ``uv`` CLI is installed and runnable.

    Raises:
        RuntimeError: If ``uv`` is missing from PATH, or is present but
            cannot report its version (a broken installation).
    """
    try:
        subprocess.run(["uv", "--version"], check=True, capture_output=True)
    except FileNotFoundError as exc:
        raise RuntimeError(
            "`uv` executable not found. Install uv from https://docs.astral.sh and ensure it is on PATH."
        ) from exc
    except subprocess.CalledProcessError as exc:
        # `uv` exists but exited nonzero -- surface a clear error instead of
        # leaking the raw CalledProcessError to the caller.
        raise RuntimeError(
            "`uv` is installed but `uv --version` failed; check your uv installation."
        ) from exc
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _find_free_port() -> int:
|
| 26 |
+
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
|
| 27 |
+
sock.bind(("", 0))
|
| 28 |
+
sock.listen(1)
|
| 29 |
+
return sock.getsockname()[1]
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def _create_uv_command(
|
| 33 |
+
*,
|
| 34 |
+
host: str,
|
| 35 |
+
port: int,
|
| 36 |
+
reload: bool,
|
| 37 |
+
workers: int,
|
| 38 |
+
app: str,
|
| 39 |
+
project_path: str,
|
| 40 |
+
) -> list[str]:
|
| 41 |
+
command: list[str] = ["uv", "run", "--isolated", "--project", project_path]
|
| 42 |
+
|
| 43 |
+
command.append("--")
|
| 44 |
+
command.extend(
|
| 45 |
+
[
|
| 46 |
+
"uvicorn",
|
| 47 |
+
app,
|
| 48 |
+
"--host",
|
| 49 |
+
host,
|
| 50 |
+
"--port",
|
| 51 |
+
str(port),
|
| 52 |
+
"--workers",
|
| 53 |
+
str(workers),
|
| 54 |
+
]
|
| 55 |
+
)
|
| 56 |
+
|
| 57 |
+
if reload:
|
| 58 |
+
command.append("--reload")
|
| 59 |
+
|
| 60 |
+
return command
|
| 61 |
+
|
| 62 |
+
|
| 63 |
+
def _poll_health(health_url: str, timeout_s: float) -> None:
    """
    Poll a health endpoint until it returns HTTP 200 or times out.

    Args:
        health_url: Full URL of the health endpoint to poll
        timeout_s: Overall deadline for the poll loop

    Raises:
        TimeoutError: If no HTTP 200 response arrives before the deadline
    """
    deadline = time.time() + timeout_s
    while time.time() < deadline:
        try:
            # Cap the per-request timeout so one slow request cannot overrun
            # the overall deadline.
            timeout = max(0.0001, min(deadline - time.time(), 2.0))
            response = requests.get(health_url, timeout=timeout)
            if response.status_code == 200:
                return
        except requests.RequestException:
            # Server not up yet (e.g. connection refused). Fall through to the
            # sleep below -- `continue` here would busy-loop with no back-off.
            pass

        time.sleep(0.5)

    raise TimeoutError(f"Server did not become ready within {timeout_s:.1f} seconds")
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
class UVProvider(RuntimeProvider):
    """
    RuntimeProvider implementation backed by ``uv run``.

    Args:
        project_path: Local path to a uv project (passed to ``uv run --project``)
        app: ASGI application path for uvicorn (defaults to ``server.app:app``)
        host: Host interface to bind to (defaults to ``0.0.0.0``)
        reload: Whether to enable uvicorn's reload mode
        env_vars: Environment variables to pass through to the spawned process
        context_timeout_s: How long to wait for the environment to become ready

    Example:
        >>> provider = UVProvider(project_path="/path/to/env")
        >>> base_url = provider.start()
        >>> print(base_url)  # http://localhost:8000
        >>> # Use the environment via base_url
        >>> provider.stop()
    """

    def __init__(
        self,
        *,
        project_path: str,
        app: str = "server.app:app",
        host: str = "0.0.0.0",
        reload: bool = False,
        env_vars: Optional[Dict[str, str]] = None,
        context_timeout_s: float = 60.0,
    ):
        """Initialize the UVProvider, failing fast if `uv` is not installed."""
        self.project_path = os.path.abspath(project_path)
        self.app = app
        self.host = host
        self.reload = reload
        self.env_vars = env_vars
        self.context_timeout_s = context_timeout_s
        _check_uv_installed()
        self._process: Optional[subprocess.Popen] = None
        self._base_url: Optional[str] = None

    def start(
        self,
        port: Optional[int] = None,
        env_vars: Optional[Dict[str, str]] = None,
        workers: int = 1,
        **_: Dict[str, str],
    ) -> str:
        """
        Start the environment via `uv run`.

        Args:
            port: The port to bind the environment to (a free port is chosen
                automatically when omitted)
            env_vars: Environment variables to pass to the environment; these
                override the constructor-level ``env_vars``
            workers: The number of uvicorn workers to use

        Returns:
            The base URL of the environment

        Raises:
            RuntimeError: If the environment is already running, or the
                subprocess cannot be launched
        """
        if self._process is not None and self._process.poll() is None:
            raise RuntimeError("UVProvider is already running")

        bind_port = port or _find_free_port()

        command = _create_uv_command(
            host=self.host,
            port=bind_port,
            reload=self.reload,
            workers=workers,
            app=self.app,
            project_path=self.project_path,
        )

        # Inherit the parent environment, then layer constructor-level and
        # call-level overrides (call-level wins).
        env = os.environ.copy()
        if self.env_vars:
            env.update(self.env_vars)
        if env_vars:
            env.update(env_vars)

        try:
            self._process = subprocess.Popen(command, env=env)
        except OSError as exc:
            raise RuntimeError(f"Failed to launch `uv run`: {exc}") from exc

        # A wildcard bind address is not a usable client address.
        client_host = "127.0.0.1" if self.host in {"0.0.0.0", "::"} else self.host
        self._base_url = f"http://{client_host}:{bind_port}"
        return self._base_url

    def wait_for_ready(self, timeout_s: float = 60.0) -> None:
        """
        Wait for the environment to become ready.

        Args:
            timeout_s: The timeout to wait for the environment to become ready

        Raises:
            RuntimeError: If the environment is not running
            TimeoutError: If the environment does not become ready within the timeout
        """
        # Enforce the documented RuntimeError for a never-started provider;
        # previously `_process is None` slipped through and this polled the
        # bogus URL "None/health" until it timed out.
        if self._process is None or self._base_url is None:
            raise RuntimeError("UVProvider has not been started")

        if self._process.poll() is not None:
            code = self._process.returncode
            raise RuntimeError(f"uv process exited prematurely with code {code}")

        _poll_health(f"{self._base_url}/health", timeout_s=timeout_s)

    def stop(self) -> None:
        """
        Stop the environment.

        Terminates the child process, escalating to SIGKILL if it does not
        exit within 10 seconds, then clears the cached state. A no-op when
        nothing is running.
        """
        if self._process is None:
            return

        if self._process.poll() is None:
            self._process.terminate()
            try:
                self._process.wait(timeout=10.0)
            except subprocess.TimeoutExpired:
                # Graceful shutdown failed; force-kill the process.
                self._process.kill()
                self._process.wait(timeout=5.0)

        self._process = None
        self._base_url = None

    @property
    def base_url(self) -> str:
        """
        The base URL of the environment.

        Returns:
            The base URL of the environment

        Raises:
            RuntimeError: If the environment is not running
        """
        if self._base_url is None:
            raise RuntimeError("UVProvider has not been started")
        return self._base_url
|
src/core/containers/test_local_docker_provider.py
CHANGED
|
@@ -16,8 +16,8 @@ from pathlib import Path
|
|
| 16 |
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 17 |
|
| 18 |
import requests
|
|
|
|
| 19 |
|
| 20 |
-
from core.containers.runtime import LocalDockerProvider
|
| 21 |
|
| 22 |
# TODO: Remove this test or make it a functional test sicne this will be tested in e2e test for echo env
|
| 23 |
def test_local_docker_provider():
|
|
@@ -87,7 +87,9 @@ def test_local_docker_provider():
|
|
| 87 |
print(f" Length: {data['observation']['message_length']}")
|
| 88 |
print(f" Reward: {data['reward']}")
|
| 89 |
assert response.status_code == 200
|
| 90 |
-
assert
|
|
|
|
|
|
|
| 91 |
assert data["observation"]["message_length"] == 31
|
| 92 |
print("✓ Step test passed\n")
|
| 93 |
|
|
@@ -107,11 +109,11 @@ def test_local_docker_provider():
|
|
| 107 |
for i in range(3):
|
| 108 |
response = requests.post(
|
| 109 |
f"{base_url}/step",
|
| 110 |
-
json={"action": {"message": f"Message {i+1}"}},
|
| 111 |
headers={"Content-Type": "application/json"},
|
| 112 |
)
|
| 113 |
assert response.status_code == 200
|
| 114 |
-
print(f" Step {i+1}: ✓")
|
| 115 |
|
| 116 |
# Check state updated
|
| 117 |
response = requests.get(f"{base_url}/state")
|
|
@@ -130,6 +132,7 @@ def test_local_docker_provider():
|
|
| 130 |
except Exception as e:
|
| 131 |
print(f"\n❌ Test failed: {e}")
|
| 132 |
import traceback
|
|
|
|
| 133 |
traceback.print_exc()
|
| 134 |
return False
|
| 135 |
|
|
@@ -197,8 +200,7 @@ def test_provider_with_env_vars():
|
|
| 197 |
|
| 198 |
print("Starting container with environment variables...")
|
| 199 |
base_url = provider.start_container(
|
| 200 |
-
"echo-env:latest",
|
| 201 |
-
env_vars={"DEBUG": "true", "LOG_LEVEL": "info"}
|
| 202 |
)
|
| 203 |
print(f"✓ Started at: {base_url}")
|
| 204 |
|
|
|
|
| 16 |
sys.path.insert(0, str(Path(__file__).parent.parent.parent))
|
| 17 |
|
| 18 |
import requests
|
| 19 |
+
from openenv.core.containers.runtime import LocalDockerProvider
|
| 20 |
|
|
|
|
| 21 |
|
| 22 |
# TODO: Remove this test or make it a functional test sicne this will be tested in e2e test for echo env
|
| 23 |
def test_local_docker_provider():
|
|
|
|
| 87 |
print(f" Length: {data['observation']['message_length']}")
|
| 88 |
print(f" Reward: {data['reward']}")
|
| 89 |
assert response.status_code == 200
|
| 90 |
+
assert (
|
| 91 |
+
data["observation"]["echoed_message"] == "Hello from LocalDockerProvider!"
|
| 92 |
+
)
|
| 93 |
assert data["observation"]["message_length"] == 31
|
| 94 |
print("✓ Step test passed\n")
|
| 95 |
|
|
|
|
| 109 |
for i in range(3):
|
| 110 |
response = requests.post(
|
| 111 |
f"{base_url}/step",
|
| 112 |
+
json={"action": {"message": f"Message {i + 1}"}},
|
| 113 |
headers={"Content-Type": "application/json"},
|
| 114 |
)
|
| 115 |
assert response.status_code == 200
|
| 116 |
+
print(f" Step {i + 1}: ✓")
|
| 117 |
|
| 118 |
# Check state updated
|
| 119 |
response = requests.get(f"{base_url}/state")
|
|
|
|
| 132 |
except Exception as e:
|
| 133 |
print(f"\n❌ Test failed: {e}")
|
| 134 |
import traceback
|
| 135 |
+
|
| 136 |
traceback.print_exc()
|
| 137 |
return False
|
| 138 |
|
|
|
|
| 200 |
|
| 201 |
print("Starting container with environment variables...")
|
| 202 |
base_url = provider.start_container(
|
| 203 |
+
"echo-env:latest", env_vars={"DEBUG": "true", "LOG_LEVEL": "info"}
|
|
|
|
| 204 |
)
|
| 205 |
print(f"✓ Started at: {base_url}")
|
| 206 |
|
src/core/env_client.py
ADDED
|
@@ -0,0 +1,484 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Environment client for persistent sessions.
|
| 9 |
+
|
| 10 |
+
This module provides a WebSocket-based client that maintains a persistent connection
|
| 11 |
+
to an environment server, enabling efficient multi-step interactions without
|
| 12 |
+
the overhead of HTTP request/response cycles.
|
| 13 |
+
|
| 14 |
+
The client is async by default. For synchronous usage, use the `.sync()` method
|
| 15 |
+
to get a `SyncEnvClient` wrapper.
|
| 16 |
+
|
| 17 |
+
Example (async):
|
| 18 |
+
>>> async with GenericEnvClient(base_url="ws://localhost:8000") as env:
|
| 19 |
+
... result = await env.reset()
|
| 20 |
+
... result = await env.step({"code": "print('hello')"})
|
| 21 |
+
|
| 22 |
+
Example (sync wrapper):
|
| 23 |
+
>>> env = GenericEnvClient(base_url="ws://localhost:8000").sync()
|
| 24 |
+
>>> with env:
|
| 25 |
+
... result = env.reset()
|
| 26 |
+
... result = env.step({"code": "print('hello')"})
|
| 27 |
+
"""
|
| 28 |
+
|
| 29 |
+
from __future__ import annotations
|
| 30 |
+
|
| 31 |
+
import asyncio
|
| 32 |
+
import json
|
| 33 |
+
import os
|
| 34 |
+
from abc import ABC, abstractmethod
|
| 35 |
+
from typing import Any, Dict, Generic, Optional, Type, TYPE_CHECKING, TypeVar
|
| 36 |
+
|
| 37 |
+
from .client_types import StateT, StepResult
|
| 38 |
+
from .containers.runtime import LocalDockerProvider, UVProvider
|
| 39 |
+
from .utils import convert_to_ws_url
|
| 40 |
+
|
| 41 |
+
if TYPE_CHECKING:
|
| 42 |
+
from websockets.asyncio.client import ClientConnection
|
| 43 |
+
|
| 44 |
+
from .containers.runtime import ContainerProvider, RuntimeProvider
|
| 45 |
+
from .sync_client import SyncEnvClient
|
| 46 |
+
|
| 47 |
+
from websockets.asyncio.client import connect as ws_connect
|
| 48 |
+
|
| 49 |
+
ActT = TypeVar("ActT")
|
| 50 |
+
ObsT = TypeVar("ObsT")
|
| 51 |
+
EnvClientT = TypeVar("EnvClientT", bound="EnvClient")
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class EnvClient(ABC, Generic[ActT, ObsT, StateT]):
|
| 55 |
+
"""
|
| 56 |
+
Async environment client for persistent sessions.
|
| 57 |
+
|
| 58 |
+
This client maintains a persistent WebSocket connection to an environment
|
| 59 |
+
server, enabling efficient multi-step interactions. Each client instance
|
| 60 |
+
corresponds to a dedicated environment session on the server.
|
| 61 |
+
|
| 62 |
+
The client is async by default. For synchronous usage, use the `.sync()`
|
| 63 |
+
method to get a `SyncEnvClient` wrapper.
|
| 64 |
+
|
| 65 |
+
Features:
|
| 66 |
+
- Lower latency for sequential interactions
|
| 67 |
+
- Session state is maintained server-side
|
| 68 |
+
- Better suited for long-running episodes
|
| 69 |
+
- Async by default for modern Python async/await patterns
|
| 70 |
+
|
| 71 |
+
Example (async):
|
| 72 |
+
>>> from envs.coding_env.client import CodingEnv
|
| 73 |
+
>>>
|
| 74 |
+
>>> # Connect to a server using async context manager
|
| 75 |
+
>>> async with CodingEnv(base_url="ws://localhost:8000") as env:
|
| 76 |
+
... result = await env.reset(seed=42)
|
| 77 |
+
... while not result.done:
|
| 78 |
+
... action = agent.predict(result.observation)
|
| 79 |
+
... result = await env.step(action)
|
| 80 |
+
|
| 81 |
+
Example (sync wrapper):
|
| 82 |
+
>>> env = CodingEnv(base_url="ws://localhost:8000").sync()
|
| 83 |
+
>>> with env:
|
| 84 |
+
... result = env.reset(seed=42)
|
| 85 |
+
... result = env.step(action)
|
| 86 |
+
"""
|
| 87 |
+
|
| 88 |
+
def __init__(
|
| 89 |
+
self,
|
| 90 |
+
base_url: str,
|
| 91 |
+
connect_timeout_s: float = 10.0,
|
| 92 |
+
message_timeout_s: float = 60.0,
|
| 93 |
+
max_message_size_mb: float = 100.0,
|
| 94 |
+
provider: Optional["ContainerProvider | RuntimeProvider"] = None,
|
| 95 |
+
mode: Optional[str] = None,
|
| 96 |
+
):
|
| 97 |
+
"""
|
| 98 |
+
Initialize environment client.
|
| 99 |
+
|
| 100 |
+
Args:
|
| 101 |
+
base_url: Base URL of the environment server (http:// or ws://).
|
| 102 |
+
Will be converted to ws:// if http:// is provided.
|
| 103 |
+
connect_timeout_s: Timeout for establishing WebSocket connection
|
| 104 |
+
message_timeout_s: Timeout for receiving responses to messages
|
| 105 |
+
max_message_size_mb: Maximum WebSocket message size in megabytes.
|
| 106 |
+
Default 100MB to handle large observations (screenshots, DOM, etc.)
|
| 107 |
+
provider: Optional container/runtime provider for lifecycle management.
|
| 108 |
+
Can be a ContainerProvider (Docker) or RuntimeProvider (UV).
|
| 109 |
+
mode: Communication mode: 'simulation' for Gym-style API (default) or
|
| 110 |
+
'production' for MCP JSON-RPC protocol. Can also be set via the
|
| 111 |
+
OPENENV_CLIENT_MODE environment variable. Constructor parameter
|
| 112 |
+
takes precedence over environment variable. Case-insensitive.
|
| 113 |
+
"""
|
| 114 |
+
# Determine mode (constructor > env var > default)
|
| 115 |
+
if mode is None:
|
| 116 |
+
mode = os.environ.get("OPENENV_CLIENT_MODE", "simulation")
|
| 117 |
+
|
| 118 |
+
# Normalize and validate mode
|
| 119 |
+
mode = mode.lower()
|
| 120 |
+
if mode not in ("simulation", "production"):
|
| 121 |
+
raise ValueError(
|
| 122 |
+
f"Invalid mode: '{mode}'. Must be 'simulation' or 'production'. "
|
| 123 |
+
f"Set via constructor parameter or OPENENV_CLIENT_MODE environment variable."
|
| 124 |
+
)
|
| 125 |
+
|
| 126 |
+
# Store mode (use object.__setattr__ to bypass immutability)
|
| 127 |
+
object.__setattr__(self, "_mode", mode)
|
| 128 |
+
|
| 129 |
+
# Convert HTTP URL to WebSocket URL
|
| 130 |
+
ws_url = convert_to_ws_url(base_url)
|
| 131 |
+
|
| 132 |
+
self._ws_url = f"{ws_url}/ws"
|
| 133 |
+
self._connect_timeout = connect_timeout_s
|
| 134 |
+
self._message_timeout = message_timeout_s
|
| 135 |
+
self._max_message_size = int(
|
| 136 |
+
max_message_size_mb * 1024 * 1024
|
| 137 |
+
) # Convert MB to bytes
|
| 138 |
+
self._provider = provider
|
| 139 |
+
self._ws: Optional[ClientConnection] = None
|
| 140 |
+
|
| 141 |
+
def __setattr__(self, name: str, value: Any) -> None:
|
| 142 |
+
"""Prevent modification of _mode after initialization."""
|
| 143 |
+
if name == "_mode" and hasattr(self, "_mode"):
|
| 144 |
+
raise AttributeError("Cannot modify mode after initialization")
|
| 145 |
+
super().__setattr__(name, value)
|
| 146 |
+
|
| 147 |
+
async def connect(self) -> "EnvClient":
|
| 148 |
+
"""
|
| 149 |
+
Establish WebSocket connection to the server.
|
| 150 |
+
|
| 151 |
+
Returns:
|
| 152 |
+
self for method chaining
|
| 153 |
+
|
| 154 |
+
Raises:
|
| 155 |
+
ConnectionError: If connection cannot be established
|
| 156 |
+
"""
|
| 157 |
+
if self._ws is not None:
|
| 158 |
+
return self
|
| 159 |
+
|
| 160 |
+
# Bypass proxy for localhost connections
|
| 161 |
+
ws_url_lower = self._ws_url.lower()
|
| 162 |
+
is_localhost = "localhost" in ws_url_lower or "127.0.0.1" in ws_url_lower
|
| 163 |
+
|
| 164 |
+
old_no_proxy = os.environ.get("NO_PROXY")
|
| 165 |
+
if is_localhost:
|
| 166 |
+
# Set NO_PROXY to bypass proxy for localhost
|
| 167 |
+
current_no_proxy = old_no_proxy or ""
|
| 168 |
+
if "localhost" not in current_no_proxy.lower():
|
| 169 |
+
os.environ["NO_PROXY"] = (
|
| 170 |
+
f"{current_no_proxy},localhost,127.0.0.1"
|
| 171 |
+
if current_no_proxy
|
| 172 |
+
else "localhost,127.0.0.1"
|
| 173 |
+
)
|
| 174 |
+
|
| 175 |
+
try:
|
| 176 |
+
self._ws = await ws_connect(
|
| 177 |
+
self._ws_url,
|
| 178 |
+
open_timeout=self._connect_timeout,
|
| 179 |
+
max_size=self._max_message_size,
|
| 180 |
+
)
|
| 181 |
+
except Exception as e:
|
| 182 |
+
raise ConnectionError(f"Failed to connect to {self._ws_url}: {e}") from e
|
| 183 |
+
finally:
|
| 184 |
+
# Restore original NO_PROXY value
|
| 185 |
+
if is_localhost:
|
| 186 |
+
if old_no_proxy is None:
|
| 187 |
+
os.environ.pop("NO_PROXY", None)
|
| 188 |
+
else:
|
| 189 |
+
os.environ["NO_PROXY"] = old_no_proxy
|
| 190 |
+
|
| 191 |
+
return self
|
| 192 |
+
|
| 193 |
+
async def disconnect(self) -> None:
|
| 194 |
+
"""Close the WebSocket connection."""
|
| 195 |
+
if self._ws is not None:
|
| 196 |
+
try:
|
| 197 |
+
# Send close message
|
| 198 |
+
await self._send({"type": "close"})
|
| 199 |
+
except Exception:
|
| 200 |
+
pass # Best effort
|
| 201 |
+
try:
|
| 202 |
+
await self._ws.close()
|
| 203 |
+
except Exception:
|
| 204 |
+
pass
|
| 205 |
+
self._ws = None
|
| 206 |
+
|
| 207 |
+
async def _ensure_connected(self) -> None:
|
| 208 |
+
"""Ensure WebSocket connection is established."""
|
| 209 |
+
if self._ws is None:
|
| 210 |
+
await self.connect()
|
| 211 |
+
|
| 212 |
+
async def _send(self, message: Dict[str, Any]) -> None:
|
| 213 |
+
"""Send a message over the WebSocket."""
|
| 214 |
+
await self._ensure_connected()
|
| 215 |
+
assert self._ws is not None
|
| 216 |
+
await self._ws.send(json.dumps(message))
|
| 217 |
+
|
| 218 |
+
async def _receive(self) -> Dict[str, Any]:
|
| 219 |
+
"""Receive and parse a message from the WebSocket."""
|
| 220 |
+
assert self._ws is not None
|
| 221 |
+
raw = await asyncio.wait_for(self._ws.recv(), timeout=self._message_timeout)
|
| 222 |
+
return json.loads(raw)
|
| 223 |
+
|
| 224 |
+
async def _send_and_receive(self, message: Dict[str, Any]) -> Dict[str, Any]:
|
| 225 |
+
"""Send a message and wait for response."""
|
| 226 |
+
await self._send(message)
|
| 227 |
+
response = await self._receive()
|
| 228 |
+
|
| 229 |
+
# Check for error response
|
| 230 |
+
if response.get("type") == "error":
|
| 231 |
+
error_data = response.get("data", {})
|
| 232 |
+
raise RuntimeError(
|
| 233 |
+
f"Server error: {error_data.get('message', 'Unknown error')} "
|
| 234 |
+
f"(code: {error_data.get('code', 'UNKNOWN')})"
|
| 235 |
+
)
|
| 236 |
+
|
| 237 |
+
return response
|
| 238 |
+
|
| 239 |
+
@classmethod
async def from_docker_image(
    cls: Type[EnvClientT],
    image: str,
    provider: Optional["ContainerProvider"] = None,
    **kwargs: Any,
) -> EnvClientT:
    """Launch a Docker container for *image* and return a connected client.

    Args:
        image: Docker image name to run (e.g., "coding-env:latest")
        provider: Container provider to use (defaults to LocalDockerProvider)
        **kwargs: Additional arguments to pass to provider.start_container()

    Returns:
        Connected client instance
    """
    active_provider = provider if provider is not None else LocalDockerProvider()

    # Boot the container and block until the env server answers health checks.
    base_url = active_provider.start_container(image, **kwargs)
    active_provider.wait_for_ready(base_url)

    client = cls(base_url=base_url, provider=active_provider)
    await client.connect()
    return client
|
| 271 |
+
|
| 272 |
+
@classmethod
async def from_env(
    cls: Type[EnvClientT],
    repo_id: str,
    *,
    use_docker: bool = True,
    provider: Optional["ContainerProvider | RuntimeProvider"] = None,
    **provider_kwargs: Any,
) -> EnvClientT:
    """Create a connected client from a Hugging Face Space.

    Args:
        repo_id: Hugging Face space identifier ``{org}/{space}``.
        use_docker: When ``True`` (default) pull from the HF registry and
            launch via :class:`LocalDockerProvider`. When ``False`` run the
            space locally with :class:`UVProvider`.
        provider: Optional provider instance to reuse. Must be a
            :class:`ContainerProvider` when ``use_docker=True`` and a
            :class:`RuntimeProvider` otherwise.
        provider_kwargs: Extra keyword arguments forwarded to the container
            provider's ``start_container`` (docker) or to the ``UVProvider``
            constructor/start (uv). With ``use_docker=False`` a
            ``project_path`` key overrides the default git URL
            (``git+https://huggingface.co/spaces/{repo_id}``).

    Returns:
        Connected client instance

    Examples:
        >>> env = await MyEnv.from_env("openenv/echo-env")
        >>> env = await MyEnv.from_env("openenv/echo-env", use_docker=False)
        >>> env = await MyEnv.from_env(
        ...     "openenv/echo-env",
        ...     use_docker=False,
        ...     project_path="/path/to/local/checkout"
        ... )
    """
    # Arguments understood by both provider families are split out first.
    start_args = {
        key: provider_kwargs.pop(key)
        for key in ("port", "env_vars", "workers")
        if key in provider_kwargs
    }

    if use_docker:
        # Docker mode: pull the space image from the HF registry.
        active_provider = provider or LocalDockerProvider()
        tag = provider_kwargs.pop("tag", "latest")
        image = f"registry.hf.space/{repo_id.replace('/', '-')}:{tag}"
        base_url = active_provider.start_container(
            image, **start_args, **provider_kwargs
        )
        active_provider.wait_for_ready(base_url)
    else:
        # UV mode: clone the space (or use a local checkout) and run with uv.
        if provider is None:
            uv_kwargs = dict(provider_kwargs)
            project_path = uv_kwargs.pop("project_path", None)
            if project_path is None:
                project_path = f"git+https://huggingface.co/spaces/{repo_id}"
            provider = UVProvider(project_path=project_path, **uv_kwargs)
        elif provider_kwargs:
            raise ValueError(
                "provider_kwargs cannot be used when supplying a provider instance"
            )
        active_provider = provider
        base_url = active_provider.start(**start_args)
        active_provider.wait_for_ready()

    client = cls(base_url=base_url, provider=active_provider)
    await client.connect()
    return client
|
| 356 |
+
|
| 357 |
+
@abstractmethod
def _step_payload(self, action: ActT) -> Dict[str, Any]:
    """Encode *action* as the JSON body the env server's step route expects."""
    raise NotImplementedError
|
| 361 |
+
|
| 362 |
+
@abstractmethod
def _parse_result(self, payload: Dict[str, Any]) -> StepResult[ObsT]:
    """Decode a step/reset response payload into a ``StepResult[ObsT]``."""
    raise NotImplementedError
|
| 366 |
+
|
| 367 |
+
@abstractmethod
def _parse_state(self, payload: Dict[str, Any]) -> StateT:
    """Decode a state-endpoint response payload into this client's State type."""
    raise NotImplementedError
|
| 371 |
+
|
| 372 |
+
async def reset(self, **kwargs: Any) -> StepResult[ObsT]:
    """Start a new episode.

    Args:
        **kwargs: Forwarded verbatim to the environment's reset handler.
            Common keys:
            - seed: Random seed for reproducibility
            - episode_id: Custom episode identifier

    Returns:
        StepResult containing the initial observation.
    """
    response = await self._send_and_receive({"type": "reset", "data": kwargs})
    return self._parse_result(response.get("data", {}))
|
| 391 |
+
|
| 392 |
+
async def step(self, action: ActT, **kwargs: Any) -> StepResult[ObsT]:
    """Execute one action in the environment.

    Args:
        action: The action to execute.
        **kwargs: Accepted for interface compatibility; currently ignored.

    Returns:
        StepResult containing observation, reward, and done status.
    """
    payload = {"type": "step", "data": self._step_payload(action)}
    response = await self._send_and_receive(payload)
    return self._parse_result(response.get("data", {}))
|
| 409 |
+
|
| 410 |
+
async def state(self) -> StateT:
    """Fetch the current environment state from the server.

    Returns:
        Parsed State object describing the environment.
    """
    response = await self._send_and_receive({"type": "state"})
    return self._parse_state(response.get("data", {}))
|
| 420 |
+
|
| 421 |
+
async def close(self) -> None:
    """Close the connection and release provider-managed resources.

    If this client was created via from_docker_image() or from_env(), the
    associated container (ContainerProvider) or local process
    (RuntimeProvider) is stopped as well.
    """
    await self.disconnect()

    provider = self._provider
    if provider is None:
        return
    # Duck-type over the two provider families' shutdown APIs.
    if hasattr(provider, "stop_container"):
        provider.stop_container()
    elif hasattr(provider, "stop"):
        provider.stop()
|
| 436 |
+
|
| 437 |
+
async def __aenter__(self) -> "EnvClient":
    """Async context entry: connect (idempotent) and hand back this client."""
    await self.connect()
    return self
|
| 441 |
+
|
| 442 |
+
async def __aexit__(self, exc_type, exc_val, exc_tb) -> None:
    """Async context exit: release the connection and provider resources."""
    await self.close()
|
| 445 |
+
|
| 446 |
+
def __enter__(self) -> "EnvClient":
    """Reject synchronous ``with``; this client is async-first."""
    # Guide the caller toward the two supported usage patterns.
    raise TypeError(
        "EnvClient is async by default. Use 'async with' instead of 'with', "
        "or call .sync() to get a synchronous wrapper:\n"
        " async with client: # async usage\n"
        " with client.sync(): # sync wrapper"
    )
|
| 454 |
+
|
| 455 |
+
def __exit__(self, exc_type, exc_val, exc_tb) -> None:
    """Sync context exit; unreachable because __enter__ always raises."""
    pass  # pragma: no cover
|
| 458 |
+
|
| 459 |
+
def sync(self) -> "SyncEnvClient":
    """Return a blocking, synchronous facade over this async client.

    Useful whenever async/await must not leak into the caller:
    - Integration with synchronous codebases
    - Interactive/REPL usage
    - Keeping async from "infecting" the call stack

    Returns:
        SyncEnvClient wrapper exposing synchronous equivalents of the
        async API.

    Example:
        >>> async_client = GenericEnvClient(base_url="http://localhost:8000")
        >>> sync_client = async_client.sync()
        >>>
        >>> with sync_client:
        ...     result = sync_client.reset()
        ...     result = sync_client.step({"code": "print('hello')"})
    """
    # Imported lazily to avoid a circular import at module load time.
    from .sync_client import SyncEnvClient

    return SyncEnvClient(self)
|
src/core/env_server/__init__.py
CHANGED
|
@@ -7,10 +7,74 @@
|
|
| 7 |
"""Core environment interfaces and types."""
|
| 8 |
|
| 9 |
from .base_transforms import CompositeTransform, NullTransform
|
| 10 |
-
from .
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
from .interfaces import Environment, Message, ModelTokenizer, Transform
|
| 12 |
-
|
| 13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 14 |
|
| 15 |
__all__ = [
|
| 16 |
# Core interfaces
|
|
@@ -22,6 +86,33 @@ __all__ = [
|
|
| 22 |
"Action",
|
| 23 |
"Observation",
|
| 24 |
"State",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 25 |
# Base transforms
|
| 26 |
"CompositeTransform",
|
| 27 |
"NullTransform",
|
|
@@ -32,4 +123,28 @@ __all__ = [
|
|
| 32 |
# Web Interface
|
| 33 |
"create_web_interface_app",
|
| 34 |
"WebInterfaceManager",
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 35 |
]
|
|
|
|
| 7 |
"""Core environment interfaces and types."""
|
| 8 |
|
| 9 |
from .base_transforms import CompositeTransform, NullTransform
|
| 10 |
+
from .exceptions import (
|
| 11 |
+
ConcurrencyConfigurationError,
|
| 12 |
+
EnvironmentFactoryError,
|
| 13 |
+
OpenEnvError,
|
| 14 |
+
SessionCapacityError,
|
| 15 |
+
SessionCreationError,
|
| 16 |
+
SessionNotFoundError,
|
| 17 |
+
)
|
| 18 |
+
from .http_server import create_app, create_fastapi_app, HTTPEnvServer
|
| 19 |
from .interfaces import Environment, Message, ModelTokenizer, Transform
|
| 20 |
+
|
| 21 |
+
try:
|
| 22 |
+
from .mcp_environment import MCPEnvironment
|
| 23 |
+
except ModuleNotFoundError:
|
| 24 |
+
MCPEnvironment = None # type: ignore[assignment]
|
| 25 |
+
|
| 26 |
+
from .mcp_types import (
|
| 27 |
+
CallToolAction,
|
| 28 |
+
CallToolObservation,
|
| 29 |
+
JsonRpcError,
|
| 30 |
+
# JSON-RPC types
|
| 31 |
+
JsonRpcErrorCode,
|
| 32 |
+
JsonRpcRequest,
|
| 33 |
+
JsonRpcResponse,
|
| 34 |
+
ListToolsAction,
|
| 35 |
+
ListToolsObservation,
|
| 36 |
+
McpMethod,
|
| 37 |
+
RESERVED_TOOL_NAMES,
|
| 38 |
+
Tool,
|
| 39 |
+
ToolError,
|
| 40 |
+
ToolErrorType,
|
| 41 |
+
WSMCPMessage,
|
| 42 |
+
WSMCPResponse,
|
| 43 |
+
)
|
| 44 |
+
from .route_config import GetEndpointConfig
|
| 45 |
+
from .serialization import (
|
| 46 |
+
deserialize_action,
|
| 47 |
+
deserialize_action_with_preprocessing,
|
| 48 |
+
serialize_observation,
|
| 49 |
+
)
|
| 50 |
+
from .types import (
|
| 51 |
+
Action,
|
| 52 |
+
BaseMessage,
|
| 53 |
+
ConcurrencyConfig,
|
| 54 |
+
HealthResponse,
|
| 55 |
+
HealthStatus,
|
| 56 |
+
Observation,
|
| 57 |
+
SchemaResponse,
|
| 58 |
+
ServerCapacityStatus,
|
| 59 |
+
ServerMode,
|
| 60 |
+
SessionInfo,
|
| 61 |
+
State,
|
| 62 |
+
WSCloseMessage,
|
| 63 |
+
WSErrorCode,
|
| 64 |
+
WSErrorResponse,
|
| 65 |
+
WSIncomingMessage,
|
| 66 |
+
WSObservationResponse,
|
| 67 |
+
WSResetMessage,
|
| 68 |
+
WSStateMessage,
|
| 69 |
+
WSStateResponse,
|
| 70 |
+
WSStepMessage,
|
| 71 |
+
)
|
| 72 |
+
|
| 73 |
+
try:
|
| 74 |
+
from .web_interface import create_web_interface_app, WebInterfaceManager
|
| 75 |
+
except ModuleNotFoundError:
|
| 76 |
+
create_web_interface_app = None # type: ignore[assignment]
|
| 77 |
+
WebInterfaceManager = None # type: ignore[assignment]
|
| 78 |
|
| 79 |
__all__ = [
|
| 80 |
# Core interfaces
|
|
|
|
| 86 |
"Action",
|
| 87 |
"Observation",
|
| 88 |
"State",
|
| 89 |
+
"SchemaResponse",
|
| 90 |
+
"HealthResponse",
|
| 91 |
+
# Enums
|
| 92 |
+
"HealthStatus",
|
| 93 |
+
"ServerMode",
|
| 94 |
+
"WSErrorCode",
|
| 95 |
+
# WebSocket message types
|
| 96 |
+
"BaseMessage",
|
| 97 |
+
"WSIncomingMessage",
|
| 98 |
+
"WSResetMessage",
|
| 99 |
+
"WSStepMessage",
|
| 100 |
+
"WSStateMessage",
|
| 101 |
+
"WSCloseMessage",
|
| 102 |
+
"WSObservationResponse",
|
| 103 |
+
"WSStateResponse",
|
| 104 |
+
"WSErrorResponse",
|
| 105 |
+
# Concurrency types
|
| 106 |
+
"ConcurrencyConfig",
|
| 107 |
+
"ServerCapacityStatus",
|
| 108 |
+
"SessionInfo",
|
| 109 |
+
# Exceptions
|
| 110 |
+
"OpenEnvError",
|
| 111 |
+
"ConcurrencyConfigurationError",
|
| 112 |
+
"SessionCapacityError",
|
| 113 |
+
"SessionNotFoundError",
|
| 114 |
+
"SessionCreationError",
|
| 115 |
+
"EnvironmentFactoryError",
|
| 116 |
# Base transforms
|
| 117 |
"CompositeTransform",
|
| 118 |
"NullTransform",
|
|
|
|
| 123 |
# Web Interface
|
| 124 |
"create_web_interface_app",
|
| 125 |
"WebInterfaceManager",
|
| 126 |
+
# Serialization utilities
|
| 127 |
+
"deserialize_action",
|
| 128 |
+
"deserialize_action_with_preprocessing",
|
| 129 |
+
"serialize_observation",
|
| 130 |
+
# Route configuration
|
| 131 |
+
"GetEndpointConfig",
|
| 132 |
+
# MCP types
|
| 133 |
+
"Tool",
|
| 134 |
+
"ToolError",
|
| 135 |
+
"ToolErrorType",
|
| 136 |
+
"ListToolsAction",
|
| 137 |
+
"CallToolAction",
|
| 138 |
+
"ListToolsObservation",
|
| 139 |
+
"CallToolObservation",
|
| 140 |
+
"WSMCPMessage",
|
| 141 |
+
"WSMCPResponse",
|
| 142 |
+
"RESERVED_TOOL_NAMES",
|
| 143 |
+
"MCPEnvironment",
|
| 144 |
+
# JSON-RPC types
|
| 145 |
+
"JsonRpcErrorCode",
|
| 146 |
+
"JsonRpcError",
|
| 147 |
+
"JsonRpcRequest",
|
| 148 |
+
"JsonRpcResponse",
|
| 149 |
+
"McpMethod",
|
| 150 |
]
|
src/core/env_server/base_transforms.py
CHANGED
|
@@ -26,4 +26,4 @@ class NullTransform(Transform):
|
|
| 26 |
"""Default transform that passes through unchanged."""
|
| 27 |
|
| 28 |
def __call__(self, observation: Observation) -> Observation:
|
| 29 |
-
return observation
|
|
|
|
| 26 |
"""Default transform that passes through unchanged."""
|
| 27 |
|
| 28 |
def __call__(self, observation: Observation) -> Observation:
|
| 29 |
+
return observation
|
src/core/env_server/exceptions.py
ADDED
|
@@ -0,0 +1,105 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""Custom exceptions for environment server operations."""
|
| 8 |
+
|
| 9 |
+
from typing import Optional
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class OpenEnvError(Exception):
    """Common base class for every exception raised by OpenEnv."""
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class ConcurrencyConfigurationError(OpenEnvError):
    """Environment cannot honor the requested concurrent-session count.

    Raised during server startup when ``max_concurrent_envs > 1`` is requested
    for an environment class that is not flagged SUPPORTS_CONCURRENT_SESSIONS.
    """

    def __init__(
        self,
        environment_name: str,
        max_concurrent_envs: int,
        message: Optional[str] = None,
    ):
        self.environment_name = environment_name
        self.max_concurrent_envs = max_concurrent_envs

        default = (
            f"Environment '{environment_name}' is not marked as SUPPORTS_CONCURRENT_SESSIONS. "
            f"Cannot run with max_concurrent_envs={max_concurrent_envs}. "
            f"Either set max_concurrent_envs=1 or ensure the environment "
            f"properly isolates session state and set SUPPORTS_CONCURRENT_SESSIONS=True."
        )
        super().__init__(default if message is None else message)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
class SessionCapacityError(OpenEnvError):
    """Server refused a new session because it is already at capacity.

    Raised when a new WebSocket connection is attempted while the server
    already hosts ``max_concurrent_envs`` active sessions.
    """

    def __init__(
        self,
        active_sessions: int,
        max_sessions: int,
        message: Optional[str] = None,
    ):
        self.active_sessions = active_sessions
        self.max_sessions = max_sessions

        default = (
            f"Server at capacity: {active_sessions}/{max_sessions} sessions active. "
            f"Cannot accept new connections."
        )
        super().__init__(default if message is None else message)
|
| 70 |
+
|
| 71 |
+
|
| 72 |
+
class SessionNotFoundError(OpenEnvError):
    """Lookup of a session id that the server does not know about."""

    def __init__(self, session_id: str, message: Optional[str] = None):
        self.session_id = session_id
        default = f"Session '{session_id}' not found."
        super().__init__(default if message is None else message)
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class SessionCreationError(OpenEnvError):
    """A new session could not be created; ``reason`` explains why."""

    def __init__(self, reason: str, message: Optional[str] = None):
        self.reason = reason
        default = f"Failed to create session: {reason}"
        super().__init__(default if message is None else message)
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
class EnvironmentFactoryError(OpenEnvError):
    """The environment factory callable failed to produce an instance."""

    def __init__(self, factory_name: str, message: Optional[str] = None):
        self.factory_name = factory_name
        default = f"Environment factory '{factory_name}' failed to create instance."
        super().__init__(default if message is None else message)
|
src/core/env_server/gradio_theme.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""Unified terminal-style theme for OpenEnv Gradio UI (light/dark)."""
|
| 8 |
+
|
| 9 |
+
from __future__ import annotations
|
| 10 |
+
|
| 11 |
+
import gradio as gr
|
| 12 |
+
|
| 13 |
+
# Monospace font stack used for code-like UI regions; later entries are
# fallbacks resolved by the browser in order.
_MONO_FONTS = (
    "JetBrains Mono",
    "Fira Code",
    "Cascadia Code",
    "Consolas",
    "ui-monospace",
    "monospace",
)

# Primary (proportional) font stack for body text.
_CORE_FONT = (
    "Lato",
    "Inter",
    "Arial",
    "Helvetica",
    "sans-serif",
)

# Square corners everywhere: every Gradio size step collapses to 0px.
_ZERO_RADIUS = gr.themes.Size(
    xxs="0px",
    xs="0px",
    sm="0px",
    md="0px",
    lg="0px",
    xl="0px",
    xxl="0px",
)

# GitHub-style green palette used as the primary hue (c50 lightest → c950 darkest).
_GREEN_HUE = gr.themes.Color(
    c50="#e6f4ea",
    c100="#ceead6",
    c200="#a8dab5",
    c300="#6fcc8b",
    c400="#3fb950",
    c500="#238636",
    c600="#1a7f37",
    c700="#116329",
    c800="#0a4620",
    c900="#033a16",
    c950="#04200d",
)

# GitHub-style gray palette for secondary/neutral elements.
_NEUTRAL_HUE = gr.themes.Color(
    c50="#f6f8fa",
    c100="#eaeef2",
    c200="#d0d7de",
    c300="#afb8c1",
    c400="#8c959f",
    c500="#6e7781",
    c600="#57606a",
    c700="#424a53",
    c800="#32383f",
    c900="#24292f",
    c950="#1b1f24",
)

# The exported theme: base palette/fonts above, then explicit overrides for
# light mode followed by their `_dark` counterparts.
OPENENV_GRADIO_THEME = gr.themes.Base(
    primary_hue=_GREEN_HUE,
    secondary_hue=_NEUTRAL_HUE,
    neutral_hue=_NEUTRAL_HUE,
    font=_CORE_FONT,
    font_mono=_MONO_FONTS,
    radius_size=_ZERO_RADIUS,
).set(
    # Light mode
    body_background_fill="#ffffff",
    background_fill_primary="#ffffff",
    background_fill_secondary="#f6f8fa",
    block_background_fill="#ffffff",
    block_border_color="#ffffff",
    block_label_text_color="#57606a",
    block_title_text_color="#24292f",
    border_color_primary="#d0d7de",
    input_background_fill="#ffffff",
    input_border_color="#d0d7de",
    button_primary_background_fill="#1a7f37",
    button_primary_background_fill_hover="#116329",
    button_primary_text_color="#ffffff",
    button_secondary_background_fill="#f6f8fa",
    button_secondary_background_fill_hover="#eaeef2",
    button_secondary_text_color="#24292f",
    button_secondary_border_color="#d0d7de",
    # Dark mode
    body_background_fill_dark="#0d1117",
    background_fill_primary_dark="#0d1117",
    background_fill_secondary_dark="#0d1117",
    block_background_fill_dark="#0d1117",
    block_border_color_dark="#0d1117",
    block_label_text_color_dark="#8b949e",
    block_title_text_color_dark="#c9d1d9",
    border_color_primary_dark="#30363d",
    input_background_fill_dark="#0d1117",
    input_border_color_dark="#30363d",
    button_primary_background_fill_dark="#30363d",
    button_primary_background_fill_hover_dark="#484f58",
    button_primary_text_color_dark="#c9d1d9",
    button_secondary_background_fill_dark="#21262d",
    button_secondary_background_fill_hover_dark="#30363d",
    button_secondary_text_color_dark="#c9d1d9",
    button_secondary_border_color_dark="#30363d",
)

# Extra CSS injected alongside the theme: removes all rounding, pads the two
# main columns, flattens markdown containers, and tweaks dark-mode borders.
OPENENV_GRADIO_CSS = """
* { border-radius: 0 !important; }
.col-left { padding: 16px !important; }
.col-right { padding: 16px !important; }
.prose, .markdown-text, .md,
.prose > *, .markdown-text > * {
background: transparent !important;
border: none !important;
box-shadow: none !important;
}
.dark .col-left {
border-left-color: rgba(139, 148, 158, 0.4) !important;
}
.dark .col-right {
border-left-color: rgba(201, 209, 217, 0.3) !important;
}
"""
|
src/core/env_server/gradio_ui.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
Gradio-based web UI for OpenEnv environments.
|
| 9 |
+
|
| 10 |
+
Replaces the legacy HTML/JavaScript interface when ENABLE_WEB_INTERFACE is set.
|
| 11 |
+
Mount at /web via gr.mount_gradio_app() from create_web_interface_app().
|
| 12 |
+
"""
|
| 13 |
+
|
| 14 |
+
from __future__ import annotations
|
| 15 |
+
|
| 16 |
+
import json
|
| 17 |
+
import re
|
| 18 |
+
from typing import Any, Dict, List, Optional
|
| 19 |
+
|
| 20 |
+
import gradio as gr
|
| 21 |
+
|
| 22 |
+
from .types import EnvironmentMetadata
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _escape_md(text: str) -> str:
    """Backslash-escape Markdown metacharacters in user-controlled content."""
    specials = r"([\\`*_\{\}\[\]()#+\-.!|~>])"
    return re.sub(specials, r"\\\1", str(text))
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def _format_observation(data: Dict[str, Any]) -> str:
    """Render a reset/step response dict as Markdown for display."""
    parts: List[str] = []
    obs = data.get("observation", {})

    if isinstance(obs, dict):
        # Prompt comes first when present.
        if obs.get("prompt"):
            parts.append(f"**Prompt:**\n\n{_escape_md(obs['prompt'])}\n")
        # Then any chat-style messages, one bullet each.
        messages = obs.get("messages", [])
        if messages:
            parts.append("**Messages:**\n")
            for msg in messages:
                sender = _escape_md(str(msg.get("sender_id", "?")))
                content = _escape_md(str(msg.get("content", "")))
                cat = _escape_md(str(msg.get("category", "")))
                parts.append(f"- `[{cat}]` Player {sender}: {content}")
            parts.append("")

    # Reward/done trailers, shown only when the server supplied them.
    reward = data.get("reward")
    done = data.get("done")
    if reward is not None:
        parts.append(f"**Reward:** `{reward}`")
    if done is not None:
        parts.append(f"**Done:** `{done}`")

    return "\n".join(parts) if parts else "*No observation data*"
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
def _readme_section(metadata: Optional[EnvironmentMetadata]) -> str:
    """Markdown for the README accordion; placeholder when none exists."""
    if metadata and metadata.readme_content:
        return metadata.readme_content
    return "*No README available.*"
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get_gradio_display_title(
    metadata: Optional[EnvironmentMetadata],
    fallback: str = "OpenEnv Environment",
) -> str:
    """Compose the app title shown in the browser tab and Blocks header."""
    if metadata:
        env_name = metadata.name
    else:
        env_name = fallback
    return f"OpenEnv Agentic Environment: {env_name}"
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
def build_gradio_app(
    web_manager: Any,
    action_fields: List[Dict[str, Any]],
    metadata: Optional[EnvironmentMetadata],
    is_chat_env: bool,
    title: str = "OpenEnv Environment",
    quick_start_md: Optional[str] = None,
) -> gr.Blocks:
    """
    Build a Gradio Blocks app for the OpenEnv web interface.

    Args:
        web_manager: WebInterfaceManager (reset/step_environment, get_state).
        action_fields: Field dicts from _extract_action_fields(action_cls).
        metadata: Environment metadata for README/name.
        is_chat_env: If True, single message textbox; else form from action_fields.
        title: App title (overridden by metadata.name when present; see get_gradio_display_title).
        quick_start_md: Optional Quick Start markdown (class names already replaced).

    Returns:
        gr.Blocks to mount with gr.mount_gradio_app(app, blocks, path="/web").
    """
    readme_content = _readme_section(metadata)
    display_title = get_gradio_display_title(metadata, fallback=title)

    async def reset_env():
        # Reset handler: returns (observation markdown, raw JSON, status text).
        try:
            data = await web_manager.reset_environment()
            obs_md = _format_observation(data)
            return (
                obs_md,
                json.dumps(data, indent=2),
                "Environment reset successfully.",
            )
        except Exception as e:
            return ("", "", f"Error: {e}")

    def _step_with_action(action_data: Dict[str, Any]):
        # Factory returning an async runner bound to a specific action payload.
        async def _run():
            try:
                data = await web_manager.step_environment(action_data)
                obs_md = _format_observation(data)
                return (
                    obs_md,
                    json.dumps(data, indent=2),
                    "Step complete.",
                )
            except Exception as e:
                return ("", "", f"Error: {e}")

        return _run

    async def step_chat(message: str):
        # BUGFIX: previously `if not (message or str(message).strip())`, which
        # let a whitespace-only message through and submitted an empty action.
        # `and` rejects None/empty AND whitespace-only input.
        if not (message and str(message).strip()):
            return ("", "", "Please enter an action message.")
        action = {"message": str(message).strip()}
        return await _step_with_action(action)()

    def get_state_sync():
        # Synchronous state fetch; errors are surfaced in the JSON pane.
        try:
            data = web_manager.get_state()
            return json.dumps(data, indent=2)
        except Exception as e:
            return f"Error: {e}"

    with gr.Blocks(title=display_title) as demo:
        with gr.Row():
            with gr.Column(scale=1, elem_classes="col-left"):
                if quick_start_md:
                    with gr.Accordion("Quick Start", open=True):
                        gr.Markdown(quick_start_md)
                with gr.Accordion("README", open=False):
                    gr.Markdown(readme_content)

            with gr.Column(scale=2, elem_classes="col-right"):
                obs_display = gr.Markdown(
                    value=("# Playground\n\nClick **Reset** to start a new episode."),
                )
                with gr.Group():
                    if is_chat_env:
                        # Chat environments take a single free-text message.
                        action_input = gr.Textbox(
                            label="Action message",
                            placeholder="e.g. Enter your message...",
                        )
                        step_inputs = [action_input]
                        step_fn = step_chat
                    else:
                        # Structured environments: build one widget per action field.
                        step_inputs = []
                        for field in action_fields:
                            name = field["name"]
                            field_type = field.get("type", "text")
                            label = name.replace("_", " ").title()
                            placeholder = field.get("placeholder", "")
                            if field_type == "checkbox":
                                inp = gr.Checkbox(label=label)
                            elif field_type == "number":
                                inp = gr.Number(label=label)
                            elif field_type == "select":
                                choices = field.get("choices") or []
                                inp = gr.Dropdown(
                                    choices=choices,
                                    label=label,
                                    allow_custom_value=False,
                                )
                            elif field_type in ("textarea", "tensor"):
                                inp = gr.Textbox(
                                    label=label,
                                    placeholder=placeholder,
                                    lines=3,
                                )
                            else:
                                inp = gr.Textbox(
                                    label=label,
                                    placeholder=placeholder,
                                )
                            step_inputs.append(inp)

                        async def step_form(*values):
                            # Map widget values back onto action field names.
                            if not action_fields:
                                return await _step_with_action({})()
                            action_data = {}
                            for i, field in enumerate(action_fields):
                                if i >= len(values):
                                    break
                                name = field["name"]
                                val = values[i]
                                if field.get("type") == "checkbox":
                                    # Checkboxes always submit an explicit bool.
                                    action_data[name] = bool(val)
                                elif val is not None and val != "":
                                    # Other fields are omitted when left blank.
                                    action_data[name] = val
                            return await _step_with_action(action_data)()

                        step_fn = step_form

                with gr.Row():
                    step_btn = gr.Button("Step", variant="primary")
                    reset_btn = gr.Button("Reset", variant="secondary")
                    state_btn = gr.Button("Get state", variant="secondary")
                with gr.Row():
                    status = gr.Textbox(
                        label="Status",
                        interactive=False,
                    )
                    raw_json = gr.Code(
                        label="Raw JSON response",
                        language="json",
                        interactive=False,
                    )

        reset_btn.click(
            fn=reset_env,
            outputs=[obs_display, raw_json, status],
        )
        step_btn.click(
            fn=step_fn,
            inputs=step_inputs,
            outputs=[obs_display, raw_json, status],
        )
        if is_chat_env:
            # Allow Enter-to-submit in chat mode.
            action_input.submit(
                fn=step_fn,
                inputs=step_inputs,
                outputs=[obs_display, raw_json, status],
            )
        state_btn.click(
            fn=get_state_sync,
            outputs=[raw_json],
        )

    return demo
|
src/core/env_server/http_server.py
CHANGED
|
@@ -8,25 +8,113 @@
|
|
| 8 |
HTTP server wrapper for Environment instances.
|
| 9 |
|
| 10 |
This module provides utilities to wrap any Environment subclass and expose it
|
| 11 |
-
over HTTP endpoints that
|
| 12 |
"""
|
| 13 |
|
| 14 |
from __future__ import annotations
|
| 15 |
|
|
|
|
|
|
|
|
|
|
| 16 |
import os
|
| 17 |
-
|
| 18 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 19 |
|
| 20 |
from .interfaces import Environment
|
| 21 |
-
from .
|
| 22 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 23 |
|
| 24 |
class HTTPEnvServer:
|
| 25 |
"""
|
| 26 |
HTTP server wrapper for Environment instances.
|
| 27 |
|
| 28 |
This class wraps an Environment and exposes its reset(), step(), and state
|
| 29 |
-
methods as HTTP endpoints compatible with
|
| 30 |
|
| 31 |
The server expects:
|
| 32 |
- Action deserialization: Converts JSON dict to Action subclass
|
|
@@ -35,9 +123,15 @@ class HTTPEnvServer:
|
|
| 35 |
Example:
|
| 36 |
>>> from core.env_server import HTTPEnvServer
|
| 37 |
>>> from envs.coding_env.server import CodeExecutionEnvironment
|
|
|
|
| 38 |
>>>
|
| 39 |
-
>>>
|
| 40 |
-
>>> server = HTTPEnvServer(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 41 |
>>>
|
| 42 |
>>> # Register routes with FastAPI
|
| 43 |
>>> from fastapi import FastAPI
|
|
@@ -47,178 +141,1177 @@ class HTTPEnvServer:
|
|
| 47 |
|
| 48 |
def __init__(
|
| 49 |
self,
|
| 50 |
-
env: Environment,
|
| 51 |
action_cls: Type[Action],
|
| 52 |
observation_cls: Type[Observation],
|
|
|
|
|
|
|
| 53 |
):
|
| 54 |
"""
|
| 55 |
Initialize HTTP server wrapper.
|
| 56 |
|
| 57 |
Args:
|
| 58 |
-
env:
|
|
|
|
| 59 |
action_cls: The Action subclass this environment expects
|
| 60 |
observation_cls: The Observation subclass this environment returns
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 61 |
"""
|
| 62 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 63 |
self.action_cls = action_cls
|
| 64 |
self.observation_cls = observation_cls
|
| 65 |
|
| 66 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 67 |
"""
|
| 68 |
-
|
| 69 |
|
| 70 |
-
|
| 71 |
-
|
|
|
|
| 72 |
"""
|
|
|
|
|
|
|
| 73 |
|
| 74 |
-
if
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 76 |
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
|
| 80 |
-
|
| 81 |
-
|
| 82 |
-
return self._serialize_observation(observation)
|
| 83 |
|
| 84 |
-
|
| 85 |
-
|
| 86 |
-
|
| 87 |
-
|
| 88 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 89 |
|
| 90 |
-
|
| 91 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
-
|
| 94 |
-
|
| 95 |
|
| 96 |
-
#
|
| 97 |
-
|
|
|
|
|
|
|
|
|
|
| 98 |
|
| 99 |
-
|
| 100 |
-
|
| 101 |
-
|
| 102 |
-
|
| 103 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 104 |
|
| 105 |
-
|
| 106 |
-
|
| 107 |
-
|
| 108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
|
|
|
|
| 110 |
|
| 111 |
-
def
|
| 112 |
"""
|
| 113 |
-
|
| 114 |
|
| 115 |
Args:
|
| 116 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 117 |
|
| 118 |
-
|
| 119 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
|
| 121 |
-
|
| 122 |
-
|
| 123 |
-
|
| 124 |
"""
|
| 125 |
-
|
| 126 |
-
|
| 127 |
-
|
| 128 |
-
|
| 129 |
-
return action
|
| 130 |
|
| 131 |
-
def
|
| 132 |
"""
|
| 133 |
-
|
| 134 |
|
| 135 |
Args:
|
| 136 |
-
|
| 137 |
|
| 138 |
Returns:
|
| 139 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 140 |
|
| 141 |
-
|
| 142 |
-
|
| 143 |
-
|
| 144 |
-
|
| 145 |
-
"
|
| 146 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
"""
|
| 148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
|
| 150 |
-
|
| 151 |
-
|
| 152 |
-
|
| 153 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 154 |
|
| 155 |
-
# Return in HTTPEnvClient expected format
|
| 156 |
-
return {
|
| 157 |
-
"observation": obs_dict,
|
| 158 |
-
"reward": reward,
|
| 159 |
-
"done": done,
|
| 160 |
-
}
|
| 161 |
|
| 162 |
def create_app(
|
| 163 |
-
env: Environment,
|
| 164 |
action_cls: Type[Action],
|
| 165 |
observation_cls: Type[Observation],
|
| 166 |
env_name: Optional[str] = None,
|
| 167 |
-
|
|
|
|
|
|
|
|
|
|
| 168 |
"""
|
| 169 |
Create a FastAPI application with or without web interface.
|
| 170 |
-
|
| 171 |
This function creates a FastAPI app with the web interface enabled by default,
|
| 172 |
including README integration for better user experience.
|
| 173 |
-
|
| 174 |
Args:
|
| 175 |
-
env:
|
| 176 |
action_cls: The Action subclass this environment expects
|
| 177 |
observation_cls: The Observation subclass this environment returns
|
| 178 |
env_name: Optional environment name for README loading
|
| 179 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 180 |
Returns:
|
| 181 |
FastAPI application instance with or without web interface and README integration
|
| 182 |
"""
|
| 183 |
# Check if web interface should be enabled
|
| 184 |
# This can be controlled via environment variable or build argument
|
| 185 |
-
enable_web = (
|
| 186 |
-
|
|
|
|
|
|
|
| 187 |
)
|
| 188 |
|
| 189 |
if enable_web:
|
| 190 |
-
#
|
| 191 |
from .web_interface import create_web_interface_app
|
| 192 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 193 |
else:
|
| 194 |
# Use standard FastAPI app without web interface
|
| 195 |
-
return create_fastapi_app(
|
| 196 |
-
|
|
|
|
|
|
|
| 197 |
|
| 198 |
def create_fastapi_app(
|
| 199 |
-
env: Environment,
|
| 200 |
action_cls: Type[Action],
|
| 201 |
observation_cls: Type[Observation],
|
| 202 |
-
|
|
|
|
|
|
|
| 203 |
"""
|
| 204 |
-
Create a FastAPI application with
|
| 205 |
|
| 206 |
Args:
|
| 207 |
-
env:
|
| 208 |
action_cls: The Action subclass this environment expects
|
| 209 |
observation_cls: The Observation subclass this environment returns
|
|
|
|
|
|
|
|
|
|
|
|
|
| 210 |
|
| 211 |
Returns:
|
| 212 |
-
FastAPI application instance
|
| 213 |
-
|
| 214 |
-
Example:
|
| 215 |
-
>>> from envs.coding_env.server import CodeExecutionEnvironment
|
| 216 |
-
>>> from envs.coding_env.models import CodeAction, CodeObservation
|
| 217 |
-
>>>
|
| 218 |
-
>>> env = CodeExecutionEnvironment()
|
| 219 |
-
>>> app = create_fastapi_app(env, CodeAction, CodeObservation)
|
| 220 |
-
>>>
|
| 221 |
-
>>> # Run with: uvicorn module:app --host 0.0.0.0 --port 8000
|
| 222 |
"""
|
| 223 |
try:
|
| 224 |
from fastapi import FastAPI
|
|
@@ -227,7 +1320,72 @@ def create_fastapi_app(
|
|
| 227 |
"FastAPI is required. Install with: pip install fastapi uvicorn"
|
| 228 |
)
|
| 229 |
|
| 230 |
-
app = FastAPI(
|
| 231 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 232 |
server.register_routes(app)
|
| 233 |
return app
|
|
|
|
| 8 |
HTTP server wrapper for Environment instances.
|
| 9 |
|
| 10 |
This module provides utilities to wrap any Environment subclass and expose it
|
| 11 |
+
over HTTP and WebSocket endpoints that EnvClient can consume.
|
| 12 |
"""
|
| 13 |
|
| 14 |
from __future__ import annotations
|
| 15 |
|
| 16 |
+
import asyncio
|
| 17 |
+
import inspect
|
| 18 |
+
import json
|
| 19 |
import os
|
| 20 |
+
import time
|
| 21 |
+
import uuid
|
| 22 |
+
from concurrent.futures import ThreadPoolExecutor
|
| 23 |
+
from typing import Any, Callable, Dict, Optional, Type
|
| 24 |
+
|
| 25 |
+
from fastapi import (
|
| 26 |
+
Body,
|
| 27 |
+
FastAPI,
|
| 28 |
+
HTTPException,
|
| 29 |
+
Request,
|
| 30 |
+
status,
|
| 31 |
+
WebSocket,
|
| 32 |
+
WebSocketDisconnect,
|
| 33 |
+
)
|
| 34 |
+
from pydantic import ValidationError
|
| 35 |
|
| 36 |
from .interfaces import Environment
|
| 37 |
+
from .mcp_environment import get_server_tools
|
| 38 |
+
from .mcp_types import (
|
| 39 |
+
JsonRpcErrorCode,
|
| 40 |
+
JsonRpcRequest,
|
| 41 |
+
JsonRpcResponse,
|
| 42 |
+
McpMethod,
|
| 43 |
+
WSMCPMessage,
|
| 44 |
+
WSMCPResponse,
|
| 45 |
+
)
|
| 46 |
+
from .route_config import GetEndpointConfig, register_get_endpoints
|
| 47 |
+
from .serialization import deserialize_action, serialize_observation
|
| 48 |
+
from .types import (
|
| 49 |
+
Action,
|
| 50 |
+
ConcurrencyConfig,
|
| 51 |
+
EnvironmentMetadata,
|
| 52 |
+
HealthResponse,
|
| 53 |
+
HealthStatus,
|
| 54 |
+
Observation,
|
| 55 |
+
ResetRequest,
|
| 56 |
+
ResetResponse,
|
| 57 |
+
SchemaResponse,
|
| 58 |
+
ServerCapacityStatus,
|
| 59 |
+
ServerMode,
|
| 60 |
+
SessionInfo,
|
| 61 |
+
State,
|
| 62 |
+
StepRequest,
|
| 63 |
+
StepResponse,
|
| 64 |
+
WSCloseMessage,
|
| 65 |
+
WSErrorCode,
|
| 66 |
+
WSErrorResponse,
|
| 67 |
+
WSObservationResponse,
|
| 68 |
+
WSResetMessage,
|
| 69 |
+
WSStateMessage,
|
| 70 |
+
WSStateResponse,
|
| 71 |
+
WSStepMessage,
|
| 72 |
+
)
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
def _make_json_serializable(obj: Any) -> Any:
|
| 76 |
+
"""
|
| 77 |
+
Convert an object to a JSON-serializable form.
|
| 78 |
+
|
| 79 |
+
Handles Pydantic models, dataclasses, and other common types.
|
| 80 |
+
|
| 81 |
+
Args:
|
| 82 |
+
obj: The object to convert
|
| 83 |
+
|
| 84 |
+
Returns:
|
| 85 |
+
A JSON-serializable representation of the object
|
| 86 |
+
"""
|
| 87 |
+
if obj is None:
|
| 88 |
+
return None
|
| 89 |
+
if isinstance(obj, (str, int, float, bool)):
|
| 90 |
+
return obj
|
| 91 |
+
if isinstance(obj, (list, tuple)):
|
| 92 |
+
return [_make_json_serializable(item) for item in obj]
|
| 93 |
+
if isinstance(obj, dict):
|
| 94 |
+
return {k: _make_json_serializable(v) for k, v in obj.items()}
|
| 95 |
+
if hasattr(obj, "model_dump"):
|
| 96 |
+
# Pydantic model
|
| 97 |
+
return obj.model_dump()
|
| 98 |
+
if hasattr(obj, "__dict__"):
|
| 99 |
+
# Object with __dict__
|
| 100 |
+
return {k: _make_json_serializable(v) for k, v in obj.__dict__.items()}
|
| 101 |
+
# Fallback to string representation
|
| 102 |
+
return str(obj)
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
from .exceptions import (
|
| 106 |
+
ConcurrencyConfigurationError,
|
| 107 |
+
EnvironmentFactoryError,
|
| 108 |
+
SessionCapacityError,
|
| 109 |
+
)
|
| 110 |
+
|
| 111 |
|
| 112 |
class HTTPEnvServer:
|
| 113 |
"""
|
| 114 |
HTTP server wrapper for Environment instances.
|
| 115 |
|
| 116 |
This class wraps an Environment and exposes its reset(), step(), and state
|
| 117 |
+
methods as HTTP and WebSocket endpoints compatible with EnvClient.
|
| 118 |
|
| 119 |
The server expects:
|
| 120 |
- Action deserialization: Converts JSON dict to Action subclass
|
|
|
|
| 123 |
Example:
|
| 124 |
>>> from core.env_server import HTTPEnvServer
|
| 125 |
>>> from envs.coding_env.server import CodeExecutionEnvironment
|
| 126 |
+
>>> from envs.coding_env.models import CodeAction, CodeObservation
|
| 127 |
>>>
|
| 128 |
+
>>> # Pass environment class (factory pattern)
|
| 129 |
+
>>> server = HTTPEnvServer(
|
| 130 |
+
... env=CodeExecutionEnvironment,
|
| 131 |
+
... action_cls=CodeAction,
|
| 132 |
+
... observation_cls=CodeObservation,
|
| 133 |
+
... max_concurrent_envs=4,
|
| 134 |
+
... )
|
| 135 |
>>>
|
| 136 |
>>> # Register routes with FastAPI
|
| 137 |
>>> from fastapi import FastAPI
|
|
|
|
| 141 |
|
| 142 |
    def __init__(
        self,
        env: Callable[[], Environment],
        action_cls: Type[Action],
        observation_cls: Type[Observation],
        max_concurrent_envs: Optional[int] = None,
        concurrency_config: Optional[ConcurrencyConfig] = None,
    ):
        """
        Initialize HTTP server wrapper.

        Args:
            env: Environment factory (callable) that creates new instances.
                Will be called to create a new environment for each WebSocket session.
            action_cls: The Action subclass this environment expects
            observation_cls: The Observation subclass this environment returns
            max_concurrent_envs: Maximum number of concurrent WebSocket sessions.
                Mutually exclusive with concurrency_config.
            concurrency_config: Optional ConcurrencyConfig for advanced concurrency settings.
                Mutually exclusive with max_concurrent_envs.

        Raises:
            TypeError: If env is not callable (e.g. an instance was passed).
            ValueError: If both max_concurrent_envs and concurrency_config are provided.
            ConcurrencyConfigurationError: If max_concurrent_envs > 1 for an
                environment that is not marked as SUPPORTS_CONCURRENT_SESSIONS.
        """
        # Validate that env is callable (factory pattern): a fresh environment
        # must be creatable per session, so an instance cannot be accepted.
        if not callable(env):
            raise TypeError(
                f"env must be a callable (class or factory function), got {type(env)}. "
                f"Pass the environment class (e.g., MyEnvironment) not an instance (e.g., MyEnvironment())."
            )

        self._env_factory: Callable[[], Environment] = env

        # Handle concurrency configuration: the two knobs are mutually exclusive.
        if max_concurrent_envs is not None and concurrency_config is not None:
            raise ValueError(
                "Cannot specify both 'max_concurrent_envs' and 'concurrency_config'. "
                "Please use only one method to configure concurrency."
            )

        if concurrency_config is not None:
            self._concurrency_config = concurrency_config
        elif max_concurrent_envs is not None:
            self._concurrency_config = ConcurrencyConfig(
                max_concurrent_envs=max_concurrent_envs,
                session_timeout=None,
            )
        else:
            # Default configuration: single session, no timeout.
            self._concurrency_config = ConcurrencyConfig(
                max_concurrent_envs=1,
                session_timeout=None,
            )

        # Cached for fast access in the session-creation hot path.
        self._max_concurrent_envs = self._concurrency_config.max_concurrent_envs

        # Validate concurrency configuration (fail fast at construction time;
        # may instantiate a throwaway env when the factory is not a class).
        self._validate_concurrency_safety()

        self.action_cls = action_cls
        self.observation_cls = observation_cls

        # Session management for WebSocket connections.
        # _sessions maps session_id -> Environment (transiently None while a
        # session's environment is still being constructed — see _create_session).
        self._sessions: Dict[str, Environment] = {}
        # Per-session single-thread executors so each env always runs on the
        # same thread (required by thread-affine libraries).
        self._session_executors: Dict[str, ThreadPoolExecutor] = {}
        self._session_info: Dict[str, SessionInfo] = {}
        self._session_lock = asyncio.Lock()

        # Create thread pool for running sync code in async context
        # This is needed for environments using sync libraries (e.g., Playwright)
        self._executor = ThreadPoolExecutor(max_workers=32)
|
| 215 |
+
|
| 216 |
+
def _validate_concurrency_safety(self) -> None:
|
| 217 |
"""
|
| 218 |
+
Validate that the environment supports the configured concurrency level.
|
| 219 |
|
| 220 |
+
Raises:
|
| 221 |
+
ConcurrencyConfigurationError: If max_concurrent_envs > 1 for an
|
| 222 |
+
environment that is not marked as SUPPORTS_CONCURRENT_SESSIONS.
|
| 223 |
"""
|
| 224 |
+
if self._max_concurrent_envs <= 1:
|
| 225 |
+
return
|
| 226 |
|
| 227 |
+
if inspect.isclass(self._env_factory):
|
| 228 |
+
env_cls = self._env_factory
|
| 229 |
+
else:
|
| 230 |
+
_temp_env = self._env_factory()
|
| 231 |
+
env_cls = type(_temp_env)
|
| 232 |
+
_temp_env.close()
|
| 233 |
+
del _temp_env
|
| 234 |
|
| 235 |
+
if not getattr(env_cls, "SUPPORTS_CONCURRENT_SESSIONS", False):
|
| 236 |
+
raise ConcurrencyConfigurationError(
|
| 237 |
+
environment_name=env_cls.__name__,
|
| 238 |
+
max_concurrent_envs=self._max_concurrent_envs,
|
| 239 |
+
)
|
|
|
|
| 240 |
|
| 241 |
+
def get_capacity_status(self) -> ServerCapacityStatus:
|
| 242 |
+
"""
|
| 243 |
+
Get the current capacity status of the server.
|
| 244 |
+
|
| 245 |
+
Returns:
|
| 246 |
+
ServerCapacityStatus with current session counts and availability.
|
| 247 |
+
"""
|
| 248 |
+
return ServerCapacityStatus.from_counts(
|
| 249 |
+
active=len(self._sessions),
|
| 250 |
+
max_sessions=self._max_concurrent_envs,
|
| 251 |
+
)
|
| 252 |
+
|
| 253 |
+
async def _run_sync_in_thread_pool(
|
| 254 |
+
self, func: Callable[..., Observation], *args, **kwargs
|
| 255 |
+
) -> Observation:
|
| 256 |
+
"""Run a synchronous function in the thread pool executor."""
|
| 257 |
+
loop = asyncio.get_event_loop()
|
| 258 |
+
return await loop.run_in_executor(self._executor, lambda: func(*args, **kwargs))
|
| 259 |
+
|
| 260 |
+
def _get_valid_kwargs(
|
| 261 |
+
self,
|
| 262 |
+
sig: inspect.Signature,
|
| 263 |
+
kwargs: Dict[str, Any],
|
| 264 |
+
skip_params: Optional[set[str]] = None,
|
| 265 |
+
) -> Dict[str, Any]:
|
| 266 |
+
"""Filter kwargs to only include parameters accepted by the function signature."""
|
| 267 |
+
if skip_params is None:
|
| 268 |
+
skip_params = set()
|
| 269 |
+
|
| 270 |
+
valid_kwargs = {}
|
| 271 |
+
|
| 272 |
+
has_kwargs = any(
|
| 273 |
+
p.kind == inspect.Parameter.VAR_KEYWORD for p in sig.parameters.values()
|
| 274 |
+
)
|
| 275 |
+
|
| 276 |
+
for k, v in kwargs.items():
|
| 277 |
+
if k in sig.parameters or has_kwargs:
|
| 278 |
+
if k not in skip_params:
|
| 279 |
+
valid_kwargs[k] = v
|
| 280 |
+
|
| 281 |
+
return valid_kwargs
|
| 282 |
+
|
| 283 |
+
    async def _create_session(self) -> tuple[str, Environment]:
        """
        Create a new WebSocket session with its own environment instance.

        The capacity slot is reserved (with a placeholder) under the lock,
        then the environment is constructed outside the lock so slow factories
        do not block other sessions. On factory failure the reservation is
        rolled back.

        Returns:
            Tuple of (session_id, environment)

        Raises:
            SessionCapacityError: If max concurrent sessions reached
            EnvironmentFactoryError: If the factory fails to create an environment
        """
        async with self._session_lock:
            if len(self._sessions) >= self._max_concurrent_envs:
                raise SessionCapacityError(
                    active_sessions=len(self._sessions),
                    max_sessions=self._max_concurrent_envs,
                )

            session_id = str(uuid.uuid4())
            current_time = time.time()

            # Create executor and reserve slot so capacity is not exceeded while
            # we create the env outside the lock (avoids blocking other sessions)
            executor = ThreadPoolExecutor(max_workers=1)
            self._session_executors[session_id] = executor
            # NOTE: the None entry deliberately counts toward capacity while the
            # env is still being constructed (transient exception to the
            # Dict[str, Environment] annotation).
            self._sessions[session_id] = None  # placeholder until env is ready

        try:
            # Create environment in the executor thread (outside lock).
            # Using the session's dedicated single-thread executor pins
            # construction to the same thread that will later run step/close
            # (needed by thread-affine libraries such as Playwright).
            loop = asyncio.get_event_loop()
            env = await loop.run_in_executor(executor, self._env_factory)
        except Exception as e:
            # Roll back the reservation so the failed slot is freed.
            async with self._session_lock:
                executor.shutdown(wait=False)
                self._session_executors.pop(session_id, None)
                self._sessions.pop(session_id, None)
            factory_name = getattr(
                self._env_factory, "__name__", str(self._env_factory)
            )
            raise EnvironmentFactoryError(factory_name) from e

        async with self._session_lock:
            # Swap the placeholder for the real environment and record metadata.
            self._sessions[session_id] = env
            self._session_info[session_id] = SessionInfo(
                session_id=session_id,
                created_at=current_time,
                last_activity_at=current_time,
                step_count=0,
                environment_type=type(env).__name__,
            )

        return session_id, env
|
| 335 |
|
| 336 |
+
    async def _destroy_session(self, session_id: str) -> None:
        """
        Destroy a WebSocket session and cleanup resources.

        All bookkeeping is removed under the lock first; the (potentially
        slow) env.close() then happens outside the lock. Cleanup is best
        effort: close failures are swallowed so teardown always completes.

        Args:
            session_id: The session ID to destroy
        """
        async with self._session_lock:
            env = self._sessions.pop(session_id, None)
            executor = self._session_executors.pop(session_id, None)
            self._session_info.pop(session_id, None)

        # Run close() in the same executor where the env was created
        # This is required for thread-sensitive libraries like Playwright/greenlet
        if env is not None:
            if executor is not None:
                try:
                    loop = asyncio.get_event_loop()
                    await loop.run_in_executor(executor, env.close)
                except Exception:
                    # If executor close fails, try direct close as fallback
                    try:
                        env.close()
                    except Exception:
                        pass  # Best effort cleanup
            else:
                try:
                    env.close()
                except Exception:
                    pass  # Best effort cleanup

        # Shutdown executor after close is done
        if executor is not None:
            executor.shutdown(wait=False)
|
| 370 |
+
|
| 371 |
+
def _update_session_activity(
|
| 372 |
+
self, session_id: str, increment_step: bool = False
|
| 373 |
+
) -> None:
|
| 374 |
+
"""
|
| 375 |
+
Update session activity timestamp and optionally increment step count.
|
| 376 |
|
| 377 |
+
Args:
|
| 378 |
+
session_id: The session ID to update
|
| 379 |
+
increment_step: If True, increment the step count
|
| 380 |
"""
|
| 381 |
+
if session_id in self._session_info:
|
| 382 |
+
self._session_info[session_id].last_activity_at = time.time()
|
| 383 |
+
if increment_step:
|
| 384 |
+
self._session_info[session_id].step_count += 1
|
|
|
|
| 385 |
|
| 386 |
+
def get_session_info(self, session_id: str) -> Optional[SessionInfo]:
|
| 387 |
"""
|
| 388 |
+
Get information about a specific session.
|
| 389 |
|
| 390 |
Args:
|
| 391 |
+
session_id: The session ID to query
|
| 392 |
|
| 393 |
Returns:
|
| 394 |
+
SessionInfo if the session exists, None otherwise
|
| 395 |
+
"""
|
| 396 |
+
return self._session_info.get(session_id)
|
| 397 |
+
|
| 398 |
+
async def _run_in_session_executor(
    self, session_id: str, func: Callable[..., Observation], *args, **kwargs
) -> Observation:
    """Run a synchronous function in the session's thread pool executor.

    Falls back to the shared default executor when the session has no
    dedicated one.

    Args:
        session_id: Session whose executor should run the call.
        func: Synchronous callable producing an Observation.
        *args: Positional arguments forwarded to ``func``.
        **kwargs: Keyword arguments forwarded to ``func``.

    Returns:
        The Observation produced by ``func``.
    """
    executor = self._session_executors.get(session_id, self._executor)
    # get_running_loop() is the correct call inside a coroutine;
    # get_event_loop() is deprecated in this context and can bind a
    # different loop than the one actually running this coroutine.
    loop = asyncio.get_running_loop()
    return await loop.run_in_executor(executor, lambda: func(*args, **kwargs))
|
| 405 |
+
|
| 406 |
+
@property
def active_sessions(self) -> int:
    """Number of currently open WebSocket sessions."""
    session_count = len(self._sessions)
    return session_count
|
| 410 |
+
|
| 411 |
+
@property
def max_concurrent_envs(self) -> int:
    """Upper bound on simultaneously live environment instances."""
    limit = self._max_concurrent_envs
    return limit
|
| 415 |
+
|
| 416 |
+
@property
def is_concurrency_safe(self) -> bool:
    """Whether the environment advertises support for concurrent sessions.

    When the factory is itself a class, the SUPPORTS_CONCURRENT_SESSIONS
    class attribute is read directly. Otherwise a throwaway environment is
    instantiated to inspect the flag; the instance is always closed, even
    if inspection raises (the original code leaked it on error).

    Returns:
        True if SUPPORTS_CONCURRENT_SESSIONS is set and truthy, else False.
    """
    import inspect

    if inspect.isclass(self._env_factory):
        return getattr(self._env_factory, "SUPPORTS_CONCURRENT_SESSIONS", False)

    # Factory is a plain callable: build a temporary instance to read the
    # flag, guaranteeing cleanup via try/finally.
    _temp_env = self._env_factory()
    try:
        return getattr(_temp_env, "SUPPORTS_CONCURRENT_SESSIONS", False)
    finally:
        _temp_env.close()
|
| 429 |
+
|
| 430 |
+
@property
def concurrency_config(self) -> ConcurrencyConfig:
    """The concurrency settings this server was configured with."""
    cfg = self._concurrency_config
    return cfg
|
| 434 |
+
|
| 435 |
+
def register_routes(
|
| 436 |
+
self, app: FastAPI, mode: ServerMode | str = ServerMode.SIMULATION
|
| 437 |
+
) -> None:
|
| 438 |
"""
|
| 439 |
+
Register HTTP routes on a FastAPI application.
|
| 440 |
+
|
| 441 |
+
Args:
|
| 442 |
+
app: FastAPI application instance
|
| 443 |
+
mode: Server mode - either SIMULATION or PRODUCTION (or string equivalents).
|
| 444 |
+
In production mode, simulation control endpoints (/reset, /step, /state)
|
| 445 |
+
are NOT registered. Only safe endpoints (/health, /schema, /metadata, /ws)
|
| 446 |
+
are available. Defaults to SIMULATION for backwards compatibility.
|
| 447 |
+
|
| 448 |
+
Raises:
|
| 449 |
+
ValueError: If mode is not a valid ServerMode or string equivalent.
|
| 450 |
+
"""
|
| 451 |
+
# Convert string to ServerMode enum for backwards compatibility
|
| 452 |
+
if isinstance(mode, str):
|
| 453 |
+
try:
|
| 454 |
+
mode = ServerMode(mode.lower())
|
| 455 |
+
except ValueError:
|
| 456 |
+
valid_modes = [m.value for m in ServerMode]
|
| 457 |
+
raise ValueError(
|
| 458 |
+
f"Invalid mode: '{mode}'. Must be one of: {valid_modes}"
|
| 459 |
+
)
|
| 460 |
+
|
| 461 |
+
# Helper function to handle reset endpoint
|
| 462 |
+
async def reset_handler(
|
| 463 |
+
request: ResetRequest = Body(default_factory=ResetRequest),
|
| 464 |
+
) -> ResetResponse:
|
| 465 |
+
"""Reset endpoint - returns initial observation."""
|
| 466 |
+
_env = self._env_factory()
|
| 467 |
+
|
| 468 |
+
try:
|
| 469 |
+
kwargs = request.model_dump(exclude_unset=True)
|
| 470 |
+
|
| 471 |
+
is_async = _env.reset_async.__func__ is not Environment.reset_async
|
| 472 |
+
|
| 473 |
+
if is_async:
|
| 474 |
+
sig = inspect.signature(_env.reset_async)
|
| 475 |
+
else:
|
| 476 |
+
sig = inspect.signature(_env.reset)
|
| 477 |
+
valid_kwargs = self._get_valid_kwargs(sig, kwargs)
|
| 478 |
+
|
| 479 |
+
if is_async:
|
| 480 |
+
observation = await _env.reset_async(**valid_kwargs)
|
| 481 |
+
else:
|
| 482 |
+
observation = await self._run_sync_in_thread_pool(
|
| 483 |
+
_env.reset, **valid_kwargs
|
| 484 |
+
)
|
| 485 |
+
return ResetResponse(**serialize_observation(observation))
|
| 486 |
+
finally:
|
| 487 |
+
_env.close()
|
| 488 |
+
|
| 489 |
+
# Helper function to handle step endpoint
|
| 490 |
+
async def step_handler(request: StepRequest) -> StepResponse:
|
| 491 |
+
"""Step endpoint - executes action and returns observation."""
|
| 492 |
+
action_data = request.action
|
| 493 |
+
|
| 494 |
+
try:
|
| 495 |
+
action = deserialize_action(action_data, self.action_cls)
|
| 496 |
+
except ValidationError as e:
|
| 497 |
+
raise HTTPException(
|
| 498 |
+
status_code=status.HTTP_422_UNPROCESSABLE_CONTENT, detail=e.errors()
|
| 499 |
+
)
|
| 500 |
+
|
| 501 |
+
_env = self._env_factory()
|
| 502 |
+
|
| 503 |
+
try:
|
| 504 |
+
kwargs = request.model_dump(exclude_unset=True, exclude={"action"})
|
| 505 |
+
|
| 506 |
+
is_async = _env.step_async.__func__ is not Environment.step_async
|
| 507 |
+
|
| 508 |
+
if is_async:
|
| 509 |
+
sig = inspect.signature(_env.step_async)
|
| 510 |
+
else:
|
| 511 |
+
sig = inspect.signature(_env.step)
|
| 512 |
+
valid_kwargs = self._get_valid_kwargs(
|
| 513 |
+
sig, kwargs, skip_params={"action"}
|
| 514 |
+
)
|
| 515 |
+
|
| 516 |
+
if is_async:
|
| 517 |
+
observation = await _env.step_async(action, **valid_kwargs)
|
| 518 |
+
else:
|
| 519 |
+
observation = await self._run_sync_in_thread_pool(
|
| 520 |
+
_env.step, action, **valid_kwargs
|
| 521 |
+
)
|
| 522 |
+
|
| 523 |
+
return StepResponse(**serialize_observation(observation))
|
| 524 |
+
finally:
|
| 525 |
+
_env.close()
|
| 526 |
+
|
| 527 |
+
# Helper function to handle MCP endpoint
|
| 528 |
+
async def mcp_handler(
|
| 529 |
+
request: JsonRpcRequest, session_env: Optional[Environment] = None
|
| 530 |
+
) -> JsonRpcResponse:
|
| 531 |
+
"""
|
| 532 |
+
Handle MCP JSON-RPC requests.
|
| 533 |
+
|
| 534 |
+
Supports tools/list and tools/call methods in JSON-RPC 2.0 format.
|
| 535 |
+
"""
|
| 536 |
+
method = request.method
|
| 537 |
+
request_id = request.id
|
| 538 |
+
|
| 539 |
+
# Use provided session environment or create temporary one
|
| 540 |
+
if session_env is not None:
|
| 541 |
+
_env = session_env
|
| 542 |
+
should_close = False
|
| 543 |
+
else:
|
| 544 |
+
_env = self._env_factory()
|
| 545 |
+
should_close = True
|
| 546 |
+
try:
|
| 547 |
+
if method == McpMethod.TOOLS_LIST:
|
| 548 |
+
# Check if environment is MCP-enabled
|
| 549 |
+
if not hasattr(_env, "mcp_client"):
|
| 550 |
+
return JsonRpcResponse.error_response(
|
| 551 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 552 |
+
"Environment does not support MCP",
|
| 553 |
+
request_id=request_id,
|
| 554 |
+
)
|
| 555 |
+
|
| 556 |
+
# Use async context manager for MCP client
|
| 557 |
+
async with _env.mcp_client:
|
| 558 |
+
tools = await _env.mcp_client.list_tools()
|
| 559 |
+
|
| 560 |
+
return JsonRpcResponse.success(
|
| 561 |
+
result={
|
| 562 |
+
"tools": [
|
| 563 |
+
t.model_dump() if hasattr(t, "model_dump") else dict(t)
|
| 564 |
+
for t in tools
|
| 565 |
+
]
|
| 566 |
+
},
|
| 567 |
+
request_id=request_id,
|
| 568 |
+
)
|
| 569 |
+
|
| 570 |
+
elif method == McpMethod.TOOLS_CALL:
|
| 571 |
+
params = request.params
|
| 572 |
+
tool_name = params.get("name")
|
| 573 |
+
arguments = params.get("arguments", {})
|
| 574 |
+
|
| 575 |
+
if not hasattr(_env, "mcp_client"):
|
| 576 |
+
return JsonRpcResponse.error_response(
|
| 577 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 578 |
+
"Environment does not support MCP",
|
| 579 |
+
request_id=request_id,
|
| 580 |
+
)
|
| 581 |
+
|
| 582 |
+
if not tool_name:
|
| 583 |
+
return JsonRpcResponse.error_response(
|
| 584 |
+
JsonRpcErrorCode.INVALID_REQUEST,
|
| 585 |
+
"Missing 'name' in params",
|
| 586 |
+
request_id=request_id,
|
| 587 |
+
)
|
| 588 |
+
|
| 589 |
+
# Use async context manager for MCP client
|
| 590 |
+
async with _env.mcp_client:
|
| 591 |
+
result = await _env.mcp_client.call_tool(
|
| 592 |
+
name=tool_name, arguments=arguments
|
| 593 |
+
)
|
| 594 |
+
|
| 595 |
+
# Ensure result is JSON serializable
|
| 596 |
+
serializable_result = _make_json_serializable(result)
|
| 597 |
|
| 598 |
+
return JsonRpcResponse.success(
|
| 599 |
+
result=serializable_result,
|
| 600 |
+
request_id=request_id,
|
| 601 |
+
)
|
| 602 |
+
|
| 603 |
+
else:
|
| 604 |
+
return JsonRpcResponse.error_response(
|
| 605 |
+
JsonRpcErrorCode.METHOD_NOT_FOUND,
|
| 606 |
+
f"Method not found: {method}",
|
| 607 |
+
request_id=request_id,
|
| 608 |
+
)
|
| 609 |
+
|
| 610 |
+
except Exception as e:
|
| 611 |
+
return JsonRpcResponse.error_response(
|
| 612 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 613 |
+
str(e),
|
| 614 |
+
request_id=request_id,
|
| 615 |
+
)
|
| 616 |
+
finally:
|
| 617 |
+
if should_close:
|
| 618 |
+
_env.close()
|
| 619 |
+
|
| 620 |
+
# Register MCP WebSocket endpoint (available in both production and simulation modes)
|
| 621 |
+
@app.websocket("/mcp")
|
| 622 |
+
async def mcp_websocket_endpoint(websocket: WebSocket):
|
| 623 |
+
"""
|
| 624 |
+
WebSocket endpoint for MCP JSON-RPC requests.
|
| 625 |
+
|
| 626 |
+
Each WebSocket connection gets its own environment instance for MCP operations.
|
| 627 |
+
|
| 628 |
+
Message Protocol:
|
| 629 |
+
- Client sends: JSON-RPC 2.0 request (tools/list, tools/call)
|
| 630 |
+
- Server responds: JSON-RPC 2.0 response (result or error)
|
| 631 |
+
"""
|
| 632 |
+
await websocket.accept()
|
| 633 |
+
|
| 634 |
+
session_id = None
|
| 635 |
+
session_env = None
|
| 636 |
+
|
| 637 |
+
try:
|
| 638 |
+
# Create session with dedicated environment
|
| 639 |
+
session_id, session_env = await self._create_session()
|
| 640 |
+
|
| 641 |
+
while True:
|
| 642 |
+
# Receive message from client
|
| 643 |
+
raw_message = await websocket.receive_text()
|
| 644 |
+
|
| 645 |
+
try:
|
| 646 |
+
jsonrpc_dict = json.loads(raw_message)
|
| 647 |
+
jsonrpc_request = JsonRpcRequest(**jsonrpc_dict)
|
| 648 |
+
except json.JSONDecodeError as e:
|
| 649 |
+
error_resp = JsonRpcResponse.error_response(
|
| 650 |
+
JsonRpcErrorCode.PARSE_ERROR,
|
| 651 |
+
f"Parse error: {e}",
|
| 652 |
+
)
|
| 653 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 654 |
+
continue
|
| 655 |
+
except ValidationError as e:
|
| 656 |
+
error_resp = JsonRpcResponse.error_response(
|
| 657 |
+
JsonRpcErrorCode.INVALID_REQUEST,
|
| 658 |
+
f"Invalid request: {e}",
|
| 659 |
+
)
|
| 660 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 661 |
+
continue
|
| 662 |
+
|
| 663 |
+
try:
|
| 664 |
+
# Call mcp_handler with session environment
|
| 665 |
+
response = await mcp_handler(
|
| 666 |
+
jsonrpc_request, session_env=session_env
|
| 667 |
+
)
|
| 668 |
+
await websocket.send_text(response.model_dump_json())
|
| 669 |
+
except Exception as e:
|
| 670 |
+
error_resp = JsonRpcResponse.error_response(
|
| 671 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 672 |
+
str(e),
|
| 673 |
+
request_id=jsonrpc_request.id,
|
| 674 |
+
)
|
| 675 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 676 |
+
|
| 677 |
+
except WebSocketDisconnect:
|
| 678 |
+
pass
|
| 679 |
+
except SessionCapacityError as e:
|
| 680 |
+
error_resp = JsonRpcResponse.error_response(
|
| 681 |
+
JsonRpcErrorCode.SERVER_ERROR,
|
| 682 |
+
str(e),
|
| 683 |
+
data={
|
| 684 |
+
"active_sessions": e.active_sessions,
|
| 685 |
+
"max_sessions": e.max_sessions,
|
| 686 |
+
},
|
| 687 |
+
)
|
| 688 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 689 |
+
except EnvironmentFactoryError as e:
|
| 690 |
+
error_resp = JsonRpcResponse.error_response(
|
| 691 |
+
JsonRpcErrorCode.SERVER_ERROR,
|
| 692 |
+
str(e),
|
| 693 |
+
data={"factory_name": e.factory_name},
|
| 694 |
+
)
|
| 695 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 696 |
+
except Exception as e:
|
| 697 |
+
error_resp = JsonRpcResponse.error_response(
|
| 698 |
+
JsonRpcErrorCode.SERVER_ERROR,
|
| 699 |
+
str(e),
|
| 700 |
+
)
|
| 701 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 702 |
+
finally:
|
| 703 |
+
if session_id:
|
| 704 |
+
await self._destroy_session(session_id)
|
| 705 |
+
try:
|
| 706 |
+
await websocket.close()
|
| 707 |
+
except RuntimeError:
|
| 708 |
+
pass
|
| 709 |
+
|
| 710 |
+
# Register simulation control routes only in simulation mode
|
| 711 |
+
if mode == ServerMode.SIMULATION:
|
| 712 |
+
|
| 713 |
+
@app.post(
|
| 714 |
+
"/reset",
|
| 715 |
+
response_model=ResetResponse,
|
| 716 |
+
tags=["Environment Control"],
|
| 717 |
+
summary="Reset the environment",
|
| 718 |
+
description="""
|
| 719 |
+
Reset the environment to its initial state and return the first observation.
|
| 720 |
+
|
| 721 |
+
You can optionally provide a seed for reproducibility and an episode_id for tracking.
|
| 722 |
+
""",
|
| 723 |
+
responses={
|
| 724 |
+
200: {
|
| 725 |
+
"description": "Environment reset successfully",
|
| 726 |
+
"content": {
|
| 727 |
+
"application/json": {
|
| 728 |
+
"example": {
|
| 729 |
+
"observation": {"status": "ready", "data": {}},
|
| 730 |
+
"reward": None,
|
| 731 |
+
"done": False,
|
| 732 |
+
}
|
| 733 |
+
}
|
| 734 |
+
},
|
| 735 |
+
}
|
| 736 |
+
},
|
| 737 |
+
)
|
| 738 |
+
async def reset(
|
| 739 |
+
request: ResetRequest = Body(default_factory=ResetRequest),
|
| 740 |
+
) -> ResetResponse:
|
| 741 |
+
return await reset_handler(request)
|
| 742 |
+
|
| 743 |
+
@app.post(
|
| 744 |
+
"/step",
|
| 745 |
+
response_model=StepResponse,
|
| 746 |
+
tags=["Environment Control"],
|
| 747 |
+
summary="Execute an action in the environment",
|
| 748 |
+
description="""
|
| 749 |
+
Execute an action in the environment and receive the resulting observation.
|
| 750 |
+
|
| 751 |
+
The action must conform to the environment's action schema, which can be
|
| 752 |
+
retrieved from the `/schema` endpoint. If the action is invalid,
|
| 753 |
+
the endpoint will return HTTP 422 with detailed validation errors.
|
| 754 |
+
|
| 755 |
+
The response includes:
|
| 756 |
+
- **observation**: The environment's response to the action
|
| 757 |
+
- **reward**: Optional reward signal (float or None)
|
| 758 |
+
- **done**: Boolean indicating if the episode has terminated
|
| 759 |
+
""",
|
| 760 |
+
responses={
|
| 761 |
+
200: {
|
| 762 |
+
"description": "Action executed successfully",
|
| 763 |
+
"content": {
|
| 764 |
+
"application/json": {
|
| 765 |
+
"example": {
|
| 766 |
+
"observation": {"status": "success", "data": {}},
|
| 767 |
+
"reward": 1.0,
|
| 768 |
+
"done": False,
|
| 769 |
+
}
|
| 770 |
+
}
|
| 771 |
+
},
|
| 772 |
+
},
|
| 773 |
+
422: {
|
| 774 |
+
"description": "Validation error - invalid action format or values",
|
| 775 |
+
"content": {
|
| 776 |
+
"application/json": {
|
| 777 |
+
"example": {
|
| 778 |
+
"detail": [
|
| 779 |
+
{
|
| 780 |
+
"type": "string_too_short",
|
| 781 |
+
"loc": ["body", "action", "message"],
|
| 782 |
+
"msg": "String should have at least 1 character",
|
| 783 |
+
"input": "",
|
| 784 |
+
}
|
| 785 |
+
]
|
| 786 |
+
}
|
| 787 |
+
}
|
| 788 |
+
},
|
| 789 |
+
},
|
| 790 |
+
500: {
|
| 791 |
+
"description": "Internal server error during action execution"
|
| 792 |
+
},
|
| 793 |
+
},
|
| 794 |
+
)
|
| 795 |
+
async def step(request: StepRequest) -> StepResponse:
|
| 796 |
+
return await step_handler(request)
|
| 797 |
+
|
| 798 |
+
def get_state_handler() -> State:
|
| 799 |
+
_env = self._env_factory()
|
| 800 |
+
try:
|
| 801 |
+
return _env.state
|
| 802 |
+
finally:
|
| 803 |
+
_env.close()
|
| 804 |
+
|
| 805 |
+
def get_metadata_handler() -> EnvironmentMetadata:
|
| 806 |
+
_env = self._env_factory()
|
| 807 |
+
try:
|
| 808 |
+
return _env.get_metadata()
|
| 809 |
+
finally:
|
| 810 |
+
_env.close()
|
| 811 |
+
|
| 812 |
+
# Build list of GET endpoints based on mode
|
| 813 |
+
get_endpoints = [
|
| 814 |
+
GetEndpointConfig(
|
| 815 |
+
path="/metadata",
|
| 816 |
+
handler=get_metadata_handler,
|
| 817 |
+
response_model=EnvironmentMetadata,
|
| 818 |
+
tag="Environment Info",
|
| 819 |
+
summary="Get environment metadata",
|
| 820 |
+
description="""
|
| 821 |
+
Get metadata about this environment.
|
| 822 |
+
|
| 823 |
+
Returns information about the environment including name, description,
|
| 824 |
+
version, author, and documentation links.
|
| 825 |
+
""",
|
| 826 |
+
),
|
| 827 |
+
GetEndpointConfig(
|
| 828 |
+
path="/health",
|
| 829 |
+
handler=lambda: HealthResponse(status=HealthStatus.HEALTHY),
|
| 830 |
+
response_model=HealthResponse,
|
| 831 |
+
tag="Health",
|
| 832 |
+
summary="Health check",
|
| 833 |
+
description="Check if the environment server is running and healthy.",
|
| 834 |
+
),
|
| 835 |
+
]
|
| 836 |
+
|
| 837 |
+
# Only register /state endpoint in simulation mode
|
| 838 |
+
if mode == ServerMode.SIMULATION:
|
| 839 |
+
get_endpoints.insert(
|
| 840 |
+
0,
|
| 841 |
+
GetEndpointConfig(
|
| 842 |
+
path="/state",
|
| 843 |
+
handler=get_state_handler,
|
| 844 |
+
response_model=State,
|
| 845 |
+
tag="State Management",
|
| 846 |
+
summary="Get current environment state",
|
| 847 |
+
description="""
|
| 848 |
+
Retrieve the current internal state of the environment.
|
| 849 |
+
|
| 850 |
+
The structure of the state object is defined by the environment's State model.
|
| 851 |
+
""",
|
| 852 |
+
),
|
| 853 |
+
)
|
| 854 |
+
|
| 855 |
+
register_get_endpoints(app, get_endpoints)
|
| 856 |
+
|
| 857 |
+
# Register combined schema endpoint
|
| 858 |
+
@app.get(
|
| 859 |
+
"/schema",
|
| 860 |
+
response_model=SchemaResponse,
|
| 861 |
+
tags=["Schema"],
|
| 862 |
+
summary="Get all JSON schemas",
|
| 863 |
+
description="""
|
| 864 |
+
Get JSON schemas for actions, observations, and state in a single response.
|
| 865 |
+
|
| 866 |
+
Returns a combined schema object containing:
|
| 867 |
+
- **action**: JSON schema for actions accepted by this environment
|
| 868 |
+
- **observation**: JSON schema for observations returned by this environment
|
| 869 |
+
- **state**: JSON schema for environment state objects
|
| 870 |
+
|
| 871 |
+
This is more efficient than calling individual schema endpoints and provides
|
| 872 |
+
all schema information needed to interact with the environment.
|
| 873 |
+
""",
|
| 874 |
+
responses={
|
| 875 |
+
200: {
|
| 876 |
+
"description": "Combined schemas retrieved successfully",
|
| 877 |
+
"content": {
|
| 878 |
+
"application/json": {
|
| 879 |
+
"example": {
|
| 880 |
+
"action": {
|
| 881 |
+
"type": "object",
|
| 882 |
+
"properties": {"message": {"type": "string"}},
|
| 883 |
+
},
|
| 884 |
+
"observation": {
|
| 885 |
+
"type": "object",
|
| 886 |
+
"properties": {"response": {"type": "string"}},
|
| 887 |
+
},
|
| 888 |
+
"state": {
|
| 889 |
+
"type": "object",
|
| 890 |
+
"properties": {"step_count": {"type": "integer"}},
|
| 891 |
+
},
|
| 892 |
+
}
|
| 893 |
+
}
|
| 894 |
+
},
|
| 895 |
+
}
|
| 896 |
+
},
|
| 897 |
+
)
|
| 898 |
+
async def get_schemas() -> SchemaResponse:
|
| 899 |
+
"""Return all schemas in one response."""
|
| 900 |
+
return SchemaResponse(
|
| 901 |
+
action=self.action_cls.model_json_schema(),
|
| 902 |
+
observation=self.observation_cls.model_json_schema(),
|
| 903 |
+
state=State.model_json_schema(),
|
| 904 |
+
)
|
| 905 |
+
|
| 906 |
+
# Register MCP endpoint for production mode (direct MCP access)
|
| 907 |
+
@app.post("/mcp")
|
| 908 |
+
async def mcp_endpoint(request_raw: Request) -> Dict[str, Any]:
|
| 909 |
+
"""
|
| 910 |
+
MCP JSON-RPC endpoint for production mode.
|
| 911 |
+
|
| 912 |
+
Bypasses step() overhead and provides direct access to MCP tools.
|
| 913 |
+
Supports tools/list and tools/call methods.
|
| 914 |
+
"""
|
| 915 |
+
# Parse JSON manually to handle parse errors gracefully
|
| 916 |
+
try:
|
| 917 |
+
body = await request_raw.body()
|
| 918 |
+
request_dict = json.loads(body)
|
| 919 |
+
request = JsonRpcRequest(**request_dict)
|
| 920 |
+
except json.JSONDecodeError:
|
| 921 |
+
return JsonRpcResponse.error_response(
|
| 922 |
+
JsonRpcErrorCode.PARSE_ERROR
|
| 923 |
+
).model_dump()
|
| 924 |
+
except ValidationError as e:
|
| 925 |
+
return JsonRpcResponse.error_response(
|
| 926 |
+
JsonRpcErrorCode.INVALID_REQUEST,
|
| 927 |
+
f"Invalid request: {e}",
|
| 928 |
+
).model_dump()
|
| 929 |
+
except Exception:
|
| 930 |
+
return JsonRpcResponse.error_response(
|
| 931 |
+
JsonRpcErrorCode.PARSE_ERROR
|
| 932 |
+
).model_dump()
|
| 933 |
+
|
| 934 |
+
method = request.method
|
| 935 |
+
params = request.params
|
| 936 |
+
request_id = request.id
|
| 937 |
+
|
| 938 |
+
# Create a temporary environment for MCP access
|
| 939 |
+
_env = self._env_factory()
|
| 940 |
+
|
| 941 |
+
try:
|
| 942 |
+
# Check if environment supports MCP
|
| 943 |
+
if not hasattr(_env, "mcp_client") and not hasattr(_env, "mcp_server"):
|
| 944 |
+
return JsonRpcResponse.error_response(
|
| 945 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 946 |
+
"Environment does not support MCP",
|
| 947 |
+
request_id=request_id,
|
| 948 |
+
).model_dump()
|
| 949 |
+
|
| 950 |
+
if method == McpMethod.TOOLS_LIST:
|
| 951 |
+
# List tools from MCP server
|
| 952 |
+
if hasattr(_env, "mcp_client") and _env.mcp_client:
|
| 953 |
+
async with _env.mcp_client:
|
| 954 |
+
tools = await _env.mcp_client.list_tools()
|
| 955 |
+
return JsonRpcResponse.success(
|
| 956 |
+
result={
|
| 957 |
+
"tools": [
|
| 958 |
+
t.model_dump()
|
| 959 |
+
if hasattr(t, "model_dump")
|
| 960 |
+
else dict(t)
|
| 961 |
+
for t in tools
|
| 962 |
+
]
|
| 963 |
+
},
|
| 964 |
+
request_id=request_id,
|
| 965 |
+
).model_dump()
|
| 966 |
+
elif hasattr(_env, "mcp_server") and _env.mcp_server:
|
| 967 |
+
# Use server directly
|
| 968 |
+
tools = []
|
| 969 |
+
for tool_name, tool in get_server_tools(
|
| 970 |
+
_env.mcp_server
|
| 971 |
+
).items():
|
| 972 |
+
tool_dict = {
|
| 973 |
+
"name": tool.name,
|
| 974 |
+
"description": tool.description or "",
|
| 975 |
+
"inputSchema": tool.parameters or {},
|
| 976 |
+
}
|
| 977 |
+
tools.append(tool_dict)
|
| 978 |
+
return JsonRpcResponse.success(
|
| 979 |
+
result={"tools": tools},
|
| 980 |
+
request_id=request_id,
|
| 981 |
+
).model_dump()
|
| 982 |
+
else:
|
| 983 |
+
return JsonRpcResponse.error_response(
|
| 984 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 985 |
+
"MCP server not available",
|
| 986 |
+
request_id=request_id,
|
| 987 |
+
).model_dump()
|
| 988 |
+
|
| 989 |
+
elif method == McpMethod.TOOLS_CALL:
|
| 990 |
+
tool_name = params.get("name")
|
| 991 |
+
arguments = params.get("arguments", {})
|
| 992 |
+
|
| 993 |
+
if not tool_name:
|
| 994 |
+
return JsonRpcResponse.error_response(
|
| 995 |
+
JsonRpcErrorCode.INVALID_PARAMS,
|
| 996 |
+
"Invalid params - 'name' is required",
|
| 997 |
+
request_id=request_id,
|
| 998 |
+
).model_dump()
|
| 999 |
+
|
| 1000 |
+
# Call tool via MCP
|
| 1001 |
+
if hasattr(_env, "mcp_client") and _env.mcp_client:
|
| 1002 |
+
async with _env.mcp_client:
|
| 1003 |
+
result = await _env.mcp_client.call_tool(
|
| 1004 |
+
name=tool_name, arguments=arguments
|
| 1005 |
+
)
|
| 1006 |
+
elif hasattr(_env, "mcp_server") and _env.mcp_server:
|
| 1007 |
+
# Call tool directly on FastMCP server
|
| 1008 |
+
server_tools = get_server_tools(_env.mcp_server)
|
| 1009 |
+
if tool_name in server_tools:
|
| 1010 |
+
tool = server_tools[tool_name]
|
| 1011 |
+
result = tool.fn(**arguments)
|
| 1012 |
+
else:
|
| 1013 |
+
return JsonRpcResponse.error_response(
|
| 1014 |
+
JsonRpcErrorCode.INVALID_PARAMS,
|
| 1015 |
+
f"Tool not found: {tool_name}",
|
| 1016 |
+
request_id=request_id,
|
| 1017 |
+
).model_dump()
|
| 1018 |
+
else:
|
| 1019 |
+
return JsonRpcResponse.error_response(
|
| 1020 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 1021 |
+
"MCP server not available",
|
| 1022 |
+
request_id=request_id,
|
| 1023 |
+
).model_dump()
|
| 1024 |
+
|
| 1025 |
+
# Make result JSON serializable
|
| 1026 |
+
serializable_result = _make_json_serializable(result)
|
| 1027 |
+
|
| 1028 |
+
return JsonRpcResponse.success(
|
| 1029 |
+
result=serializable_result,
|
| 1030 |
+
request_id=request_id,
|
| 1031 |
+
).model_dump()
|
| 1032 |
+
|
| 1033 |
+
else:
|
| 1034 |
+
return JsonRpcResponse.error_response(
|
| 1035 |
+
JsonRpcErrorCode.METHOD_NOT_FOUND,
|
| 1036 |
+
f"Method not found: {method}",
|
| 1037 |
+
request_id=request_id,
|
| 1038 |
+
).model_dump()
|
| 1039 |
+
|
| 1040 |
+
except Exception as e:
|
| 1041 |
+
return JsonRpcResponse.error_response(
|
| 1042 |
+
JsonRpcErrorCode.INTERNAL_ERROR,
|
| 1043 |
+
str(e),
|
| 1044 |
+
request_id=request_id,
|
| 1045 |
+
).model_dump()
|
| 1046 |
+
finally:
|
| 1047 |
+
_env.close()
|
| 1048 |
+
|
| 1049 |
+
# Register WebSocket endpoint for persistent sessions
|
| 1050 |
+
@app.websocket("/ws")
|
| 1051 |
+
async def websocket_endpoint(websocket: WebSocket):
|
| 1052 |
+
"""
|
| 1053 |
+
WebSocket endpoint for persistent environment sessions.
|
| 1054 |
+
|
| 1055 |
+
Each WebSocket connection gets its own environment instance.
|
| 1056 |
+
|
| 1057 |
+
Message Protocol:
|
| 1058 |
+
- Client sends: WSResetMessage | WSStepMessage | WSStateMessage | WSCloseMessage
|
| 1059 |
+
- Server responds: WSObservationResponse | WSStateResponse | WSErrorResponse
|
| 1060 |
+
"""
|
| 1061 |
+
await websocket.accept()
|
| 1062 |
+
|
| 1063 |
+
session_id = None
|
| 1064 |
+
session_env = None
|
| 1065 |
+
|
| 1066 |
+
try:
|
| 1067 |
+
# Create session with dedicated environment
|
| 1068 |
+
session_id, session_env = await self._create_session()
|
| 1069 |
+
|
| 1070 |
+
while True:
|
| 1071 |
+
# Receive message from client
|
| 1072 |
+
raw_message = await websocket.receive_text()
|
| 1073 |
+
|
| 1074 |
+
try:
|
| 1075 |
+
message_dict = json.loads(raw_message)
|
| 1076 |
+
except json.JSONDecodeError as e:
|
| 1077 |
+
error_resp = WSErrorResponse(
|
| 1078 |
+
data={
|
| 1079 |
+
"message": f"Invalid JSON: {e}",
|
| 1080 |
+
"code": WSErrorCode.INVALID_JSON,
|
| 1081 |
+
}
|
| 1082 |
+
)
|
| 1083 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1084 |
+
continue
|
| 1085 |
+
|
| 1086 |
+
msg_type = message_dict.get("type", "")
|
| 1087 |
+
|
| 1088 |
+
try:
|
| 1089 |
+
match msg_type:
|
| 1090 |
+
case "reset":
|
| 1091 |
+
msg = WSResetMessage(**message_dict)
|
| 1092 |
+
|
| 1093 |
+
is_async = (
|
| 1094 |
+
session_env.reset_async.__func__
|
| 1095 |
+
is not Environment.reset_async
|
| 1096 |
+
)
|
| 1097 |
+
|
| 1098 |
+
if is_async:
|
| 1099 |
+
sig = inspect.signature(session_env.reset_async)
|
| 1100 |
+
valid_kwargs = self._get_valid_kwargs(sig, msg.data)
|
| 1101 |
+
observation = await session_env.reset_async(
|
| 1102 |
+
**valid_kwargs
|
| 1103 |
+
)
|
| 1104 |
+
else:
|
| 1105 |
+
sig = inspect.signature(session_env.reset)
|
| 1106 |
+
valid_kwargs = self._get_valid_kwargs(sig, msg.data)
|
| 1107 |
+
observation = await self._run_in_session_executor(
|
| 1108 |
+
session_id, session_env.reset, **valid_kwargs
|
| 1109 |
+
)
|
| 1110 |
+
|
| 1111 |
+
self._update_session_activity(session_id)
|
| 1112 |
+
|
| 1113 |
+
response = WSObservationResponse(
|
| 1114 |
+
data=serialize_observation(observation),
|
| 1115 |
+
)
|
| 1116 |
+
|
| 1117 |
+
case "step":
|
| 1118 |
+
msg = WSStepMessage(**message_dict)
|
| 1119 |
+
action = deserialize_action(msg.data, self.action_cls)
|
| 1120 |
+
|
| 1121 |
+
is_async = (
|
| 1122 |
+
session_env.step_async.__func__
|
| 1123 |
+
is not Environment.step_async
|
| 1124 |
+
)
|
| 1125 |
+
|
| 1126 |
+
if is_async:
|
| 1127 |
+
observation = await session_env.step_async(action)
|
| 1128 |
+
else:
|
| 1129 |
+
observation = await self._run_in_session_executor(
|
| 1130 |
+
session_id, session_env.step, action
|
| 1131 |
+
)
|
| 1132 |
+
|
| 1133 |
+
self._update_session_activity(
|
| 1134 |
+
session_id, increment_step=True
|
| 1135 |
+
)
|
| 1136 |
+
|
| 1137 |
+
response = WSObservationResponse(
|
| 1138 |
+
data=serialize_observation(observation)
|
| 1139 |
+
)
|
| 1140 |
+
|
| 1141 |
+
case "state":
|
| 1142 |
+
msg = WSStateMessage(**message_dict)
|
| 1143 |
+
state = session_env.state
|
| 1144 |
+
if hasattr(state, "model_dump"):
|
| 1145 |
+
state_data = state.model_dump()
|
| 1146 |
+
else:
|
| 1147 |
+
state_data = dict(state) if state else {}
|
| 1148 |
+
|
| 1149 |
+
response = WSStateResponse(data=state_data)
|
| 1150 |
+
|
| 1151 |
+
case "close":
|
| 1152 |
+
msg = WSCloseMessage(**message_dict)
|
| 1153 |
+
break
|
| 1154 |
+
|
| 1155 |
+
case "mcp":
|
| 1156 |
+
msg = WSMCPMessage(**message_dict)
|
| 1157 |
+
try:
|
| 1158 |
+
rpc_request = JsonRpcRequest(**msg.data)
|
| 1159 |
+
except (ValidationError, Exception) as e:
|
| 1160 |
+
rpc_response = JsonRpcResponse.error_response(
|
| 1161 |
+
JsonRpcErrorCode.INVALID_REQUEST,
|
| 1162 |
+
f"Invalid request: {e}",
|
| 1163 |
+
)
|
| 1164 |
+
else:
|
| 1165 |
+
rpc_response = await mcp_handler(
|
| 1166 |
+
rpc_request,
|
| 1167 |
+
session_env=session_env,
|
| 1168 |
+
)
|
| 1169 |
+
response = WSMCPResponse(data=rpc_response.model_dump())
|
| 1170 |
+
|
| 1171 |
+
case _:
|
| 1172 |
+
response = WSErrorResponse(
|
| 1173 |
+
data={
|
| 1174 |
+
"message": f"Unknown message type: {msg_type}",
|
| 1175 |
+
"code": WSErrorCode.UNKNOWN_TYPE,
|
| 1176 |
+
}
|
| 1177 |
+
)
|
| 1178 |
+
|
| 1179 |
+
await websocket.send_text(response.model_dump_json())
|
| 1180 |
+
|
| 1181 |
+
except ValidationError as e:
|
| 1182 |
+
error_resp = WSErrorResponse(
|
| 1183 |
+
data={
|
| 1184 |
+
"message": "Invalid message",
|
| 1185 |
+
"code": WSErrorCode.VALIDATION_ERROR,
|
| 1186 |
+
"errors": e.errors(),
|
| 1187 |
+
}
|
| 1188 |
+
)
|
| 1189 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1190 |
+
except Exception as e:
|
| 1191 |
+
error_resp = WSErrorResponse(
|
| 1192 |
+
data={
|
| 1193 |
+
"message": str(e),
|
| 1194 |
+
"code": WSErrorCode.EXECUTION_ERROR,
|
| 1195 |
+
}
|
| 1196 |
+
)
|
| 1197 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1198 |
+
|
| 1199 |
+
except WebSocketDisconnect:
|
| 1200 |
+
pass
|
| 1201 |
+
except SessionCapacityError as e:
|
| 1202 |
+
error_resp = WSErrorResponse(
|
| 1203 |
+
data={
|
| 1204 |
+
"message": str(e),
|
| 1205 |
+
"code": WSErrorCode.CAPACITY_REACHED,
|
| 1206 |
+
"active_sessions": e.active_sessions,
|
| 1207 |
+
"max_sessions": e.max_sessions,
|
| 1208 |
+
}
|
| 1209 |
+
)
|
| 1210 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1211 |
+
except EnvironmentFactoryError as e:
|
| 1212 |
+
error_resp = WSErrorResponse(
|
| 1213 |
+
data={
|
| 1214 |
+
"message": str(e),
|
| 1215 |
+
"code": WSErrorCode.FACTORY_ERROR,
|
| 1216 |
+
"factory_name": e.factory_name,
|
| 1217 |
+
}
|
| 1218 |
+
)
|
| 1219 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1220 |
+
except Exception as e:
|
| 1221 |
+
error_resp = WSErrorResponse(
|
| 1222 |
+
data={"message": str(e), "code": WSErrorCode.SESSION_ERROR}
|
| 1223 |
+
)
|
| 1224 |
+
await websocket.send_text(error_resp.model_dump_json())
|
| 1225 |
+
finally:
|
| 1226 |
+
if session_id:
|
| 1227 |
+
await self._destroy_session(session_id)
|
| 1228 |
+
try:
|
| 1229 |
+
await websocket.close()
|
| 1230 |
+
except RuntimeError:
|
| 1231 |
+
pass
|
| 1232 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1233 |
|
| 1234 |
def create_app(
    env: Callable[[], Environment],
    action_cls: Type[Action],
    observation_cls: Type[Observation],
    env_name: Optional[str] = None,
    max_concurrent_envs: Optional[int] = None,
    concurrency_config: Optional[ConcurrencyConfig] = None,
    gradio_builder: Optional[Callable[..., Any]] = None,
) -> FastAPI:
    """
    Create a FastAPI application with or without web interface.

    The web interface (including README integration) is toggled via the
    ENABLE_WEB_INTERFACE environment variable; when disabled, a plain
    FastAPI app is built instead.

    Args:
        env: Environment factory (callable) that creates new instances
        action_cls: The Action subclass this environment expects
        observation_cls: The Observation subclass this environment returns
        env_name: Optional environment name for README loading
        max_concurrent_envs: Maximum concurrent WebSocket sessions.
            Mutually exclusive with concurrency_config.
        concurrency_config: Optional ConcurrencyConfig for advanced concurrency
            settings. Mutually exclusive with max_concurrent_envs.
        gradio_builder: Optional callable to build a custom Gradio UI at /web.
            Signature: (web_manager, action_fields, metadata, is_chat_env,
            title, quick_start_md) -> gr.Blocks. When None, the default
            Gradio app is used. See docs/customizing-web-ui.md.

    Returns:
        FastAPI application instance with or without web interface and
        README integration
    """
    # The web UI can be toggled via environment variable or build argument.
    flag = os.getenv("ENABLE_WEB_INTERFACE", "false").lower()
    enable_web = flag in {"true", "1", "yes"}

    if not enable_web:
        # Plain FastAPI app, no web interface.
        return create_fastapi_app(
            env, action_cls, observation_cls, max_concurrent_envs, concurrency_config
        )

    # Gradio-based web UI (gradio is a core dependency)
    from .web_interface import create_web_interface_app

    return create_web_interface_app(
        env,
        action_cls,
        observation_cls,
        env_name,
        max_concurrent_envs,
        concurrency_config,
        gradio_builder=gradio_builder,
    )
|
| 1292 |
+
|
| 1293 |
|
| 1294 |
def create_fastapi_app(
|
| 1295 |
+
env: Callable[[], Environment],
|
| 1296 |
action_cls: Type[Action],
|
| 1297 |
observation_cls: Type[Observation],
|
| 1298 |
+
max_concurrent_envs: Optional[int] = None,
|
| 1299 |
+
concurrency_config: Optional[ConcurrencyConfig] = None,
|
| 1300 |
+
) -> FastAPI:
|
| 1301 |
"""
|
| 1302 |
+
Create a FastAPI application with comprehensive documentation.
|
| 1303 |
|
| 1304 |
Args:
|
| 1305 |
+
env: Environment factory (callable) that creates new instances
|
| 1306 |
action_cls: The Action subclass this environment expects
|
| 1307 |
observation_cls: The Observation subclass this environment returns
|
| 1308 |
+
max_concurrent_envs: Maximum concurrent WebSocket sessions.
|
| 1309 |
+
Mutually exclusive with concurrency_config.
|
| 1310 |
+
concurrency_config: Optional ConcurrencyConfig for advanced concurrency settings.
|
| 1311 |
+
Mutually exclusive with max_concurrent_envs.
|
| 1312 |
|
| 1313 |
Returns:
|
| 1314 |
+
FastAPI application instance
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1315 |
"""
|
| 1316 |
try:
|
| 1317 |
from fastapi import FastAPI
|
|
|
|
| 1320 |
"FastAPI is required. Install with: pip install fastapi uvicorn"
|
| 1321 |
)
|
| 1322 |
|
| 1323 |
+
app = FastAPI(
|
| 1324 |
+
title="OpenEnv Environment HTTP API",
|
| 1325 |
+
version="1.0.0",
|
| 1326 |
+
description="""
|
| 1327 |
+
# OpenEnv Environment HTTP API
|
| 1328 |
+
|
| 1329 |
+
HTTP API for interacting with OpenEnv environments through a standardized interface.
|
| 1330 |
+
|
| 1331 |
+
## Features
|
| 1332 |
+
|
| 1333 |
+
* **Environment Reset**: Initialize or restart episodes
|
| 1334 |
+
* **Action Execution**: Send actions and receive observations
|
| 1335 |
+
* **State Inspection**: Query current environment state
|
| 1336 |
+
* **Schema Access**: Retrieve JSON schemas for actions and observations
|
| 1337 |
+
|
| 1338 |
+
## Workflow
|
| 1339 |
+
|
| 1340 |
+
1. Call `/reset` to start a new episode and get initial observation
|
| 1341 |
+
2. Call `/step` repeatedly with actions to interact with environment
|
| 1342 |
+
3. Episode ends when observation returns `done: true`
|
| 1343 |
+
4. Call `/state` anytime to inspect current environment state
|
| 1344 |
+
|
| 1345 |
+
## Documentation
|
| 1346 |
+
|
| 1347 |
+
* **Swagger UI**: Available at `/docs`
|
| 1348 |
+
* **ReDoc**: Available at `/redoc`
|
| 1349 |
+
* **OpenAPI Schema**: Available at `/openapi.json`
|
| 1350 |
+
""",
|
| 1351 |
+
openapi_tags=[
|
| 1352 |
+
{
|
| 1353 |
+
"name": "Environment Control",
|
| 1354 |
+
"description": "Core operations for environment interaction (reset, step)",
|
| 1355 |
+
},
|
| 1356 |
+
{
|
| 1357 |
+
"name": "State Management",
|
| 1358 |
+
"description": "Operations for inspecting environment state",
|
| 1359 |
+
},
|
| 1360 |
+
{
|
| 1361 |
+
"name": "Environment Info",
|
| 1362 |
+
"description": "Information about the environment",
|
| 1363 |
+
},
|
| 1364 |
+
{
|
| 1365 |
+
"name": "Schema",
|
| 1366 |
+
"description": "JSON Schema endpoints for actions, observations, and state",
|
| 1367 |
+
},
|
| 1368 |
+
{"name": "Health", "description": "Service health and status checks"},
|
| 1369 |
+
],
|
| 1370 |
+
docs_url="/docs",
|
| 1371 |
+
redoc_url="/redoc",
|
| 1372 |
+
openapi_url="/openapi.json",
|
| 1373 |
+
contact={
|
| 1374 |
+
"name": "OpenEnv Team",
|
| 1375 |
+
"url": "https://github.com/meta-pytorch/OpenEnv",
|
| 1376 |
+
},
|
| 1377 |
+
license_info={
|
| 1378 |
+
"name": "BSD-3-Clause",
|
| 1379 |
+
"url": "https://github.com/meta-pytorch/OpenEnv/blob/main/LICENSE",
|
| 1380 |
+
},
|
| 1381 |
+
)
|
| 1382 |
+
|
| 1383 |
+
server = HTTPEnvServer(
|
| 1384 |
+
env,
|
| 1385 |
+
action_cls,
|
| 1386 |
+
observation_cls,
|
| 1387 |
+
max_concurrent_envs,
|
| 1388 |
+
concurrency_config=concurrency_config,
|
| 1389 |
+
)
|
| 1390 |
server.register_routes(app)
|
| 1391 |
return app
|
src/core/env_server/interfaces.py
CHANGED
|
@@ -4,10 +4,20 @@
|
|
| 4 |
# This source code is licensed under the BSD-style license found in the
|
| 5 |
# LICENSE file in the root directory of this source tree.
|
| 6 |
|
|
|
|
| 7 |
from abc import ABC, abstractmethod
|
| 8 |
-
from typing import Any, Protocol,
|
| 9 |
|
| 10 |
-
from
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
|
| 13 |
class Message(TypedDict):
|
|
@@ -64,7 +74,7 @@ class ModelTokenizer(Protocol):
|
|
| 64 |
...
|
| 65 |
|
| 66 |
|
| 67 |
-
class Transform(ABC):
|
| 68 |
"""Transform observations to add rewards, metrics, or other modifications.
|
| 69 |
|
| 70 |
Transforms follow the TorchRL pattern where they take an observation
|
|
@@ -73,7 +83,7 @@ class Transform(ABC):
|
|
| 73 |
"""
|
| 74 |
|
| 75 |
@abstractmethod
|
| 76 |
-
def __call__(self, observation:
|
| 77 |
"""Transform an observation.
|
| 78 |
|
| 79 |
Args:
|
|
@@ -85,34 +95,203 @@ class Transform(ABC):
|
|
| 85 |
pass
|
| 86 |
|
| 87 |
|
| 88 |
-
class Environment(ABC):
|
| 89 |
"""Base class for all environment servers following Gym/Gymnasium API.
|
| 90 |
|
| 91 |
Args:
|
| 92 |
transform: Optional transform to apply to observations
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
"""
|
| 94 |
|
| 95 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 96 |
self.transform = transform
|
|
|
|
| 97 |
|
| 98 |
@abstractmethod
|
| 99 |
-
def reset(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
"""Reset the environment and return initial observation."""
|
| 101 |
pass
|
| 102 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 103 |
@abstractmethod
|
| 104 |
-
def step(
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
"""Take a step in the environment."""
|
| 106 |
pass
|
| 107 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 108 |
@property
|
| 109 |
@abstractmethod
|
| 110 |
-
def state(self) ->
|
| 111 |
"""Get the current environment state."""
|
| 112 |
pass
|
| 113 |
|
| 114 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 115 |
"""Apply transform if one is provided."""
|
| 116 |
if self.transform is not None:
|
| 117 |
return self.transform(observation)
|
| 118 |
return observation
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 4 |
# This source code is licensed under the BSD-style license found in the
|
| 5 |
# LICENSE file in the root directory of this source tree.
|
| 6 |
|
| 7 |
+
import inspect
|
| 8 |
from abc import ABC, abstractmethod
|
| 9 |
+
from typing import Any, Generic, Optional, Protocol, TYPE_CHECKING, TypeVar
|
| 10 |
|
| 11 |
+
from typing_extensions import TypedDict
|
| 12 |
+
|
| 13 |
+
from .types import Action, EnvironmentMetadata, Observation, State
|
| 14 |
+
|
| 15 |
+
if TYPE_CHECKING:
|
| 16 |
+
from openenv.core.rubrics import Rubric
|
| 17 |
+
|
| 18 |
+
ActT = TypeVar("ActT", bound=Action)
|
| 19 |
+
ObsT = TypeVar("ObsT", bound=Observation)
|
| 20 |
+
StateT = TypeVar("StateT", bound=State)
|
| 21 |
|
| 22 |
|
| 23 |
class Message(TypedDict):
|
|
|
|
| 74 |
...
|
| 75 |
|
| 76 |
|
| 77 |
+
class Transform(ABC, Generic[ObsT]):
|
| 78 |
"""Transform observations to add rewards, metrics, or other modifications.
|
| 79 |
|
| 80 |
Transforms follow the TorchRL pattern where they take an observation
|
|
|
|
| 83 |
"""
|
| 84 |
|
| 85 |
@abstractmethod
|
| 86 |
+
def __call__(self, observation: ObsT) -> ObsT:
|
| 87 |
"""Transform an observation.
|
| 88 |
|
| 89 |
Args:
|
|
|
|
| 95 |
pass
|
| 96 |
|
| 97 |
|
| 98 |
+
class Environment(ABC, Generic[ActT, ObsT, StateT]):
|
| 99 |
"""Base class for all environment servers following Gym/Gymnasium API.
|
| 100 |
|
| 101 |
Args:
|
| 102 |
transform: Optional transform to apply to observations
|
| 103 |
+
rubric: Optional rubric for reward computation. When provided, the
|
| 104 |
+
rubric's output can be used to set the observation's reward in step().
|
| 105 |
+
|
| 106 |
+
Class Attributes:
|
| 107 |
+
SUPPORTS_CONCURRENT_SESSIONS: Whether this environment supports concurrent sessions.
|
| 108 |
+
When True, multiple WebSocket connections can each have their own
|
| 109 |
+
environment instance (up to max_concurrent_envs). When False (default),
|
| 110 |
+
the environment should only be used with a single session at a time.
|
| 111 |
+
|
| 112 |
+
Set this to True in your Environment subclass if:
|
| 113 |
+
- The environment uses proper session isolation (e.g., unique working dirs)
|
| 114 |
+
- No shared mutable state exists between instances
|
| 115 |
+
- External resources (databases, APIs) can handle concurrent access
|
| 116 |
+
|
| 117 |
+
Attributes:
|
| 118 |
+
rubric: Optional rubric for computing rewards. Environments can set this
|
| 119 |
+
in __init__ and use it in step() to compute observation rewards.
|
| 120 |
+
Training infrastructure can access it for introspection:
|
| 121 |
+
for name, r in env.rubric.named_rubrics():
|
| 122 |
+
print(f"{name}: {r.last_score}")
|
| 123 |
+
|
| 124 |
+
See RFC 004 for rubric design: rfcs/004-rubrics.md
|
| 125 |
"""
|
| 126 |
|
| 127 |
+
# Class-level flag indicating whether this environment supports concurrent sessions
|
| 128 |
+
SUPPORTS_CONCURRENT_SESSIONS: bool = False
|
| 129 |
+
|
| 130 |
+
# Optional rubric for reward computation
|
| 131 |
+
rubric: Optional["Rubric"]
|
| 132 |
+
|
| 133 |
+
def __init__(
|
| 134 |
+
self,
|
| 135 |
+
transform: Optional[Transform[ObsT]] = None,
|
| 136 |
+
rubric: Optional["Rubric"] = None,
|
| 137 |
+
):
|
| 138 |
self.transform = transform
|
| 139 |
+
self.rubric = rubric
|
| 140 |
|
| 141 |
@abstractmethod
|
| 142 |
+
def reset(
|
| 143 |
+
self,
|
| 144 |
+
seed: Optional[int] = None,
|
| 145 |
+
episode_id: Optional[str] = None,
|
| 146 |
+
**kwargs: Any,
|
| 147 |
+
) -> ObsT:
|
| 148 |
"""Reset the environment and return initial observation."""
|
| 149 |
pass
|
| 150 |
|
| 151 |
+
async def reset_async(
|
| 152 |
+
self,
|
| 153 |
+
seed: Optional[int] = None,
|
| 154 |
+
episode_id: Optional[str] = None,
|
| 155 |
+
**kwargs: Any,
|
| 156 |
+
) -> ObsT:
|
| 157 |
+
"""Async version of reset. Default implementation calls sync reset.
|
| 158 |
+
|
| 159 |
+
Override to provide true async implementation.
|
| 160 |
+
"""
|
| 161 |
+
return self.reset(seed=seed, episode_id=episode_id, **kwargs)
|
| 162 |
+
|
| 163 |
@abstractmethod
|
| 164 |
+
def step(
|
| 165 |
+
self,
|
| 166 |
+
action: ActT,
|
| 167 |
+
timeout_s: Optional[float] = None,
|
| 168 |
+
**kwargs: Any,
|
| 169 |
+
) -> ObsT:
|
| 170 |
"""Take a step in the environment."""
|
| 171 |
pass
|
| 172 |
|
| 173 |
+
async def step_async(
|
| 174 |
+
self,
|
| 175 |
+
action: ActT,
|
| 176 |
+
timeout_s: Optional[float] = None,
|
| 177 |
+
**kwargs: Any,
|
| 178 |
+
) -> ObsT:
|
| 179 |
+
"""Async version of step. Default implementation calls sync step.
|
| 180 |
+
|
| 181 |
+
Override to provide true async implementation.
|
| 182 |
+
"""
|
| 183 |
+
return self.step(action, timeout_s=timeout_s, **kwargs)
|
| 184 |
+
|
| 185 |
@property
|
| 186 |
@abstractmethod
|
| 187 |
+
def state(self) -> StateT:
|
| 188 |
"""Get the current environment state."""
|
| 189 |
pass
|
| 190 |
|
| 191 |
+
def get_metadata(self) -> EnvironmentMetadata:
|
| 192 |
+
"""
|
| 193 |
+
Get metadata about this environment.
|
| 194 |
+
|
| 195 |
+
Override this method to provide custom metadata for the environment.
|
| 196 |
+
Default implementation returns basic metadata derived from class name.
|
| 197 |
+
|
| 198 |
+
Returns:
|
| 199 |
+
EnvironmentMetadata with environment information
|
| 200 |
+
"""
|
| 201 |
+
return EnvironmentMetadata(
|
| 202 |
+
name=self.__class__.__name__,
|
| 203 |
+
description=f"{self.__class__.__name__} environment",
|
| 204 |
+
version="1.0.0",
|
| 205 |
+
)
|
| 206 |
+
|
| 207 |
+
def _apply_transform(self, observation: ObsT) -> ObsT:
|
| 208 |
"""Apply transform if one is provided."""
|
| 209 |
if self.transform is not None:
|
| 210 |
return self.transform(observation)
|
| 211 |
return observation
|
| 212 |
+
|
| 213 |
+
def _apply_rubric(self, action: ActT, observation: ObsT) -> float:
|
| 214 |
+
"""Apply rubric if one is provided.
|
| 215 |
+
|
| 216 |
+
Args:
|
| 217 |
+
action: The action taken by the agent.
|
| 218 |
+
observation: The resulting observation.
|
| 219 |
+
|
| 220 |
+
Returns:
|
| 221 |
+
Reward value from the rubric, or 0.0 if no rubric is set.
|
| 222 |
+
|
| 223 |
+
Usage in step():
|
| 224 |
+
def step(self, action: MyAction, ...) -> MyObservation:
|
| 225 |
+
# ... execute action and create observation ...
|
| 226 |
+
observation.reward = self._apply_rubric(action, observation)
|
| 227 |
+
return observation
|
| 228 |
+
"""
|
| 229 |
+
if self.rubric is not None:
|
| 230 |
+
return self.rubric(action, observation)
|
| 231 |
+
return 0.0
|
| 232 |
+
|
| 233 |
+
async def _apply_rubric_async(self, action: ActT, observation: ObsT) -> float:
|
| 234 |
+
"""Apply rubric asynchronously if one is provided.
|
| 235 |
+
|
| 236 |
+
Args:
|
| 237 |
+
action: The action taken by the agent.
|
| 238 |
+
observation: The resulting observation.
|
| 239 |
+
|
| 240 |
+
Returns:
|
| 241 |
+
Reward value from the rubric, or 0.0 if no rubric is set.
|
| 242 |
+
|
| 243 |
+
Usage in step_async():
|
| 244 |
+
async def step_async(self, action: MyAction, ...) -> MyObservation:
|
| 245 |
+
# ... execute action and create observation ...
|
| 246 |
+
observation.reward = await self._apply_rubric_async(action, observation)
|
| 247 |
+
return observation
|
| 248 |
+
"""
|
| 249 |
+
if self.rubric is not None:
|
| 250 |
+
result = self.rubric(action, observation)
|
| 251 |
+
# If rubric returns a coroutine, await it
|
| 252 |
+
if inspect.iscoroutine(result):
|
| 253 |
+
return await result
|
| 254 |
+
return result
|
| 255 |
+
return 0.0
|
| 256 |
+
|
| 257 |
+
def _reset_rubric(self) -> None:
|
| 258 |
+
"""Reset the rubric state if one is provided.
|
| 259 |
+
|
| 260 |
+
Call this in reset() to clear any trajectory state in the rubric.
|
| 261 |
+
|
| 262 |
+
Usage in reset():
|
| 263 |
+
def reset(self, ...) -> MyObservation:
|
| 264 |
+
self._reset_rubric()
|
| 265 |
+
# ... create initial observation ...
|
| 266 |
+
return observation
|
| 267 |
+
"""
|
| 268 |
+
if self.rubric is not None:
|
| 269 |
+
self.rubric.reset()
|
| 270 |
+
|
| 271 |
+
async def _reset_rubric_async(self) -> None:
|
| 272 |
+
"""Reset the rubric state asynchronously if one is provided.
|
| 273 |
+
|
| 274 |
+
Call this in reset_async() to clear any trajectory state in the rubric.
|
| 275 |
+
|
| 276 |
+
Usage in reset_async():
|
| 277 |
+
async def reset_async(self, ...) -> MyObservation:
|
| 278 |
+
await self._reset_rubric_async()
|
| 279 |
+
# ... create initial observation ...
|
| 280 |
+
return observation
|
| 281 |
+
"""
|
| 282 |
+
if self.rubric is not None:
|
| 283 |
+
# Check if rubric has async reset method
|
| 284 |
+
if hasattr(self.rubric, "reset_async"):
|
| 285 |
+
result = self.rubric.reset_async()
|
| 286 |
+
if inspect.iscoroutine(result):
|
| 287 |
+
await result
|
| 288 |
+
else:
|
| 289 |
+
self.rubric.reset()
|
| 290 |
+
|
| 291 |
+
def close(self) -> None:
|
| 292 |
+
"""Clean up resources used by the environment.
|
| 293 |
+
|
| 294 |
+
Override this method to implement custom cleanup logic.
|
| 295 |
+
Called when the environment is being destroyed or reset.
|
| 296 |
+
"""
|
| 297 |
+
pass
|
src/core/env_server/mcp_environment.py
ADDED
|
@@ -0,0 +1,624 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright (c) Meta Platforms, Inc. and affiliates.
|
| 2 |
+
# All rights reserved.
|
| 3 |
+
#
|
| 4 |
+
# This source code is licensed under the BSD-style license found in the
|
| 5 |
+
# LICENSE file in the root directory of this source tree.
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
MCP Environment base class for OpenEnv.
|
| 9 |
+
|
| 10 |
+
This module provides the MCPEnvironment base class that integrates FastMCP servers
|
| 11 |
+
with OpenEnv's Gym-style Environment interface. It handles MCP tool discovery
|
| 12 |
+
and invocation through the step() API, following RFC 003.
|
| 13 |
+
|
| 14 |
+
Key features:
|
| 15 |
+
- Automatic routing of ListToolsAction and CallToolAction to MCP server
|
| 16 |
+
- Reserved tool name validation (reset, step, state, close are protected)
|
| 17 |
+
- Timeout handling for tool calls
|
| 18 |
+
- Proper error categorization (tool not found, execution errors, timeouts)
|
| 19 |
+
- Mode-aware tool registration (production vs simulation)
|
| 20 |
+
- Code mode support via get_callables() and execute_code()
|
| 21 |
+
|
| 22 |
+
Usage:
|
| 23 |
+
from fastmcp import FastMCP
|
| 24 |
+
from openenv.core.env_server.mcp_environment import MCPEnvironment
|
| 25 |
+
|
| 26 |
+
class MyMCPEnv(MCPEnvironment):
|
| 27 |
+
def __init__(self):
|
| 28 |
+
mcp = FastMCP("my-server")
|
| 29 |
+
|
| 30 |
+
# Register mode-specific tools
|
| 31 |
+
@self.tool(mode="production")
|
| 32 |
+
def my_tool(arg: str) -> str:
|
| 33 |
+
return f"Production: {arg}"
|
| 34 |
+
|
| 35 |
+
@self.tool(mode="simulation")
|
| 36 |
+
def my_tool(arg: str) -> str:
|
| 37 |
+
return f"Simulation: {arg}"
|
| 38 |
+
|
| 39 |
+
super().__init__(mcp)
|
| 40 |
+
|
| 41 |
+
def reset(self, seed=None, episode_id=None, **kwargs):
|
| 42 |
+
# Reset logic here
|
| 43 |
+
...
|
| 44 |
+
|
| 45 |
+
def _step_impl(self, action):
|
| 46 |
+
# Handle non-MCP actions
|
| 47 |
+
...
|
| 48 |
+
|
| 49 |
+
@property
|
| 50 |
+
def state(self):
|
| 51 |
+
# Return current state
|
| 52 |
+
...
|
| 53 |
+
"""
|
| 54 |
+
|
| 55 |
+
import asyncio
|
| 56 |
+
import inspect
|
| 57 |
+
from abc import abstractmethod
|
| 58 |
+
from collections import defaultdict
|
| 59 |
+
from typing import Any, Callable, Dict, Optional
|
| 60 |
+
|
| 61 |
+
from fastmcp import Client
|
| 62 |
+
from fastmcp.client.client import CallToolResult
|
| 63 |
+
from mcp.types import TextContent
|
| 64 |
+
|
| 65 |
+
from ..utils import run_async_safely
|
| 66 |
+
from .interfaces import Environment
|
| 67 |
+
from .mcp_types import (
|
| 68 |
+
CallToolAction,
|
| 69 |
+
CallToolObservation,
|
| 70 |
+
ListToolsAction,
|
| 71 |
+
ListToolsObservation,
|
| 72 |
+
RESERVED_TOOL_NAMES,
|
| 73 |
+
Tool,
|
| 74 |
+
ToolError,
|
| 75 |
+
ToolErrorType,
|
| 76 |
+
)
|
| 77 |
+
from .types import Action, Observation
|
| 78 |
+
|
| 79 |
+
|
| 80 |
+
# Default timeout for MCP tool calls in seconds
|
| 81 |
+
MCP_TOOL_CALL_TIMEOUT = 30.0
|
| 82 |
+
|
| 83 |
+
# Valid modes for tool registration
|
| 84 |
+
VALID_MODES = {"production", "simulation"}
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
def get_server_tools(mcp_server: Any) -> Dict[str, Any]:
|
| 88 |
+
"""
|
| 89 |
+
Get tools from a FastMCP server, compatible with both 2.x and 3.x.
|
| 90 |
+
|
| 91 |
+
Returns:
|
| 92 |
+
Dictionary mapping tool names to tool objects.
|
| 93 |
+
"""
|
| 94 |
+
# FastMCP 2.x: get_tools() returns dict {name: Tool}
|
| 95 |
+
if hasattr(mcp_server, "get_tools"):
|
| 96 |
+
result = run_async_safely(mcp_server.get_tools())
|
| 97 |
+
if isinstance(result, dict):
|
| 98 |
+
return result
|
| 99 |
+
# FastMCP 3.x: list_tools() returns list of Tool objects
|
| 100 |
+
if hasattr(mcp_server, "list_tools"):
|
| 101 |
+
tools_list = run_async_safely(mcp_server.list_tools())
|
| 102 |
+
return {t.name: t for t in tools_list}
|
| 103 |
+
return {}
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class MCPEnvironment(Environment):
|
| 107 |
+
"""
|
| 108 |
+
Base class for environments that expose tools via MCP (Model Context Protocol).
|
| 109 |
+
|
| 110 |
+
MCPEnvironment bridges FastMCP servers with OpenEnv's Gym-style API, allowing
|
| 111 |
+
agents to discover and invoke MCP tools through the standard step() interface.
|
| 112 |
+
|
| 113 |
+
The class automatically handles:
|
| 114 |
+
- ListToolsAction: Returns available tools from the MCP server
|
| 115 |
+
- CallToolAction: Invokes a specific tool with arguments
|
| 116 |
+
|
| 117 |
+
All other actions are delegated to the abstract _step_impl() method,
|
| 118 |
+
which subclasses must implement.
|
| 119 |
+
|
| 120 |
+
Args:
|
| 121 |
+
mcp_server: A FastMCP server instance containing tool definitions.
|
| 122 |
+
The server's tools will be validated against reserved names.
|
| 123 |
+
transform: Optional transform to apply to observations (inherited from Environment).
|
| 124 |
+
|
| 125 |
+
Raises:
|
| 126 |
+
ValueError: If any tool in the MCP server uses a reserved name
|
| 127 |
+
(reset, step, state, close).
|
| 128 |
+
|
| 129 |
+
Example:
|
| 130 |
+
>>> from fastmcp import FastMCP
|
| 131 |
+
>>> mcp = FastMCP("calculator")
|
| 132 |
+
>>> @mcp.tool()
|
| 133 |
+
... def add(a: int, b: int) -> int:
|
| 134 |
+
... return a + b
|
| 135 |
+
>>> env = MyMCPEnvironment(mcp)
|
| 136 |
+
>>> obs = env.step(ListToolsAction())
|
| 137 |
+
>>> obs.tools[0].name
|
| 138 |
+
'add'
|
| 139 |
+
"""
|
| 140 |
+
|
| 141 |
+
def __init__(self, mcp_server: Any, transform: Optional[Any] = None) -> None:
|
| 142 |
+
"""
|
| 143 |
+
Initialize the MCP environment.
|
| 144 |
+
|
| 145 |
+
Args:
|
| 146 |
+
mcp_server: A FastMCP server instance with tool definitions.
|
| 147 |
+
transform: Optional transform to apply to observations.
|
| 148 |
+
|
| 149 |
+
Raises:
|
| 150 |
+
ValueError: If any tool uses a reserved name (reset, step, state, close).
|
| 151 |
+
"""
|
| 152 |
+
super().__init__(transform=transform)
|
| 153 |
+
|
| 154 |
+
# Validate tool names before storing
|
| 155 |
+
self._validate_tool_names(mcp_server)
|
| 156 |
+
|
| 157 |
+
self.mcp_server = mcp_server
|
| 158 |
+
self.mcp_client = Client(mcp_server)
|
| 159 |
+
|
| 160 |
+
# Track mode-specific tools: {tool_name: {mode: func}}
|
| 161 |
+
# mode can be "production", "simulation", or None (available in all modes)
|
| 162 |
+
self._mode_tools = defaultdict(dict)
|
| 163 |
+
|
| 164 |
+
# Track tool schemas for list_tools: {tool_name: {mode: schema}}
|
| 165 |
+
self._mode_tool_schemas = defaultdict(dict)
|
| 166 |
+
|
| 167 |
+
@property
|
| 168 |
+
def supports_code_mode(self) -> bool:
|
| 169 |
+
"""Check if this environment supports code mode (execute_code)."""
|
| 170 |
+
return True
|
| 171 |
+
|
| 172 |
+
def _get_server_tools(self, mcp_server: Any) -> Dict[str, Any]:
|
| 173 |
+
"""
|
| 174 |
+
Get tools from a FastMCP server, compatible with both 2.x and 3.x.
|
| 175 |
+
|
| 176 |
+
Returns:
|
| 177 |
+
Dictionary mapping tool names to tool objects.
|
| 178 |
+
"""
|
| 179 |
+
return get_server_tools(mcp_server)
|
| 180 |
+
|
| 181 |
+
def get_callables(self) -> Dict[str, Callable]:
|
| 182 |
+
"""
|
| 183 |
+
Get callable functions for code mode.
|
| 184 |
+
|
| 185 |
+
Returns tool functions as direct Python callables, enabling code mode
|
| 186 |
+
where agents write Python code that calls tools directly (no JSON-RPC
|
| 187 |
+
overhead). Mode-specific tools are filtered by the current mode.
|
| 188 |
+
|
| 189 |
+
Returns:
|
| 190 |
+
Dictionary mapping tool names to callables.
|
| 191 |
+
"""
|
| 192 |
+
callables: Dict[str, Callable] = {}
|
| 193 |
+
current_mode = getattr(self, "_mode", None)
|
| 194 |
+
|
| 195 |
+
# Extract callables from FastMCP server using public API
|
| 196 |
+
for tool_name, tool in self._get_server_tools(self.mcp_server).items():
|
| 197 |
+
if hasattr(tool, "fn") and callable(tool.fn):
|
| 198 |
+
callables[tool_name] = tool.fn
|
| 199 |
+
|
| 200 |
+
# Add mode-specific tools available in current mode
|
| 201 |
+
for tool_name, mode_funcs in self._mode_tools.items():
|
| 202 |
+
if None in mode_funcs:
|
| 203 |
+
# Tool available in all modes (already in FastMCP if registered there)
|
| 204 |
+
if tool_name not in callables:
|
| 205 |
+
callables[tool_name] = mode_funcs[None]
|
| 206 |
+
elif current_mode in mode_funcs:
|
| 207 |
+
# Tool available in current mode only
|
| 208 |
+
callables[tool_name] = mode_funcs[current_mode]
|
| 209 |
+
|
| 210 |
+
return callables
|
| 211 |
+
|
| 212 |
+
def execute_code(self, code: str) -> Observation:
|
| 213 |
+
"""
|
| 214 |
+
Execute Python code with tools available as callables.
|
| 215 |
+
|
| 216 |
+
This enables the CodeAct pattern where agents write Python code
|
| 217 |
+
that calls tools directly as functions, avoiding JSON-RPC overhead.
|
| 218 |
+
|
| 219 |
+
Args:
|
| 220 |
+
code: Python code to execute. Tools are available as functions
|
| 221 |
+
in the execution namespace. Set a variable named 'result'
|
| 222 |
+
to capture the return value.
|
| 223 |
+
|
| 224 |
+
Returns:
|
| 225 |
+
Observation with result in metadata["result"] or error in
|
| 226 |
+
metadata["error"].
|
| 227 |
+
"""
|
| 228 |
+
namespace = self.get_callables()
|
| 229 |
+
|
| 230 |
+
result_dict: Dict[str, Any] = {}
|
| 231 |
+
try:
|
| 232 |
+
exec(code, namespace, result_dict)
|
| 233 |
+
result = result_dict.get("result")
|
| 234 |
+
return Observation(done=False, reward=0.0, metadata={"result": result})
|
| 235 |
+
except SyntaxError as e:
|
| 236 |
+
return Observation(
|
| 237 |
+
done=False, reward=0.0, metadata={"error": f"Syntax error: {str(e)}"}
|
| 238 |
+
)
|
| 239 |
+
except Exception as e:
|
| 240 |
+
return Observation(done=False, reward=0.0, metadata={"error": str(e)})
|
| 241 |
+
|
| 242 |
+
def _validate_tool_names(self, mcp_server: Any) -> None:
|
| 243 |
+
"""
|
| 244 |
+
Validate that no tools use reserved names.
|
| 245 |
+
|
| 246 |
+
Reserved names (reset, step, state, close) are protected to maintain
|
| 247 |
+
the dual API boundary between infrastructure and agent APIs.
|
| 248 |
+
|
| 249 |
+
Args:
|
| 250 |
+
mcp_server: The FastMCP server to validate.
|
| 251 |
+
|
| 252 |
+
Raises:
|
| 253 |
+
ValueError: If any tool uses a reserved name.
|
| 254 |
+
"""
|
| 255 |
+
tools_dict = self._get_server_tools(mcp_server)
|
| 256 |
+
if tools_dict:
|
| 257 |
+
tool_names = set(tools_dict.keys())
|
| 258 |
+
conflicts = tool_names & RESERVED_TOOL_NAMES
|
| 259 |
+
if conflicts:
|
| 260 |
+
raise ValueError(
|
| 261 |
+
f"MCP tools cannot use reserved names: {sorted(conflicts)}. "
|
| 262 |
+
f"Reserved names are: {sorted(RESERVED_TOOL_NAMES)}"
|
| 263 |
+
)
|
| 264 |
+
|
| 265 |
+
def tool(self, mode: Optional[str] = None) -> Callable:
    """
    Decorator for registering mode-aware tools.

    Tools registered with ``mode=None`` are handed straight to FastMCP and
    are available in every mode. Mode-specific tools are NOT registered
    with FastMCP; they are tracked locally together with a JSON schema
    derived from the function signature, so they can be listed and
    dispatched per mode.

    Args:
        mode: Optional mode for the tool ("production" or "simulation").
            If None, the tool is available in all modes.

    Returns:
        A decorator function for registering tools.

    Raises:
        ValueError: If mode is not None, "production", or "simulation",
            or if the decorated function uses a reserved tool name.
    """
    if mode is not None and mode not in VALID_MODES:
        raise ValueError(
            f"Invalid mode '{mode}'. Mode must be 'production', 'simulation', or None."
        )

    def decorator(func: Callable) -> Callable:
        tool_name = func.__name__
        # Reserved names protect the infrastructure API surface.
        if tool_name in RESERVED_TOOL_NAMES:
            raise ValueError(
                f"Tool name '{tool_name}' is reserved and cannot be used. "
                f"Reserved names are: {sorted(RESERVED_TOOL_NAMES)}"
            )

        # Mode-agnostic tools go through FastMCP as usual.
        if mode is None:
            decorated_func = self.mcp_server.tool()(func)
            self._mode_tools[tool_name][None] = func
            return decorated_func

        # Mode-specific tools are tracked locally instead of being
        # registered with FastMCP.
        self._mode_tools[tool_name][mode] = func

        # Derive a minimal JSON schema from the function signature.
        sig = inspect.signature(func)
        schema = {
            "type": "object",
            "properties": {},
            "required": [],
        }

        for param_name, param in sig.parameters.items():
            # Map the annotation to a JSON type; anything unrecognized
            # (including missing annotations) falls back to "string".
            param_type = param.annotation
            json_type = "string"
            if param_type in (int, "int"):
                json_type = "integer"
            elif param_type in (float, "float"):
                json_type = "number"
            elif param_type in (bool, "bool"):
                json_type = "boolean"

            schema["properties"][param_name] = {"type": json_type}

            # Parameters without a default are required. Parameter.empty
            # is a sentinel, so compare by identity, not equality.
            if param.default is inspect.Parameter.empty:
                schema["required"].append(param_name)

        # Store the schema so the tool can be surfaced in list_tools
        # for its mode.
        self._mode_tool_schemas[tool_name][mode] = {
            "name": tool_name,
            "description": func.__doc__ or "",
            "input_schema": schema,
        }

        return func

    return decorator
|
| 338 |
+
|
| 339 |
+
def step(
    self,
    action: Action,
    timeout_s: Optional[float] = None,
    **kwargs: Any,
) -> Observation:
    """
    Execute an action in the environment.

    MCP-specific actions are routed to dedicated handlers; every other
    action is delegated to the subclass's _step_impl().

    Args:
        action: The action to execute. Can be:
            - ListToolsAction: returns the available MCP tools
            - CallToolAction: invokes a specific MCP tool
            - Any other Action: delegated to _step_impl()
        timeout_s: Optional timeout in seconds for the action.
            MCP tool calls default to MCP_TOOL_CALL_TIMEOUT (30s).
        **kwargs: Additional arguments forwarded to _step_impl().

    Returns:
        Observation matching the action type: ListToolsObservation,
        CallToolObservation, or a subclass-defined Observation.
    """
    # Guard-style dispatch: MCP actions first, everything else delegated.
    if isinstance(action, ListToolsAction):
        return self._handle_list_tools()
    if isinstance(action, CallToolAction):
        return self._handle_call_tool(action, timeout_s=timeout_s)
    return self._step_impl(action, timeout_s=timeout_s, **kwargs)
|
| 373 |
+
|
| 374 |
+
def _handle_list_tools(self) -> ListToolsObservation:
    """
    Handle a ListToolsAction by querying the MCP server.

    Returns:
        ListToolsObservation with every tool visible in the current mode:
        FastMCP-registered tools plus locally tracked mode-specific tools.
        On failure, an empty tool list with the error in metadata.
    """
    try:
        current_mode = getattr(self, "_mode", None)

        # Tools registered with the FastMCP server (the mode=None ones).
        server_tools = run_async_safely(self._async_list_tools())

        tools = []

        # Keep only server tools that are not shadowed by a
        # mode-specific registration.
        for srv_tool in server_tools:
            if srv_tool.name in self._mode_tool_schemas:
                continue
            input_schema = (
                srv_tool.inputSchema if hasattr(srv_tool, "inputSchema") else {}
            )
            tools.append(
                Tool(
                    name=srv_tool.name,
                    description=srv_tool.description or "",
                    input_schema=input_schema,
                )
            )

        # Locally tracked tools: an all-modes (None) schema wins,
        # otherwise fall back to the current mode's schema; tools with
        # neither are hidden in this mode.
        for mode_schemas in self._mode_tool_schemas.values():
            if None in mode_schemas:
                schema = mode_schemas[None]
            elif current_mode in mode_schemas:
                schema = mode_schemas[current_mode]
            else:
                continue
            tools.append(
                Tool(
                    name=schema["name"],
                    description=schema["description"],
                    input_schema=schema["input_schema"],
                )
            )

        return ListToolsObservation(tools=tools)

    except Exception as e:
        # Surface the failure in metadata rather than raising.
        return ListToolsObservation(
            tools=[],
            metadata={
                "error": str(e),
                "error_type": "list_tools_failed",
            },
        )
|
| 439 |
+
|
| 440 |
+
async def _async_list_tools(self) -> list:
    """
    Async helper to fetch the tool list from the MCP client.

    Returns:
        List of tool objects reported by the MCP server.
    """
    client = self.mcp_client
    # The client is an async context manager; enter it for the call.
    async with client:
        return await client.list_tools()
|
| 449 |
+
|
| 450 |
+
def _handle_call_tool(
    self,
    action: CallToolAction,
    timeout_s: Optional[float] = None,
) -> CallToolObservation:
    """
    Handle a CallToolAction by invoking the specified tool.

    Mode-specific tools (tracked locally) are called directly; everything
    else is dispatched through the FastMCP client with a timeout.

    Args:
        action: The CallToolAction containing tool_name and arguments.
        timeout_s: Timeout in seconds. Defaults to MCP_TOOL_CALL_TIMEOUT (30s).

    Returns:
        CallToolObservation with the tool's result or an error.
    """
    timeout = timeout_s if timeout_s is not None else MCP_TOOL_CALL_TIMEOUT

    tool_name = action.tool_name
    current_mode = getattr(self, "_mode", None)

    if tool_name in self._mode_tools:
        mode_info = self._mode_tools[tool_name]

        # A tool is callable if it has a mode-agnostic (None) variant,
        # or a variant registered for the current mode.
        if None in mode_info:
            func = mode_info[None]
        elif current_mode in mode_info:
            func = mode_info[current_mode]
        else:
            # Tool exists, but not in this mode.
            return CallToolObservation(
                tool_name=tool_name,
                result=None,
                error=ToolError(
                    error_type=ToolErrorType.TOOL_NOT_FOUND,
                    message=f"Tool '{tool_name}' not available in {current_mode} mode",
                ),
            )

        # Call the mode-specific function directly (not via FastMCP).
        try:
            # Await coroutine functions; call plain functions directly.
            if inspect.iscoroutinefunction(func):
                result = run_async_safely(func(**action.arguments))
            else:
                result = func(**action.arguments)

            # Wrap the raw result in CallToolResult to mirror FastMCP's
            # response shape.
            return CallToolObservation(
                tool_name=tool_name,
                result=CallToolResult(
                    content=[TextContent(type="text", text=str(result))],
                    structured_content={"result": result},
                    meta=None,
                    data=result,
                    is_error=False,
                ),
            )
        except Exception as e:
            return CallToolObservation(
                tool_name=tool_name,
                result=None,
                error=ToolError(
                    error_type=ToolErrorType.EXECUTION_ERROR,
                    message=str(e),
                ),
            )

    # Not a mode-specific tool: dispatch through FastMCP with a timeout.
    try:
        # run_async_safely handles both sync and async calling contexts.
        result = run_async_safely(
            asyncio.wait_for(
                self._async_call_tool(tool_name, action.arguments),
                timeout=timeout,
            )
        )

        return CallToolObservation(
            tool_name=tool_name,
            result=result,
        )

    except asyncio.TimeoutError:
        return CallToolObservation(
            tool_name=tool_name,
            result=None,
            error=ToolError(
                error_type=ToolErrorType.TIMEOUT,
                message=f"Tool '{tool_name}' timed out after {timeout} seconds",
            ),
        )

    except Exception as e:
        error_message = str(e)
        # Lowercase once; classify the failure from the message text
        # (best effort -- the MCP client does not expose error codes here).
        lowered = error_message.lower()

        if "not found" in lowered or "unknown tool" in lowered:
            error_type = ToolErrorType.TOOL_NOT_FOUND
        elif "invalid" in lowered or "argument" in lowered:
            error_type = ToolErrorType.INVALID_ARGS
        else:
            error_type = ToolErrorType.EXECUTION_ERROR

        return CallToolObservation(
            tool_name=tool_name,
            result=None,
            error=ToolError(
                error_type=error_type,
                message=error_message,
            ),
        )
|
| 575 |
+
|
| 576 |
+
async def _async_call_tool(self, tool_name: str, arguments: dict) -> Any:
    """
    Async helper to invoke a single tool on the MCP server.

    Args:
        tool_name: Name of the tool to invoke.
        arguments: Dictionary of arguments passed to the tool.

    Returns:
        The result from the tool execution.
    """
    client = self.mcp_client
    # Enter the client's async context for the duration of the call.
    async with client:
        return await client.call_tool(tool_name, arguments)
|
| 589 |
+
|
| 590 |
+
@abstractmethod
def _step_impl(
    self,
    action: Action,
    timeout_s: Optional[float] = None,
    **kwargs: Any,
) -> Observation:
    """
    Handle non-MCP actions in the environment.

    Subclasses implement the environment-specific processing here for
    every action that is not a ListToolsAction or CallToolAction --
    step() guarantees MCP actions never reach this method.

    Args:
        action: The action to execute (never an MCP action).
        timeout_s: Optional timeout in seconds.
        **kwargs: Additional arguments.

    Returns:
        An Observation appropriate for the action.
    """
    ...
|
| 613 |
+
|
| 614 |
+
def close(self) -> None:
    """
    Release resources held by the environment.

    The MCP client cleans itself up through its async context manager
    when each call's context exits, so closing here only drops the
    references. Subclasses overriding this should call super().close().
    """
    # Drop references; the async context managers already handled cleanup.
    self.mcp_client = None
    self.mcp_server = None
|