Pramod Basavaraj Menasi committed on
Commit
d574597
·
1 Parent(s): 88b5c45

Add application file

Browse files
.gitattributes CHANGED
@@ -33,3 +33,7 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ __pycache__/
37
+ *.pyc
38
+ .env
39
+ incidentops_env/
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+
2
+ .env
Dockerfile ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ # Multi-stage build using openenv-base
8
+ # This Dockerfile is flexible and works for both:
9
+ # - In-repo environments (with local OpenEnv sources)
10
+ # - Standalone environments (with openenv from PyPI/Git)
11
+ # The build script (openenv build) handles context detection and sets appropriate build args.
12
+
13
+ ARG BASE_IMAGE=ghcr.io/meta-pytorch/openenv-base:latest
14
+ FROM ${BASE_IMAGE} AS builder
15
+
16
+ WORKDIR /app
17
+
18
+ # Ensure git is available (required for installing dependencies from VCS)
19
+ RUN apt-get update && \
20
+ apt-get install -y --no-install-recommends git && \
21
+ rm -rf /var/lib/apt/lists/*
22
+
23
+ # Build argument to control whether we're building standalone or in-repo
24
+ ARG BUILD_MODE=in-repo
25
+ ARG ENV_NAME=incidentops_env
26
+
27
+ # Copy environment code (always at root of build context)
28
+ COPY . /app/env
29
+
30
+ # For in-repo builds, openenv is already vendored in the build context
31
+ # For standalone builds, openenv will be installed via pyproject.toml
32
+ WORKDIR /app/env
33
+
34
+ # Ensure uv is available (for local builds where base image lacks it)
35
+ RUN if ! command -v uv >/dev/null 2>&1; then \
36
+ curl -LsSf https://astral.sh/uv/install.sh | sh && \
37
+ mv /root/.local/bin/uv /usr/local/bin/uv && \
38
+ mv /root/.local/bin/uvx /usr/local/bin/uvx; \
39
+ fi
40
+
41
+ # Install dependencies using uv sync
42
+ # If uv.lock exists, use it; otherwise resolve on the fly
43
+ RUN --mount=type=cache,target=/root/.cache/uv \
44
+ if [ -f uv.lock ]; then \
45
+ uv sync --frozen --no-install-project --no-editable; \
46
+ else \
47
+ uv sync --no-install-project --no-editable; \
48
+ fi
49
+
50
+ RUN --mount=type=cache,target=/root/.cache/uv \
51
+ if [ -f uv.lock ]; then \
52
+ uv sync --frozen --no-editable; \
53
+ else \
54
+ uv sync --no-editable; \
55
+ fi
56
+
57
+ # Final runtime stage
58
+ FROM ${BASE_IMAGE}
59
+
60
+ WORKDIR /app
61
+
62
+ # Copy the virtual environment from builder
63
+ COPY --from=builder /app/env/.venv /app/.venv
64
+
65
+ # Copy the environment code
66
+ COPY --from=builder /app/env /app/env
67
+
68
+ # Set PATH to use the virtual environment
69
+ ENV PATH="/app/.venv/bin:$PATH"
70
+
71
+ # Set PYTHONPATH so imports work correctly
72
+ ENV PYTHONPATH="/app/env:$PYTHONPATH"
73
+
74
+ ENV ENABLE_WEB_INTERFACE=true
75
+
76
+ # Health check
77
+ HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
78
+ CMD curl -f http://localhost:8000/health || exit 1
79
+
80
+ # Run the FastAPI server
81
+ # The module path is constructed to work with the /app/env structure
82
+ CMD ["sh", "-c", "cd /app/env && uvicorn server.app:app --host 0.0.0.0 --port 8000"]
README.md CHANGED
@@ -1,11 +1,255 @@
1
  ---
2
- title: Incidentops Env
3
- emoji: 🌖
4
- colorFrom: indigo
5
  colorTo: blue
6
  sdk: docker
7
  pinned: false
8
- license: mit
 
 
 
9
  ---
10
 
11
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  ---
2
+ title: Incidentops Env Environment Server
3
+ emoji:
4
+ colorFrom: yellow
5
  colorTo: blue
6
  sdk: docker
7
  pinned: false
8
+ app_port: 8000
9
+ base_path: /web
10
+ tags:
11
+ - openenv
12
  ---
13
 
14
+ # Incidentops Env Environment
15
+
16
+ An incident-response simulation environment: an agent investigates an alert and chooses actions to resolve the incident within an SLA budget. Useful for testing the env APIs as well as demonstrating environment usage patterns.
17
+
18
+ ## Quick Start
19
+
20
+ The simplest way to use the Incidentops Env environment is through the `IncidentopsEnv` class:
21
+
22
+ ```python
23
+ from incidentops_env import IncidentopsAction, IncidentopsEnv
24
+
25
+ try:
26
+ # Create environment from Docker image
27
+ incidentops_envenv = IncidentopsEnv.from_docker_image("incidentops_env-env:latest")
28
+
29
+ # Reset
30
+ result = incidentops_envenv.reset()
31
+ print(f"Reset: {result.observation.echoed_message}")
32
+
33
+ # Send multiple messages
34
+ messages = ["Hello, World!", "Testing echo", "Final message"]
35
+
36
+ for msg in messages:
37
+ result = incidentops_envenv.step(IncidentopsAction(message=msg))
38
+ print(f"Sent: '{msg}'")
39
+ print(f" → Echoed: '{result.observation.echoed_message}'")
40
+ print(f" → Length: {result.observation.message_length}")
41
+ print(f" → Reward: {result.reward}")
42
+
43
+ finally:
44
+ # Always clean up
45
+ incidentops_envenv.close()
46
+ ```
47
+
48
+ That's it! The `IncidentopsEnv.from_docker_image()` method handles:
49
+ - Starting the Docker container
50
+ - Waiting for the server to be ready
51
+ - Connecting to the environment
52
+ - Container cleanup when you call `close()`
53
+
54
+ ## Building the Docker Image
55
+
56
+ Before using the environment, you need to build the Docker image:
57
+
58
+ ```bash
59
+ # From project root
60
+ docker build -t incidentops_env-env:latest -f server/Dockerfile .
61
+ ```
62
+
63
+ ## Deploying to Hugging Face Spaces
64
+
65
+ You can easily deploy your OpenEnv environment to Hugging Face Spaces using the `openenv push` command:
66
+
67
+ ```bash
68
+ # From the environment directory (where openenv.yaml is located)
69
+ openenv push
70
+
71
+ # Or specify options
72
+ openenv push --namespace my-org --private
73
+ ```
74
+
75
+ The `openenv push` command will:
76
+ 1. Validate that the directory is an OpenEnv environment (checks for `openenv.yaml`)
77
+ 2. Prepare a custom build for Hugging Face Docker space (enables web interface)
78
+ 3. Upload to Hugging Face (ensuring you're logged in)
79
+
80
+ ### Prerequisites
81
+
82
+ - Authenticate with Hugging Face: The command will prompt for login if not already authenticated
83
+
84
+ ### Options
85
+
86
+ - `--directory`, `-d`: Directory containing the OpenEnv environment (defaults to current directory)
87
+ - `--repo-id`, `-r`: Repository ID in format 'username/repo-name' (defaults to 'username/env-name' from openenv.yaml)
88
+ - `--base-image`, `-b`: Base Docker image to use (overrides Dockerfile FROM)
89
+ - `--private`: Deploy the space as private (default: public)
90
+
91
+ ### Examples
92
+
93
+ ```bash
94
+ # Push to your personal namespace (defaults to username/env-name from openenv.yaml)
95
+ openenv push
96
+
97
+ # Push to a specific repository
98
+ openenv push --repo-id my-org/my-env
99
+
100
+ # Push with a custom base image
101
+ openenv push --base-image ghcr.io/meta-pytorch/openenv-base:latest
102
+
103
+ # Push as a private space
104
+ openenv push --private
105
+
106
+ # Combine options
107
+ openenv push --repo-id my-org/my-env --base-image custom-base:latest --private
108
+ ```
109
+
110
+ After deployment, your space will be available at:
111
+ `https://huggingface.co/spaces/<repo-id>`
112
+
113
+ The deployed space includes:
114
+ - **Web Interface** at `/web` - Interactive UI for exploring the environment
115
+ - **API Documentation** at `/docs` - Full OpenAPI/Swagger interface
116
+ - **Health Check** at `/health` - Container health monitoring
117
+ - **WebSocket** at `/ws` - Persistent session endpoint for low-latency interactions
118
+
119
+ ## Environment Details
120
+
121
+ ### Action
122
+ **IncidentopsAction**: Contains a single field
123
+ - `action` (str) - The incident-response action to execute
124
+
125
+ ### Observation
126
+ **IncidentopsObservation**: Contains the current incident state and metadata
127
+ - `alert_summary` (str) - Human-readable incident summary
128
+ - `severity` / `likely_cause` / `hf_confidence` - Current diagnosis and model confidence
129
+ - `available_actions` (list[str]) - Actions the agent may choose next
130
+ - `incident_resolved` (bool) / `done` (bool) - Episode termination status
131
+ - `metadata` (dict) - Additional debug info
132
+
133
+ ### Reward
134
+ Rewards are emitted per step by the environment based on incident-response progress; see `server/incidentops_env_environment.py` for the exact calculation.
138
+
139
+ ## Advanced Usage
140
+
141
+ ### Connecting to an Existing Server
142
+
143
+ If you already have a Incidentops Env environment server running, you can connect directly:
144
+
145
+ ```python
146
+ from incidentops_env import IncidentopsEnv
147
+
148
+ # Connect to existing server
149
+ incidentops_envenv = IncidentopsEnv(base_url="<ENV_HTTP_URL_HERE>")
150
+
151
+ # Use as normal
152
+ result = incidentops_envenv.reset()
153
+ result = incidentops_envenv.step(IncidentopsAction(message="Hello!"))
154
+ ```
155
+
156
+ Note: When connecting to an existing server, `incidentops_envenv.close()` will NOT stop the server.
157
+
158
+ ### Using the Context Manager
159
+
160
+ The client supports context manager usage for automatic connection management:
161
+
162
+ ```python
163
+ from incidentops_env import IncidentopsAction, IncidentopsEnv
164
+
165
+ # Connect with context manager (auto-connects and closes)
166
+ with IncidentopsEnv(base_url="http://localhost:8000") as env:
167
+ result = env.reset()
168
+ print(f"Reset: {result.observation.echoed_message}")
169
+ # Multiple steps with low latency
170
+ for msg in ["Hello", "World", "!"]:
171
+ result = env.step(IncidentopsAction(message=msg))
172
+ print(f"Echoed: {result.observation.echoed_message}")
173
+ ```
174
+
175
+ The client uses WebSocket connections for:
176
+ - **Lower latency**: No HTTP connection overhead per request
177
+ - **Persistent session**: Server maintains your environment state
178
+ - **Efficient for episodes**: Better for many sequential steps
179
+
180
+ ### Concurrent WebSocket Sessions
181
+
182
+ The server supports multiple concurrent WebSocket connections. To enable this,
183
+ modify `server/app.py` to use factory mode:
184
+
185
+ ```python
186
+ # In server/app.py - use factory mode for concurrent sessions
187
+ app = create_app(
188
+ IncidentopsEnvironment, # Pass class, not instance
189
+ IncidentopsAction,
190
+ IncidentopsObservation,
191
+ max_concurrent_envs=4, # Allow 4 concurrent sessions
192
+ )
193
+ ```
194
+
195
+ Then multiple clients can connect simultaneously:
196
+
197
+ ```python
198
+ from incidentops_env import IncidentopsAction, IncidentopsEnv
199
+ from concurrent.futures import ThreadPoolExecutor
200
+
201
+ def run_episode(client_id: int):
202
+ with IncidentopsEnv(base_url="http://localhost:8000") as env:
203
+ result = env.reset()
204
+ for i in range(10):
205
+ result = env.step(IncidentopsAction(message=f"Client {client_id}, step {i}"))
206
+ return client_id, result.observation.message_length
207
+
208
+ # Run 4 episodes concurrently
209
+ with ThreadPoolExecutor(max_workers=4) as executor:
210
+ results = list(executor.map(run_episode, range(4)))
211
+ ```
212
+
213
+ ## Development & Testing
214
+
215
+ ### Direct Environment Testing
216
+
217
+ Test the environment logic directly without starting the HTTP server:
218
+
219
+ ```bash
220
+ # From the server directory
221
+ python3 server/incidentops_env_environment.py
222
+ ```
223
+
224
+ This verifies that:
225
+ - Environment resets correctly
226
+ - Step executes actions properly
227
+ - State tracking works
228
+ - Rewards are calculated correctly
229
+
230
+ ### Running Locally
231
+
232
+ Run the server locally for development:
233
+
234
+ ```bash
235
+ uvicorn server.app:app --reload
236
+ ```
237
+
238
+ ## Project Structure
239
+
240
+ ```
241
+ incidentops_env/
242
+ ├── .dockerignore # Docker build exclusions
243
+ ├── __init__.py # Module exports
244
+ ├── README.md # This file
245
+ ├── openenv.yaml # OpenEnv manifest
246
+ ├── pyproject.toml # Project metadata and dependencies
247
+ ├── uv.lock # Locked dependencies (generated)
248
+ ├── client.py # IncidentopsEnv client
249
+ ├── models.py # Action and Observation models
250
+ └── server/
251
+ ├── __init__.py # Server module exports
252
+ ├── incidentops_env_environment.py # Core environment logic
253
+ ├── app.py # FastAPI application (HTTP + WebSocket endpoints)
254
+ └── Dockerfile # Container image definition
255
+ ```
__init__.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ """Incidentops Env Environment."""
7
+ from .client import IncidentopsEnv
8
+ from .models import IncidentopsAction, IncidentopsObservation
9
+ __all__ = [
10
+ "IncidentopsAction",
11
+ "IncidentopsObservation",
12
+ "IncidentopsEnv",
13
+ ]
14
+
15
+
__pycache__/client.cpython-313.pyc ADDED
Binary file (3.11 kB). View file
 
__pycache__/models.cpython-313.pyc ADDED
Binary file (2.82 kB). View file
 
client.py ADDED
@@ -0,0 +1,55 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+
3
+ # All rights reserved.
4
+
5
+ #
6
+
7
+ # This source code is licensed under the BSD-style license found in the
8
+
9
+ # LICENSE file in the root directory of this source tree.
10
+
11
+ """Incidentops Env Environment Client."""
12
+ from __future__ import annotations
13
+ from typing import Dict
14
+ from openenv.core import EnvClient
15
+ from openenv.core.client_types import StepResult
16
+ from openenv.core.env_server.types import State
17
+ from models import IncidentopsAction, IncidentopsObservation
18
+
19
class IncidentopsEnv(EnvClient[IncidentopsAction, IncidentopsObservation, State]):
    """HTTP/WebSocket client for the incident-ops environment server."""

    def _step_payload(self, action: IncidentopsAction) -> Dict:
        """Serialize an action for the server's /step endpoint."""
        return {"action": action.action}

    def _parse_result(self, payload: Dict) -> StepResult[IncidentopsObservation]:
        """Deserialize a /reset or /step response payload into a StepResult."""
        raw = payload.get("observation", {})
        reward = payload.get("reward", 0.0)
        done = payload.get("done", False)

        # Field name -> default used when the server omits it. Built fresh on
        # every call so mutable defaults are never shared between results.
        defaults = {
            "alert_summary": "",
            "severity": "low",
            "likely_cause": "unknown",
            "hf_confidence": 0.0,
            "services_affected": [],
            "logs_available": False,
            "log_snippet": "",
            "service_healthy": False,
            "elapsed_steps": 0,
            "sla_steps_remaining": 0,
            "action_history": [],
            "available_actions": [],
            "incident_resolved": False,
            "wrong_escalations": 0,
            "metadata": {},
        }
        fields = {name: raw.get(name, fallback) for name, fallback in defaults.items()}
        # Reward/done are mirrored onto the observation model as well as the
        # StepResult, matching the IncidentopsObservation schema.
        observation = IncidentopsObservation(reward=reward, done=done, **fields)
        return StepResult(observation=observation, reward=reward, done=done)

    def _parse_state(self, payload: Dict) -> State:
        """Deserialize a GET /state response payload."""
        return State(
            episode_id=payload.get("episode_id"),
            step_count=payload.get("step_count", 0),
        )
55
+
inference.py ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from dotenv import load_dotenv
3
+ import os
4
+
5
+ load_dotenv()
6
+ import asyncio
7
+ import json
8
+ import os
9
+ from typing import List, Optional
10
+
11
+ from openai import OpenAI
12
+
13
+ from client import IncidentopsEnv
14
+ from models import IncidentopsAction
15
+
16
+ API_KEY = os.getenv("HF_TOKEN") or os.getenv("API_KEY") or os.getenv("OPENAI_API_KEY")
17
+ API_BASE_URL = os.getenv("API_BASE_URL", "https://router.huggingface.co/v1")
18
+ MODEL_NAME = os.getenv("MODEL_NAME", "Qwen/Qwen2.5-72B-Instruct")
19
+ TASK_NAME = os.getenv("INCIDENTOPS_TASK", "incidentops")
20
+ BENCHMARK = os.getenv("INCIDENTOPS_BENCHMARK", "incidentops_env")
21
+ MAX_STEPS = int(os.getenv("MAX_STEPS", "12"))
22
+ TEMPERATURE = float(os.getenv("TEMPERATURE", "0.2"))
23
+ ENV_URL = os.getenv("ENV_URL", "http://localhost:8000")
24
+ DIFFICULTY = os.getenv("DIFFICULTY", "easy")
25
+
26
+ SYSTEM_PROMPT = """
27
+ You are an incident-response policy.
28
+ Choose exactly one action from the environment's available actions.
29
+ Prefer investigation when confidence is low.
30
+ Prefer mitigation or escalation when evidence points to a cause.
31
+ Return only the action string.
32
+ """.strip()
33
+
34
+
35
def log_start(task: str, env: str, model: str) -> None:
    """Announce the start of a run on stdout in the harness's [START] format."""
    message = "[START] task={} env={} model={}".format(task, env, model)
    print(message, flush=True)
37
+
38
+
39
def log_step(step: int, action: str, reward: float, done: bool, error: Optional[str]) -> None:
    """Emit one [STEP] progress line to stdout (flushed for live harness parsing)."""
    done_repr = str(done).lower()
    error_repr = error if error else "null"
    line = f"[STEP] step={step} action={action} reward={reward:.2f} done={done_repr} error={error_repr}"
    print(line, flush=True)
44
+
45
+
46
def log_end(success: bool, steps: int, score: float, rewards: List[float]) -> None:
    """Emit the final [END] summary line with a comma-joined reward trace."""
    formatted = [f"{r:.2f}" for r in rewards]
    summary = (
        f"[END] success={str(success).lower()} steps={steps} "
        f"score={score:.2f} rewards={','.join(formatted)}"
    )
    print(summary, flush=True)
52
+
53
+
54
def choose_action(client: OpenAI, obs) -> str:
    """Pick the next incident-response action via the LLM, with heuristic fallback.

    Args:
        client: OpenAI-compatible chat client.
        obs: Latest observation (attribute access only; assumed to expose the
             IncidentopsObservation fields).

    Returns:
        An action string. When `obs.available_actions` is non-empty, the
        returned value is guaranteed to be a member of it; otherwise
        "resolve_incident" is returned.
    """
    available = obs.available_actions or []
    if not available:
        # No choices offered — attempt to close out the incident.
        return "resolve_incident"

    # Compact JSON view of the observation handed to the model.
    prompt = {
        "alert_summary": obs.alert_summary,
        "severity": obs.severity,
        "likely_cause": obs.likely_cause,
        "hf_confidence": obs.hf_confidence,
        "logs_available": obs.logs_available,
        "log_snippet": obs.log_snippet,
        "services_affected": obs.services_affected,
        "elapsed_steps": obs.elapsed_steps,
        "sla_steps_remaining": obs.sla_steps_remaining,
        "action_history": obs.action_history,
        "available_actions": available,
    }

    response = client.chat.completions.create(
        model=MODEL_NAME,
        messages=[
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": json.dumps(prompt)},
        ],
        temperature=TEMPERATURE,
        max_tokens=20,
    )
    content = (response.choices[0].message.content or "").strip()
    # Bug fix: an empty completion previously raised IndexError on
    # `.splitlines()[0]`; treat it as "no usable answer" and fall through.
    text = content.splitlines()[0].strip() if content else ""

    if text in available:
        return text

    # Fallback heuristics when the model's answer is not a valid action.
    if not obs.logs_available and "request_logs" in available:
        return "request_logs"
    if obs.likely_cause == "dns_issue" and "query_dns_status" in available:
        return "query_dns_status"
    if obs.likely_cause == "dependency_issue" and "query_dependencies" in available:
        return "query_dependencies"
    if obs.hf_confidence < 0.7 and "query_region_health" in available:
        return "query_region_health"
    if "resolve_incident" in available and (obs.service_healthy or obs.incident_resolved):
        return "resolve_incident"
    return available[0]
99
+
100
+
101
async def main() -> None:
    """Run one incident-response episode end to end and log the results.

    Reads configuration from the module-level constants (environment
    variables), drives the environment with the LLM policy in
    `choose_action`, and emits [START]/[STEP]/[END] lines that the
    harness parses from stdout.

    NOTE(review): env methods (`reset`/`step`/`close`) and
    `from_docker_image` are awaited here, which assumes an async client;
    the `IncidentopsEnv` in client.py subclasses `EnvClient` and its
    usage elsewhere (README) looks synchronous — confirm awaitability.
    """
    if not API_KEY:
        raise RuntimeError("Missing HF_TOKEN/API_KEY/OPENAI_API_KEY")

    client = OpenAI(base_url=API_BASE_URL, api_key=API_KEY)
    # Prefer a Docker-backed environment when IMAGE_NAME is set; otherwise
    # connect to an already-running server at ENV_URL.
    env = await IncidentopsEnv.from_docker_image(os.getenv("IMAGE_NAME")) if os.getenv("IMAGE_NAME") else IncidentopsEnv(base_url=ENV_URL)

    rewards: List[float] = []
    steps_taken = 0
    success = False
    score = 0.0

    log_start(TASK_NAME, BENCHMARK, MODEL_NAME)

    try:
        result = await env.reset(difficulty=DIFFICULTY)
        obs = result.observation

        for step in range(1, MAX_STEPS + 1):
            if result.done:
                break

            action_name = choose_action(client, obs)
            result = await env.step(IncidentopsAction(action=action_name))
            obs = result.observation
            reward = float(result.reward or 0.0)
            done = bool(result.done)

            rewards.append(reward)
            steps_taken = step
            log_step(step, action_name, reward, done, None)

            if done:
                break

        # Normalize the episode score into [0, 1]. The 5.0 divisor is
        # presumably the maximum attainable total reward — TODO confirm
        # against the environment's reward scale.
        total_reward = sum(rewards)
        score = max(0.0, min(1.0, total_reward / 5.0))
        success = bool(obs.incident_resolved) and score >= 0.1

    finally:
        # Best-effort cleanup: never let close() mask the real outcome.
        try:
            await env.close()
        except Exception:
            pass
        log_end(success, steps_taken, score, rewards)


if __name__ == "__main__":
    asyncio.run(main())
models.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+
8
+
9
+ """
10
+ Data models for the Incidentops Env Environment.
11
+ The incidentops_env environment is a simple test environment that echoes back messages.
12
+ """
13
+
14
+ from __future__ import annotations
15
+ from typing import Any, Dict, List, Optional
16
+ from openenv.core.env_server.types import Action, Observation
17
+ from pydantic import Field
18
+
19
class IncidentopsAction(Action):
    """Action submitted by the agent to the incident-ops environment."""

    # Should name one of the environment's currently available actions
    # (see IncidentopsObservation.available_actions).
    action: str = Field(..., description="Incident response action to execute")
21
+
22
class IncidentopsObservation(Observation):
    """Snapshot of the incident state returned by the server each step.

    Note: `reward` and `done` are also carried on the StepResult; the
    client mirrors them here as well (see client.py `_parse_result`).
    """

    alert_summary: str = Field(default="", description="Human-readable incident summary")
    severity: str = Field(default="low", description="Incident severity")
    likely_cause: str = Field(default="unknown", description="Current hypothesis for the root cause")
    hf_confidence: float = Field(default=0.0, description="Confidence score from the parsing model")
    services_affected: List[str] = Field(default_factory=list, description="Affected services")
    logs_available: bool = Field(default=False, description="Whether logs are available")
    log_snippet: str = Field(default="", description="Short evidence snippet")
    service_healthy: bool = Field(default=False, description="Whether service is healthy")
    elapsed_steps: int = Field(default=0, description="Steps since reset")
    sla_steps_remaining: int = Field(default=0, description="Steps remaining before SLA breach")
    action_history: List[str] = Field(default_factory=list, description="Actions taken so far")
    available_actions: List[str] = Field(default_factory=list, description="Available actions")
    incident_resolved: bool = Field(default=False, description="Whether the incident is resolved")
    wrong_escalations: int = Field(default=0, description="Count of wrong team escalations")
    metadata: Dict[str, Any] = Field(default_factory=dict, description="Extra debug metadata")
    reward: float = Field(default=0.0, description="Reward returned by the last step")
    done: bool = Field(default=False, description="Whether the episode is finished")
40
+
41
+
42
+
openenv.yaml ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ spec_version: 1
2
+ name: incidentops_env
3
+ type: space
4
+ runtime: fastapi
5
+ app: server.app:app
6
+ port: 8000
7
+
openenv_incidentops_env.egg-info/PKG-INFO ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.4
2
+ Name: openenv-incidentops_env
3
+ Version: 0.1.0
4
+ Summary: Incidentops Env environment for OpenEnv
5
+ Requires-Python: >=3.10
6
+ Requires-Dist: openenv-core[core]>=0.2.2
7
+ Provides-Extra: dev
8
+ Requires-Dist: pytest>=8.0.0; extra == "dev"
9
+ Requires-Dist: pytest-cov>=4.0.0; extra == "dev"
openenv_incidentops_env.egg-info/SOURCES.txt ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ README.md
2
+ pyproject.toml
3
+ ./__init__.py
4
+ ./client.py
5
+ ./inference.py
6
+ ./models.py
7
+ openenv_incidentops_env.egg-info/PKG-INFO
8
+ openenv_incidentops_env.egg-info/SOURCES.txt
9
+ openenv_incidentops_env.egg-info/dependency_links.txt
10
+ openenv_incidentops_env.egg-info/entry_points.txt
11
+ openenv_incidentops_env.egg-info/requires.txt
12
+ openenv_incidentops_env.egg-info/top_level.txt
13
+ server/__init__.py
14
+ server/app.py
15
+ server/incidentops_env_environment.py
openenv_incidentops_env.egg-info/dependency_links.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
openenv_incidentops_env.egg-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ [console_scripts]
2
+ server = incidentops_env.server.app:main
openenv_incidentops_env.egg-info/requires.txt ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ openenv-core[core]>=0.2.2
2
+
3
+ [dev]
4
+ pytest>=8.0.0
5
+ pytest-cov>=4.0.0
openenv_incidentops_env.egg-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ incidentops_env
pyproject.toml ADDED
@@ -0,0 +1,44 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+ [build-system]
7
+ requires = ["setuptools>=45", "wheel"]
8
+ build-backend = "setuptools.build_meta"
9
+
10
+ [project]
11
+ name = "openenv-incidentops_env"
12
+ version = "0.1.0"
13
+ description = "Incidentops Env environment for OpenEnv"
14
+ requires-python = ">=3.10"
15
+ dependencies = [
16
+ # Core OpenEnv runtime (provides FastAPI server + HTTP client types)
17
+ # install from github
18
+ # "openenv-core[core] @ git+https://github.com/meta-pytorch/OpenEnv.git",
19
+ "openenv-core[core]>=0.2.2",
20
+ # Environment-specific dependencies
21
+ # Add all dependencies needed for your environment here
22
+ # Examples:
23
+ # "numpy>=1.19.0",
24
+ # "torch>=2.0.0",
25
+ # "gymnasium>=0.29.0",
26
+ # "openspiel>=1.0.0",
27
+ # "smolagents>=1.22.0,<2",
28
+ ]
29
+
30
+ [project.optional-dependencies]
31
+ dev = [
32
+ "pytest>=8.0.0",
33
+ "pytest-cov>=4.0.0",
34
+ ]
35
+
36
+ [project.scripts]
37
+ # Server entry point - enables running via: uv run --project . server
38
+ # or: python -m incidentops_env.server.app
39
+ server = "incidentops_env.server.app:main"
40
+
41
+ [tool.setuptools]
42
+ include-package-data = true
43
+ packages = ["incidentops_env", "incidentops_env.server"]
44
+ package-dir = { "incidentops_env" = ".", "incidentops_env.server" = "server" }
server/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """Incidentops Env environment server components."""
8
+
9
+ from .incidentops_env_environment import IncidentopsEnvironment
10
+
11
+ __all__ = ["IncidentopsEnvironment"]
server/app.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.

"""
FastAPI application for the Incidentops Env Environment.

This module creates an HTTP server that exposes the IncidentopsEnvironment
over HTTP and WebSocket endpoints, compatible with EnvClient.

Endpoints:
- POST /reset: Reset the environment
- POST /step: Execute an action
- GET /state: Get current environment state
- GET /schema: Get action/observation schemas
- WS /ws: WebSocket endpoint for persistent sessions

Usage:
    # Development (with auto-reload):
    uvicorn server.app:app --reload --host 0.0.0.0 --port 8000

    # Production:
    uvicorn server.app:app --host 0.0.0.0 --port 8000 --workers 4

    # Or run directly:
    python -m server.app
"""
from __future__ import annotations

from openenv.core.env_server.http_server import create_app

# Support both package-style execution (relative imports) and flat/script
# execution (absolute imports). Fix: the original try-branch mixed a relative
# `..models` import with an absolute `incidentops_env_environment` import, so
# the relative branch could never fully succeed when imported as a package and
# the fallback was always taken. Both branches are now internally consistent.
try:
    from ..models import IncidentopsAction, IncidentopsObservation
    from .incidentops_env_environment import IncidentopsEnvironment
except Exception:
    from models import IncidentopsAction, IncidentopsObservation
    from server.incidentops_env_environment import IncidentopsEnvironment

# Single-session app; raise max_concurrent_envs to allow multiple concurrent
# WebSocket sessions (factory mode).
app = create_app(
    IncidentopsEnvironment,
    IncidentopsAction,
    IncidentopsObservation,
    env_name="incidentops_env",
    max_concurrent_envs=1,
)
50
+
51
+ def main(host: str = "0.0.0.0", port: int = 7860) -> None:
52
+ import uvicorn
53
+ uvicorn.run(app, host=host, port=port)
54
+
55
+ if __name__ == "__main__":
56
+ main()
server/incidentops_env_environment.py ADDED
@@ -0,0 +1,282 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
2
+ # All rights reserved.
3
+ #
4
+ # This source code is licensed under the BSD-style license found in the
5
+ # LICENSE file in the root directory of this source tree.
6
+
7
+ """
8
+ Incidentops Env Environment Implementation.
9
+
10
+ A simple test environment that echoes back messages sent to it.
11
+ Perfect for testing HTTP server infrastructure.
12
+ """
13
+
14
+ from __future__ import annotations
15
+
16
+ from dataclasses import dataclass, field
17
+ from typing import Any, Dict, List, Optional
18
+ from uuid import uuid4
19
+
20
+ from openenv.core.env_server.interfaces import Environment
21
+ from openenv.core.env_server.types import State
22
+
23
+ try:
24
+ from ..models import IncidentopsAction, IncidentopsObservation
25
+ except Exception:
26
+ from models import IncidentopsAction, IncidentopsObservation
27
+
28
+
@dataclass
class IncidentSnapshot:
    """Mutable per-episode state for one scripted incident scenario.

    The leading fields are seeded from an entry in ``SCENARIOS`` on reset;
    the defaulted trailing fields track the agent's progress.
    """

    # --- scenario definition (seeded from SCENARIOS) ---
    scenario_id: str
    task: str
    alert_text: str  # alert text surfaced to the agent as the observation summary
    hidden_truth: str  # ground-truth root cause used by the reward logic
    severity: str
    affected_services: List[str]
    logs_available: bool  # whether the log snippet is visible without request_logs
    log_snippet: str  # only surfaced in observations while logs_available is True
    likely_cause: str  # monitoring system's current best guess at the cause
    hf_confidence: float  # confidence in likely_cause (capped at 0.95 by updates)
    available_actions: List[str]
    correct_action_sequence: List[str]  # reference solution (not read by _calc_reward)
    sla_steps: int  # step budget before SLA penalties kick in
    # --- per-episode progress (mutated as the agent acts) ---
    step_count: int = 0
    resolved: bool = False
    wrong_escalations: int = 0
    action_history: List[str] = field(default_factory=list)
    evidence_collected: bool = False
    team_engaged: Optional[str] = None  # "db_team" / "network_team" once escalated correctly
# Scripted incident templates keyed by difficulty. Each entry maps 1:1 onto
# the non-defaulted fields of IncidentSnapshot and is unpacked with ** on
# reset. Only one scenario exists per difficulty for now, and
# _pick_scenario always selects index 0.
SCENARIOS: Dict[str, List[Dict[str, Any]]] = {
    "easy": [
        # Single obvious root cause; rollback_deploy resolves it directly.
        {
            "scenario_id": "easy_001",
            "task": "single_service_outage",
            "alert_text": "SEV-2: payment-service latency high after deploy.",
            "hidden_truth": "bad_deployment",
            "severity": "high",
            "affected_services": ["payment-service"],
            "logs_available": True,
            "log_snippet": "deploy at 14:32 UTC caused connection pool exhaustion",
            "likely_cause": "bad_deployment",
            "hf_confidence": 0.92,
            "available_actions": ["request_logs", "rollback_deploy", "restart_service", "resolve_incident"],
            "correct_action_sequence": ["rollback_deploy", "resolve_incident"],
            "sla_steps": 5,
        }
    ],
    "medium": [
        # Logs start unavailable; evidence gathering and the right escalation
        # are required before resolution pays off.
        {
            "scenario_id": "medium_001",
            "task": "dependency_failure",
            "alert_text": "SEV-1: api-gateway 5xx errors; user-profile-service slow; no logs available.",
            "hidden_truth": "db_timeout",
            "severity": "critical",
            "affected_services": ["api-gateway", "user-profile-service"],
            "logs_available": False,
            "log_snippet": "DB timeout errors from checkout reads",
            "likely_cause": "dependency_issue",
            "hf_confidence": 0.72,
            "available_actions": [
                "request_logs",
                "query_dependencies",
                "escalate_db_team",
                "escalate_network_team",
                "restart_service",
                "resolve_incident",
            ],
            "correct_action_sequence": ["request_logs", "query_dependencies", "escalate_db_team", "restart_service", "resolve_incident"],
            "sla_steps": 8,
        }
    ],
    "hard": [
        # Ambiguous initial diagnosis across multiple services; region/DNS
        # queries raise confidence before the network-team escalation.
        {
            "scenario_id": "hard_001",
            "task": "multi_service_root_cause",
            "alert_text": "SEV-1: EU checkout failures. Auth and payment degraded. Logs incomplete.",
            "hidden_truth": "dns_issue",
            "severity": "critical",
            "affected_services": ["auth-service", "payment-service", "checkout-service"],
            "logs_available": False,
            "log_snippet": "DNS query failures in EU region resolver",
            "likely_cause": "ambiguous",
            "hf_confidence": 0.55,
            "available_actions": [
                "request_logs",
                "query_dns_status",
                "query_region_health",
                "rollback_deploy",
                "restart_service",
                "escalate_network_team",
                "escalate_db_team",
                "broadcast_status_page",
                "resolve_incident",
            ],
            "correct_action_sequence": [
                "query_region_health",
                "query_dns_status",
                "escalate_network_team",
                "broadcast_status_page",
                "resolve_incident",
            ],
            "sla_steps": 12,
        }
    ],
}
128
+
129
+
130
class IncidentopsEnvironment(Environment):
    """Incident-response environment.

    Each episode seeds an IncidentSnapshot from a SCENARIOS template and
    scores the agent for collecting evidence, engaging the right team, and
    resolving the incident within the scenario's SLA step budget.
    """

    # All mutable state lives on the instance, so separate sessions holding
    # separate instances do not interfere with each other.
    SUPPORTS_CONCURRENT_SESSIONS: bool = True

    def __init__(self) -> None:
        self._state = State(episode_id=str(uuid4()), step_count=0)
        self._snapshot: Optional[IncidentSnapshot] = None
        self._difficulty = "easy"
        self._last_observation: Optional[IncidentopsObservation] = None

    def _pick_scenario(self, difficulty: str) -> Dict[str, Any]:
        """Return the scenario template for *difficulty* (falls back to easy)."""
        scenarios = SCENARIOS.get(difficulty, SCENARIOS["easy"])
        # Only one scenario is defined per difficulty for now.
        return scenarios[0]

    def _build_observation(self) -> IncidentopsObservation:
        """Render the current snapshot as an observation.

        ``reward`` and ``done`` are zeroed here; ``step`` overwrites them
        after scoring the action.
        """
        assert self._snapshot is not None
        remaining = max(self._snapshot.sla_steps - self._snapshot.step_count, 0)
        return IncidentopsObservation(
            alert_summary=self._snapshot.alert_text,
            severity=self._snapshot.severity,
            likely_cause=self._snapshot.likely_cause,
            hf_confidence=self._snapshot.hf_confidence,
            # Copy all list fields so observation consumers cannot mutate the
            # live snapshot (the original shared the lists directly).
            services_affected=list(self._snapshot.affected_services),
            logs_available=self._snapshot.logs_available,
            log_snippet=self._snapshot.log_snippet if self._snapshot.logs_available else "",
            service_healthy=self._snapshot.resolved,
            elapsed_steps=self._snapshot.step_count,
            sla_steps_remaining=remaining,
            action_history=list(self._snapshot.action_history),
            available_actions=list(self._snapshot.available_actions),
            incident_resolved=self._snapshot.resolved,
            wrong_escalations=self._snapshot.wrong_escalations,
            metadata={
                "scenario_id": self._snapshot.scenario_id,
                "task": self._snapshot.task,
                # NOTE(review): exposing the ground-truth cause in metadata
                # lets an agent read the answer; confirm this is intentional
                # (e.g. for debugging/evaluation only).
                "hidden_truth": self._snapshot.hidden_truth,
                "team_engaged": self._snapshot.team_engaged,
                "evidence_collected": self._snapshot.evidence_collected,
            },
            reward=0.0,
            done=self._snapshot.resolved,
        )

    def _calc_reward(self, action: str) -> float:
        """Score *action* against the hidden scenario and apply its effects.

        Called by ``step`` after the action has been appended to
        ``action_history`` and ``step_count`` has been incremented. Mutates
        the snapshot as a side effect (reveals logs, updates the diagnosed
        cause/confidence, engages teams, marks the incident resolved).
        """
        assert self._snapshot is not None
        s = self._snapshot

        reward = -0.05  # small per-step cost to discourage dithering

        # Repeat penalty: the current action is already in the history, so a
        # count greater than 1 means it was tried before.
        if s.action_history.count(action) > 1:
            reward -= 0.2

        # Evidence gathering: requesting logs only pays off when they were
        # missing, and reveals them for subsequent observations.
        if action == "request_logs" and not s.logs_available:
            reward += 0.3
            s.logs_available = True
            s.evidence_collected = True

        if action == "query_dependencies" and s.hidden_truth == "db_timeout":
            reward += 0.5
            s.likely_cause = "db_timeout"
            s.hf_confidence = min(0.95, s.hf_confidence + 0.15)
            s.evidence_collected = True

        if action == "query_dns_status" and s.hidden_truth == "dns_issue":
            reward += 0.5
            s.likely_cause = "dns_issue"
            s.hf_confidence = min(0.95, s.hf_confidence + 0.20)
            s.evidence_collected = True

        # Region health corroborates a DNS issue but does not pin the cause.
        if action == "query_region_health" and s.hidden_truth == "dns_issue":
            reward += 0.4
            s.hf_confidence = min(0.95, s.hf_confidence + 0.10)

        # Remediation: rollback only fixes a bad deployment.
        if action == "rollback_deploy" and s.hidden_truth == "bad_deployment":
            reward += 1.0
            s.resolved = True
        elif action == "rollback_deploy":
            reward -= 0.8

        # Escalations: the right team is rewarded; a wrong one is penalized
        # and counted against the agent.
        if action == "escalate_db_team" and s.hidden_truth == "db_timeout":
            reward += 0.7
            s.team_engaged = "db_team"
        elif action == "escalate_db_team":
            reward -= 0.5
            s.wrong_escalations += 1

        if action == "escalate_network_team" and s.hidden_truth == "dns_issue":
            reward += 0.7
            s.team_engaged = "network_team"
        elif action == "escalate_network_team":
            reward -= 0.5
            s.wrong_escalations += 1

        # Communication is worth more early in the incident.
        if action == "broadcast_status_page":
            reward += 0.2 if s.step_count <= 2 else 0.05

        if action == "restart_service" and s.hidden_truth in {"bad_deployment", "db_timeout"}:
            reward += 0.8
        elif action == "restart_service":
            reward -= 0.2

        # Resolution pays off only within SLA and with supporting work
        # (evidence, an engaged team, or the self-evident bad deployment);
        # otherwise it is a heavy penalty.
        if action == "resolve_incident":
            if s.resolved or s.hidden_truth in {"bad_deployment", "db_timeout", "dns_issue"}:
                if s.step_count <= s.sla_steps and (s.evidence_collected or s.team_engaged is not None or s.hidden_truth == "bad_deployment"):
                    reward += 1.5
                    s.resolved = True
                else:
                    reward -= 2.0
            else:
                reward -= 1.0

        # Flat penalty for every step taken past the SLA budget.
        if s.step_count > s.sla_steps:
            reward -= 0.5

        return reward

    def reset(self, difficulty: str = "easy") -> IncidentopsObservation:
        """Start a new episode at *difficulty* and return the first observation."""
        scenario = self._pick_scenario(difficulty)
        self._difficulty = difficulty
        self._state = State(episode_id=str(uuid4()), step_count=0)

        # Copy the template, including its list values, so per-episode
        # mutation of the snapshot can never leak back into the shared
        # module-level SCENARIOS tables (the original unpacked the template
        # directly and shared its lists).
        data = dict(scenario)
        for key in ("affected_services", "available_actions", "correct_action_sequence"):
            data[key] = list(scenario[key])
        self._snapshot = IncidentSnapshot(**data)

        # Defensive: guarantee a fresh action history for the episode.
        self._snapshot.action_history = []

        self._last_observation = self._build_observation()
        return self._last_observation

    def step(self, action: IncidentopsAction) -> IncidentopsObservation:  # type: ignore[override]
        """Apply one action, score it, and return the resulting observation."""
        assert self._snapshot is not None
        self._snapshot.step_count += 1
        self._state.step_count = self._snapshot.step_count

        action_name = action.action
        self._snapshot.action_history.append(action_name)

        reward = self._calc_reward(action_name)
        # Episode ends on resolution or when the SLA budget is exhausted.
        done = self._snapshot.resolved or self._snapshot.step_count >= self._snapshot.sla_steps

        obs = self._build_observation()
        obs.reward = reward
        obs.done = done
        obs.metadata = {
            **(obs.metadata or {}),
            "last_action": action_name,
            "last_reward": reward,
        }
        self._last_observation = obs
        return obs

    @property
    def state(self) -> State:
        """Episode-level bookkeeping (episode id and step count)."""
        return self._state
server/requirements.txt ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ openenv[core]>=0.2.0
2
+ fastapi>=0.115.0
3
+ uvicorn>=0.24.0
4
+
5
+
6
+
7
+
8
+
9
+
10
+
11
+
12
+
13
+
14
+
uv.lock ADDED
The diff for this file is too large to render. See raw diff