YUS200619 committed on
Commit
d9073dc
·
verified ·
1 Parent(s): 2d2d9f8

Upload app.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. app.py +29 -2
app.py CHANGED
@@ -1,6 +1,9 @@
1
  from __future__ import annotations
2
 
3
  import os
 
 
 
4
  from typing import Dict, List, Tuple
5
 
6
  import gradio as gr
@@ -8,6 +11,11 @@ import gradio as gr
8
  from environment.env import WorkLifeFirewallEnv
9
 
10
 
 
 
 
 
 
11
  def _action_for_policy(policy_style: str, event_id: str) -> str:
12
  strategic_actions = {
13
  "E1_staging": "I will fix staging first, post an incident update in 15 minutes, and share ETA.",
@@ -66,8 +74,25 @@ def _run_single_episode(policy_style: str, seed: int, randomize_order: bool) ->
66
  return "\n".join(logs), state, components
67
 
68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
69
  def run_episode(policy_style: str, seed: int, randomize_order: bool):
70
- logs, state, components = _run_single_episode(policy_style, seed, randomize_order)
 
71
  summary = (
72
  "### Outcome\n"
73
  f"- Friday energy: **{state['energy_pct']}%**\n"
@@ -81,9 +106,10 @@ def run_episode(policy_style: str, seed: int, randomize_order: bool):
81
 
82
 
83
  def compare_policies(seed: int, randomize_order: bool):
 
84
  rows = []
85
  for policy in ["strategic", "balanced", "people_pleaser"]:
86
- _, state, components = _run_single_episode(policy, seed, randomize_order)
87
  rows.append([
88
  policy,
89
  state["energy_pct"],
@@ -167,6 +193,7 @@ with gr.Blocks(title="Work-Life Firewall") as demo:
167
 
168
  if __name__ == "__main__":
169
  on_hugging_face_space = bool(os.getenv("SPACE_ID"))
 
170
  demo.launch(
171
  server_name="0.0.0.0",
172
  server_port=int(os.getenv("PORT", "7860")),
 
1
  from __future__ import annotations
2
 
3
  import os
4
+ import time
5
+ from functools import lru_cache
6
+ from threading import Lock
7
  from typing import Dict, List, Tuple
8
 
9
  import gradio as gr
 
11
  from environment.env import WorkLifeFirewallEnv
12
 
13
 
14
# Minimum spacing (seconds) enforced between handled UI events; overridable
# through the MIN_EVENT_INTERVAL_SECONDS environment variable (default 0.75).
MIN_EVENT_INTERVAL_SECONDS = float(os.getenv("MIN_EVENT_INTERVAL_SECONDS", "0.75"))
# Lock serializing access to _LAST_EVENT_TS across concurrent request threads.
_RATE_LIMIT_LOCK = Lock()
# time.monotonic() timestamp of the most recently handled event (0.0 = none yet).
_LAST_EVENT_TS = 0.0
17
+
18
+
19
  def _action_for_policy(policy_style: str, event_id: str) -> str:
20
  strategic_actions = {
21
  "E1_staging": "I will fix staging first, post an incident update in 15 minutes, and share ETA.",
 
74
  return "\n".join(logs), state, components
75
 
76
 
77
def _throttle_event_requests() -> None:
    """Space out handled UI events.

    Takes the module rate-limit lock, sleeps for whatever portion of
    MIN_EVENT_INTERVAL_SECONDS has not yet elapsed since the previous
    event, then records the new timestamp. Because the sleep happens
    while the lock is held, concurrent callers are served one at a
    time, at most one per interval.
    """
    global _LAST_EVENT_TS
    with _RATE_LIMIT_LOCK:
        wait_for = MIN_EVENT_INTERVAL_SECONDS - (time.monotonic() - _LAST_EVENT_TS)
        if wait_for > 0:
            time.sleep(wait_for)
        _LAST_EVENT_TS = time.monotonic()
85
+
86
+
87
@lru_cache(maxsize=256)
def _cached_episode(policy_style: str, seed: int, randomize_order: bool) -> Tuple[str, Dict[str, object], Dict[str, float]]:
    """Memoized wrapper around _run_single_episode.

    A (policy_style, seed, randomize_order) triple fully determines an
    episode, so repeated button clicks with the same settings reuse the
    stored (logs, state, components) result instead of re-running the
    backend simulation.
    """
    episode = _run_single_episode(policy_style, seed, randomize_order)
    return episode
91
+
92
+
93
  def run_episode(policy_style: str, seed: int, randomize_order: bool):
94
+ _throttle_event_requests()
95
+ logs, state, components = _cached_episode(policy_style, seed, randomize_order)
96
  summary = (
97
  "### Outcome\n"
98
  f"- Friday energy: **{state['energy_pct']}%**\n"
 
106
 
107
 
108
  def compare_policies(seed: int, randomize_order: bool):
109
+ _throttle_event_requests()
110
  rows = []
111
  for policy in ["strategic", "balanced", "people_pleaser"]:
112
+ _, state, components = _cached_episode(policy, seed, randomize_order)
113
  rows.append([
114
  policy,
115
  state["energy_pct"],
 
193
 
194
  if __name__ == "__main__":
195
  on_hugging_face_space = bool(os.getenv("SPACE_ID"))
196
+ demo.queue(default_concurrency_limit=1, max_size=32)
197
  demo.launch(
198
  server_name="0.0.0.0",
199
  server_port=int(os.getenv("PORT", "7860")),