Draken1606 committed
Commit a8ffe4c · 1 Parent(s): f776f88

fix: emit strict score in END output and add scorer variance guard test

Files changed (3):
  1. README.md +2 -2
  2. inference.py +4 -3
  3. tests/test_openenv_env.py +19 -0
README.md CHANGED
@@ -61,11 +61,11 @@ LLM mode is enabled by default in `inference.py` and requires:
 
 ```bash
 export API_BASE_URL="https://api.openai.com/v1" # or validator-provided proxy URL
-export OPENAI_API_KEY="your-validator-provided-token"
+export HF_TOKEN="your-validator-provided-token"
 ```
 
 `MODEL_NAME` is optional and defaults to `meta-llama/Llama-3.1-8B-Instruct`.
-For compatibility with different validator versions, `API_KEY` and `HF_TOKEN` are also accepted.
+`HF_TOKEN` is required by this script.
 
 To run greedy mode locally without LLM calls:
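Note: the README now documents `HF_TOKEN` as the sole credential variable. A minimal sketch of configuration reading consistent with the updated README, assuming plain `os.environ` lookups (the script's actual handling is not shown in this diff):

```python
import os

# Illustrative only: variable names come from the README; the actual code
# in inference.py is not shown in this commit.
API_BASE_URL = os.environ["API_BASE_URL"]   # e.g. a validator-provided proxy URL
HF_TOKEN = os.environ["HF_TOKEN"]           # required by this script
MODEL_NAME = os.environ.get("MODEL_NAME", "meta-llama/Llama-3.1-8B-Instruct")
```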
 
inference.py CHANGED
@@ -85,10 +85,10 @@ def log_step(step: int, action: str, reward: float, done: bool, error: Optional[
     )
 
 
-def log_end(success: bool, steps: int, rewards: List[float]) -> None:
+def log_end(success: bool, steps: int, score: float, rewards: List[float]) -> None:
     rewards_str = ','.join(f'{r:.2f}' for r in rewards)
     print(
-        f'[END] success={str(success).lower()} steps={steps} rewards={rewards_str}',
+        f'[END] success={str(success).lower()} steps={steps} score={score:.3f} rewards={rewards_str}',
         flush=True,
     )
 
@@ -250,7 +250,8 @@ async def run_episode(url: str, difficulty: str = 'medium', use_llm: bool = Fals
         print(f'[DEBUG] Episode error: {exc}', file=sys.stderr, flush=True)
 
     finally:
-        log_end(success=success, steps=steps_taken, rewards=rewards)
+        score = _strict_unit_interval(score, fallback=0.5)
+        log_end(success=success, steps=steps_taken, score=score, rewards=rewards)
 
     return score
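Note: `_strict_unit_interval` is called in the `finally` block but not defined in this diff. A plausible sketch, assuming it coerces the score into the open unit interval and substitutes `fallback` for non-finite or non-numeric values (the real helper may differ):

```python
import math

def _strict_unit_interval(value, fallback: float = 0.5) -> float:
    """Coerce `value` into the open interval (0, 1); use `fallback` if unusable."""
    try:
        value = float(value)
    except (TypeError, ValueError):
        return fallback
    if not math.isfinite(value):
        return fallback
    # Keep the result strictly inside (0, 1) so the emitted score always
    # satisfies the range check 0 < score < 1.
    eps = 1e-6
    return min(max(value, eps), 1.0 - eps)
```

Under this assumption, the score field on the `[END]` line stays consistent with the test suite's `0.0 < env.score() < 1.0` range check.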
 
tests/test_openenv_env.py CHANGED
@@ -58,6 +58,25 @@ def test_score_in_range():
     assert 0.0 < env.score() < 1.0
 
 
+def test_score_varies_across_seeds():
+    scores = []
+    for seed in [1, 7, 13, 21, 42]:
+        env = ContainerYardEnvironment()
+        env.reset(difficulty="medium", seed=seed)
+        done = False
+        while not done:
+            stacks = as_dict(env._observe())["stack_states"]
+            chosen = next(
+                (i for i, stack in enumerate(stacks) if len(stack) < env.max_height), 0
+            )
+            obs = as_dict(env.step(ContainerAction(stack_index=chosen)))
+            done = obs["done"]
+        scores.append(env.score())
+
+    # Avoid disqualification: grader must not return a constant score.
+    assert len(set(scores)) > 1, f"Scores are constant across seeds: {scores}"
+
+
 @pytest.mark.parametrize("difficulty", ["easy", "medium", "hard"])
 def test_full_episode_completes(difficulty):
     env = ContainerYardEnvironment()
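Note: the new guard test plays each episode greedily (first non-full stack) across five seeds and fails if the scorer returns a constant value. Downstream, the enriched `[END]` line is straightforward to parse; a hypothetical consumer-side sketch, not part of this commit (the pattern and sample line are illustrative only):

```python
import re

# Field order matches the f-string in log_end(); names here are illustrative.
END_RE = re.compile(
    r"\[END\] success=(?P<success>true|false) steps=(?P<steps>\d+) "
    r"score=(?P<score>\d\.\d{3}) rewards=(?P<rewards>[-\d.,]*)"
)

line = "[END] success=true steps=12 score=0.500 rewards=0.10,0.25"
m = END_RE.match(line)
assert m is not None
score = float(m.group("score"))    # strictly inside (0, 1) thanks to the guard
rewards = [float(r) for r in m.group("rewards").split(",") if r]
```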