Spaces:
Sleeping
Sleeping
siddeshwar-kagatikar committed on
Commit ·
fde79db
1
Parent(s): 9af411f
fixed context
Browse files
src/osint_env/baselines/openai_runner.py
CHANGED
|
@@ -232,6 +232,13 @@ class OpenAIBaselineRunner:
|
|
| 232 |
def _is_gpt5_family(model: str) -> bool:
|
| 233 |
return str(model).strip().lower().startswith("gpt-5")
|
| 234 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 235 |
def _request_kwargs(self, messages: list[dict[str, Any]], episode_index: int) -> dict[str, Any]:
|
| 236 |
kwargs: dict[str, Any] = {
|
| 237 |
"model": self.config.model,
|
|
@@ -247,7 +254,8 @@ class OpenAIBaselineRunner:
|
|
| 247 |
if self._is_gpt5_family(self.config.model):
|
| 248 |
# GPT-5 family chat-completions compatibility:
|
| 249 |
# use max_completion_tokens and avoid temperature for older GPT-5 models.
|
| 250 |
-
|
|
|
|
| 251 |
else:
|
| 252 |
kwargs["temperature"] = self.config.temperature
|
| 253 |
|
|
|
|
| 232 |
def _is_gpt5_family(model: str) -> bool:
|
| 233 |
return str(model).strip().lower().startswith("gpt-5")
|
| 234 |
|
| 235 |
+
@staticmethod
|
| 236 |
+
def _supports_reasoning_effort_in_chat_completions(model: str) -> bool:
|
| 237 |
+
model_name = str(model).strip().lower()
|
| 238 |
+
if model_name.startswith("gpt-5.4-mini"):
|
| 239 |
+
return False
|
| 240 |
+
return model_name.startswith("gpt-5")
|
| 241 |
+
|
| 242 |
def _request_kwargs(self, messages: list[dict[str, Any]], episode_index: int) -> dict[str, Any]:
|
| 243 |
kwargs: dict[str, Any] = {
|
| 244 |
"model": self.config.model,
|
|
|
|
| 254 |
if self._is_gpt5_family(self.config.model):
|
| 255 |
# GPT-5 family chat-completions compatibility:
|
| 256 |
# use max_completion_tokens and avoid temperature for older GPT-5 models.
|
| 257 |
+
if self._supports_reasoning_effort_in_chat_completions(self.config.model):
|
| 258 |
+
kwargs["reasoning_effort"] = "none"
|
| 259 |
else:
|
| 260 |
kwargs["temperature"] = self.config.temperature
|
| 261 |
|
tests/test_openai_baseline.py
CHANGED
|
@@ -18,3 +18,13 @@ def test_gpt5_request_kwargs_avoid_temperature_and_use_max_completion_tokens():
|
|
| 18 |
assert kwargs["max_completion_tokens"] == 321
|
| 19 |
assert kwargs["reasoning_effort"] == "none"
|
| 20 |
assert "temperature" not in kwargs
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 18 |
assert kwargs["max_completion_tokens"] == 321
|
| 19 |
assert kwargs["reasoning_effort"] == "none"
|
| 20 |
assert "temperature" not in kwargs
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
def test_gpt54_mini_request_kwargs_skip_reasoning_effort_for_chat_completions():
    """gpt-5.4-mini requests must omit reasoning_effort and temperature.

    Builds a runner without running __init__ (no client/network), points its
    config at gpt-5.4-mini, and checks the chat-completions kwargs it emits.
    """
    runner = OpenAIBaselineRunner.__new__(OpenAIBaselineRunner)
    runner.config = OpenAIBaselineConfig(
        model="gpt-5.4-mini", max_tokens=321, temperature=0.0, seed=7
    )
    runner.tools = build_action_tools()

    request = runner._request_kwargs(
        messages=[{"role": "user", "content": "hi"}], episode_index=0
    )

    assert request["max_completion_tokens"] == 321
    assert "reasoning_effort" not in request
    assert "temperature" not in request
|