{"repo": "DLR-RM/stable-baselines3", "n_pairs": 167, "version": "v2_function_scoped", "contexts": {"tests/test_monitor.py::104": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["LoadMonitorResultsError", "Monitor", "get_monitor_files", "load_results", "os", "pytest", "uuid", "warnings"], "enclosing_function": "test_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\nclass LoadMonitorResultsError(Exception):\n \"\"\"\n Raised when loading the monitor log fails.\n \"\"\"\n\n pass\n\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", 
\"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 6981}, "tests/test_logger.py::496": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["A2C", "DQN", "pytest"], "enclosing_function": "test_ep_buffers_stats_window_size", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1181}, "tests/test_monitor.py::86": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["LoadMonitorResultsError", "Monitor", "get_monitor_files", "load_results", "os", "pytest", "uuid", "warnings"], "enclosing_function": "test_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times\n\nclass LoadMonitorResultsError(Exception):\n \"\"\"\n Raised when loading the monitor log fails.\n \"\"\"\n\n pass\n\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 6981}, "tests/test_monitor.py::139": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["LoadMonitorResultsError", "Monitor", "get_monitor_files", "load_results", "os", "pytest", "uuid", "warnings"], "enclosing_function": "test_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times\n\nclass LoadMonitorResultsError(Exception):\n \"\"\"\n Raised when loading the monitor log fails.\n \"\"\"\n\n pass\n\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 6981}, "tests/test_vec_normalize.py::318": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "test_get_original", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_callbacks.py::241": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DummyVecEnv", "EvalCallback", "VecNormalize", "pytest"], "enclosing_function": "test_eval_friendly_error", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type\" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to 
the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, 
locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n \"\"\"\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 12266}, "tests/test_vec_normalize.py::321": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "test_get_original", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, 
"n_chars_extracted": 0}, "tests/test_buffers.py::244": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DictRolloutBuffer", "RolloutBuffer", "pytest"], "enclosing_function": "test_custom_rollout_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/buffers.py\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def 
compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n 
self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), 
dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n returns=self.to_torch(self.returns[batch_inds].flatten()),\n )", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 14971}, "tests/test_dict_env.py::331": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/vec_env/__init__.py"], 
"used_names": ["DummyVecEnv", "PPO", "pytest"], "enclosing_function": "test_dict_nested", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 10, "n_files_resolved": 6, "n_chars_extracted": 1711}, "tests/test_utils.py::175": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["SubprocVecEnv", "make_vec_env", "os", "pytest", "shutil"], "enclosing_function": "test_custom_vec_env", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper 
to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom 
stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 5505}, "tests/test_vec_normalize.py::163": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "check_vec_norm_equal", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_vec_extract_dict_obs.py::71": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["VecExtractDictObs"], "enclosing_function": "test_extract_dict_obs", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\n__all__ = [\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 1003}, "tests/test_tensorboard.py::83": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/utils.py"], "used_names": ["get_latest_run_id", "os"], "enclosing_function": "test_escape_log_name", "extracted_code": "# Source: stable_baselines3/common/utils.py\ndef get_latest_run_id(log_path: str = \"\", log_name: str = \"\") -> int:\n \"\"\"\n Returns the latest run number for the given log name and log path,\n by finding the greatest number in the directories.\n\n :param log_path: Path to the log folder containing several runs.\n :param log_name: Name of the experiment. 
Each run is stored\n in a folder named ``log_name_1``, ``log_name_2``, ...\n :return: latest run number\n \"\"\"\n max_run_id = 0\n for path in glob.glob(os.path.join(log_path, f\"{glob.escape(log_name)}_[0-9]*\")):\n file_name = path.split(os.sep)[-1]\n ext = file_name.split(\"_\")[-1]\n if log_name == \"_\".join(file_name.split(\"_\")[:-1]) and ext.isdigit() and int(ext) > max_run_id:\n max_run_id = int(ext)\n return max_run_id", "n_imports_parsed": 6, "n_files_resolved": 4, "n_chars_extracted": 829}, "tests/test_run.py::139": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/noise.py"], "used_names": ["SAC", "pytest"], "enclosing_function": "test_train_freq_fail", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 525}, "tests/test_buffers.py::205": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DictReplayBuffer", "DictRolloutBuffer", "ReplayBuffer", "RolloutBuffer", "numpy", "pytest", "spaces"], "enclosing_function": "test_buffer_dtypes", "extracted_code": "# Source: stable_baselines3/common/buffers.py\nclass ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observations: np.ndarray\n next_observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n dones: np.ndarray\n timeouts: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = 
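The run-id helper above drives TensorBoard folder naming; a small sketch of the behaviour (the folder names are illustrative assumptions):

```python
from stable_baselines3.common.utils import get_latest_run_id

# With existing folders ./logs/PPO_1, ./logs/PPO_2, ./logs/PPO_10,
# the greatest numeric suffix wins, so the next run becomes PPO_11.
latest = get_latest_run_id(log_path="./logs", log_name="PPO")
next_run_folder = f"PPO_{latest + 1}"
```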
psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n if not optimize_memory_usage:\n # When optimizing memory, `observations` contains also the next observation\n self.next_observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage: float = (\n self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n )\n\n if not optimize_memory_usage:\n total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> None:\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n next_obs = next_obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs)\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs)\n else:\n self.next_observations[self.pos] = np.array(next_obs)\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: VecNormalize | None = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the 
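A back-of-the-envelope version of the memory check performed in `ReplayBuffer.__init__` above (shapes and dtype are illustrative):

```python
import numpy as np

buffer_size, n_envs, obs_shape = 1_000_000, 1, (84, 84, 4)
obs_bytes = buffer_size * n_envs * int(np.prod(obs_shape))  # uint8 -> 1 byte per element
# Without optimize_memory_usage, next_observations doubles the observation cost.
total_gb = 2 * obs_bytes / 1e9
print(f"~{total_gb:.1f} GB for observations alone")  # ~56.4 GB
```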
observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: VecNormalize | None = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))\n\n @staticmethod\n def _maybe_cast_dtype(dtype: np.typing.DTypeLike | None) -> np.typing.DTypeLike | None:\n \"\"\"\n Cast `np.float64` action datatype to `np.float32`,\n keep the others dtype unchanged.\n See GH#1572 for more information.\n\n :param dtype: The original action space dtype\n :return: ``np.float32`` if the dtype was float64,\n the original dtype otherwise.\n \"\"\"\n if dtype == np.float64:\n return np.float32\n return dtype\n\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n 
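The index arithmetic in the memory-efficient `sample` above guarantees that the invalid slot at `self.pos` is never drawn; a standalone check:

```python
import numpy as np

buffer_size, pos, batch_size = 8, 3, 1000
# offsets 1..buffer_size-1, shifted by pos, cover every index except pos itself
batch_inds = (np.random.randint(1, buffer_size, size=batch_size) + pos) % buffer_size
assert pos not in batch_inds
```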
self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = 
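A standalone sketch of the GAE recursion implemented in `compute_returns_and_advantage` above (single env, no episode starts inside the buffer, illustrative numbers):

```python
import numpy as np

rewards = np.array([1.0, 1.0, 1.0], dtype=np.float32)
values = np.array([0.50, 0.60, 0.70], dtype=np.float32)
last_value, last_done = 0.80, 0.0  # bootstrap value; episode still running
gamma, gae_lambda = 0.99, 0.95

advantages = np.zeros_like(rewards)
last_gae_lam = 0.0
for step in reversed(range(len(rewards))):
    if step == len(rewards) - 1:
        next_non_terminal, next_values = 1.0 - last_done, last_value
    else:
        next_non_terminal, next_values = 1.0, values[step + 1]
    delta = rewards[step] + gamma * next_values * next_non_terminal - values[step]
    last_gae_lam = delta + gamma * gae_lambda * next_non_terminal * last_gae_lam
    advantages[step] = last_gae_lam

returns = advantages + values  # the TD(lambda) targets
```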
action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictReplayBuffer(ReplayBuffer):\n \"\"\"\n Dict Replay buffer used in off-policy algorithms like SAC/TD3.\n Extends the ReplayBuffer to use dictionary observations\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702)\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n next_observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictReplayBuffer must be used with Dict obs space only\"\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n assert not optimize_memory_usage, \"DictReplayBuffer does not support optimize_memory_usage\"\n # disabling as this adds quite a 
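`get()` above shuffles once per epoch and then slices; the minibatch pattern in isolation:

```python
import numpy as np

n_samples, batch_size = 2048, 64
indices = np.random.permutation(n_samples)
start_idx = 0
while start_idx < n_samples:
    minibatch_inds = indices[start_idx : start_idx + batch_size]
    start_idx += batch_size  # every sample visited exactly once per pass
```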
bit of complexity\n # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n self.next_observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n obs_nbytes = 0\n for _, obs in self.observations.items():\n obs_nbytes += obs.nbytes\n\n total_memory_usage: float = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n if not optimize_memory_usage:\n next_obs_nbytes = 0\n for _, obs in self.observations.items():\n next_obs_nbytes += obs.nbytes\n total_memory_usage += next_obs_nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n next_obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> None:\n # Copy to avoid modification by reference\n for key in self.observations.keys():\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs[key] = obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = np.array(obs[key])\n\n for key in self.next_observations.keys():\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n next_obs[key] = next_obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.next_observations[key][self.pos] = np.array(next_obs[key])\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample( # type: ignore[override]\n self,\n batch_size: int,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env)\n\n def _get_samples( # 
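How `DictReplayBuffer` lays out storage per key, shown with a small hypothetical `Dict` space:

```python
import numpy as np
from gymnasium import spaces

obs_space = spaces.Dict(
    {
        "image": spaces.Box(0, 255, (8, 8, 1), np.uint8),
        "vec": spaces.Box(-1.0, 1.0, (3,), np.float32),
    }
)
buffer_size, n_envs = 16, 1
observations = {
    key: np.zeros((buffer_size, n_envs, *space.shape), dtype=space.dtype)
    for key, space in obs_space.spaces.items()
}
```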
type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n # Normalize if needed and remove extra dimension (we are using only one env for now)\n obs_ = self._normalize_obs({key: obs[batch_inds, env_indices, :] for key, obs in self.observations.items()}, env)\n next_obs_ = self._normalize_obs(\n {key: obs[batch_inds, env_indices, :] for key, obs in self.next_observations.items()}, env\n )\n\n assert isinstance(obs_, dict)\n assert isinstance(next_obs_, dict)\n # Convert to torch tensor\n observations = {key: self.to_torch(obs) for key, obs in obs_.items()}\n next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}\n\n return DictReplayBufferSamples(\n observations=observations,\n actions=self.to_torch(self.actions[batch_inds, env_indices]),\n next_observations=next_observations,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n dones=self.to_torch(self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(\n -1, 1\n ),\n rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env)),\n )\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = 
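The `dones * (1 - timeouts)` product in `_get_samples` above keeps bootstrapping across time-limit truncations; in isolation:

```python
import numpy as np

dones = np.array([1.0, 1.0, 0.0], dtype=np.float32)
timeouts = np.array([1.0, 0.0, 0.0], dtype=np.float32)  # first done was a TimeLimit
effective_dones = dones * (1 - timeouts)  # [0., 1., 0.]: only the real terminal stops bootstrap
```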
np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n 
returns=self.to_torch(self.returns[batch_inds].flatten()),\n )", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 30201}, "tests/test_spaces.py::169": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py"], "used_names": ["A2C", "DDPG", "PPO", "SAC", "TD3", "pytest", "spaces"], "enclosing_function": "test_float64_action_space", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 2077}, "tests/test_envs.py::274": {"resolved_imports": ["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["IdentityEnvBox", "SimpleMultiObsEnv", "check_env", "pytest", "types"], "enclosing_function": "test_common_failures_reset", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional 
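Both rollout buffers flatten `(buffer_size, n_envs, ...)` arrays before minibatching via `swap_and_flatten`; the transform is equivalent to:

```python
import numpy as np

arr = np.arange(5 * 2 * 3).reshape(5, 2, 3)  # (buffer_size, n_envs, features)
flat = arr.swapaxes(0, 1).reshape(5 * 2, 3)  # (buffer_size * n_envs, features)
```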
warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass\n\n\n# Source: stable_baselines3/common/envs/__init__.py\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n 
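Typical use of `check_env` above on a custom environment (`MyCustomEnv` is hypothetical):

```python
from stable_baselines3.common.env_checker import check_env

env = MyCustomEnv()  # hypothetical gymnasium.Env subclass
check_env(env, warn=True)  # raises on API violations, warns on SB3 incompatibilities
```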
\"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 4487}, "tests/test_envs.py::81": {"resolved_imports": ["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["BitFlippingEnv", "check_env", "pytest", "spaces", "warnings"], "enclosing_function": "test_bit_flipping", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. 
https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 4087}, "tests/test_preprocessing.py::34": {"resolved_imports": ["stable_baselines3/common/preprocessing.py"], "used_names": ["preprocess_obs", "spaces", "torch"], "enclosing_function": "test_preprocess_obs_discrete", "extracted_code": "# Source: stable_baselines3/common/preprocessing.py\ndef preprocess_obs(\n obs: th.Tensor | dict[str, th.Tensor],\n observation_space: spaces.Space,\n normalize_images: bool = True,\n) -> th.Tensor | dict[str, th.Tensor]:\n \"\"\"\n Preprocess observation to be to a neural network.\n For images, it normalizes the values by dividing them by 255 (to have values in [0, 1])\n For discrete observations, it create a one hot vector.\n\n :param obs: Observation\n :param observation_space:\n :param normalize_images: Whether to normalize images or not\n (True by default)\n :return:\n \"\"\"\n if isinstance(observation_space, spaces.Dict):\n # Do not modify by reference the original observation\n assert isinstance(obs, dict), f\"Expected dict, got {type(obs)}\"\n preprocessed_obs = {}\n for key, _obs in obs.items():\n preprocessed_obs[key] = preprocess_obs(_obs, observation_space[key], normalize_images=normalize_images)\n return preprocessed_obs # type: ignore[return-value]\n\n assert isinstance(obs, th.Tensor), f\"Expecting a torch Tensor, but got {type(obs)}\"\n\n if isinstance(observation_space, spaces.Box):\n if normalize_images and is_image_space(observation_space):\n return obs.float() / 255.0\n return obs.float()\n\n elif isinstance(observation_space, spaces.Discrete):\n # One hot encoding and convert to float to avoid errors\n return F.one_hot(obs.long(), num_classes=int(observation_space.n)).float()\n\n elif isinstance(observation_space, spaces.MultiDiscrete):\n # Tensor concatenation of one hot encodings of each Categorical 
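The symmetric action-space warning above suggests a `Box` like the following, with rescaling done inside the env:

```python
import numpy as np
from gymnasium import spaces

# Normalized range [-1, 1]; map back inside the env with
# real_action = low + 0.5 * (action + 1.0) * (high - low)
action_space = spaces.Box(low=-1.0, high=1.0, shape=(2,), dtype=np.float32)
```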
sub-space\n return th.cat(\n [\n F.one_hot(obs_.long(), num_classes=int(observation_space.nvec[idx])).float()\n for idx, obs_ in enumerate(th.split(obs.long(), 1, dim=1))\n ],\n dim=-1,\n ).view(obs.shape[0], sum(observation_space.nvec))\n\n elif isinstance(observation_space, spaces.MultiBinary):\n return obs.float()\n else:\n raise NotImplementedError(f\"Preprocessing not implemented for {observation_space}\")", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 2150}, "tests/test_logger.py::133": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["A2C", "CSVOutputFormat", "HumanOutputFormat", "TensorBoardOutputFormat", "configure", "os"], "enclosing_function": "test_set_logger", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/logger.py\nclass HumanOutputFormat(KVWriter, SeqWriter):\n \"\"\"A human-readable output format producing ASCII tables of key-value pairs.\n\n Set attribute ``max_length`` to change the maximum length of keys and values\n to write to output (or specify it when calling ``__init__``).\n\n :param filename_or_file: the file to write the log to\n :param max_length: the maximum length of keys and values to write to output.\n Outputs longer than this will be truncated. An error will be raised\n if multiple keys are truncated to the same value. The maximum output\n width will be ``2*max_length + 7``. 
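What `preprocess_obs` produces for a `Discrete(4)` observation, per the one-hot branch above:

```python
import torch as th
import torch.nn.functional as F

obs = th.tensor([2])
one_hot = F.one_hot(obs.long(), num_classes=4).float()
# tensor([[0., 0., 1., 0.]])
```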
The default of 36 produces output\n no longer than 79 characters wide.\n \"\"\"\n\n def __init__(self, filename_or_file: str | TextIO, max_length: int = 36):\n self.max_length = max_length\n if isinstance(filename_or_file, str):\n self.file = open(filename_or_file, \"w\")\n self.own_file = True\n elif isinstance(filename_or_file, TextIOBase) or hasattr(filename_or_file, \"write\"):\n # Note: in theory `TextIOBase` check should be sufficient,\n # in practice, libraries don't always inherit from it, see GH#1598\n self.file = filename_or_file # type: ignore[assignment]\n self.own_file = False\n else:\n raise ValueError(f\"Expected file or str, got {filename_or_file}\")\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Create strings for printing\n key2str = {}\n tag = \"\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and (\"stdout\" in excluded or \"log\" in excluded):\n continue\n\n elif isinstance(value, Video):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"hparam\")\n\n elif isinstance(value, float):\n # Align left\n value_str = f\"{value:<8.3g}\"\n else:\n value_str = str(value)\n\n if key.find(\"/\") > 0: # Find tag and add it to the dict\n tag = key[: key.find(\"/\") + 1]\n key2str[(tag, self._truncate(tag))] = \"\"\n # Remove tag from key and indent the key\n if len(tag) > 0 and tag in key:\n key = f\"{'':3}{key[len(tag) :]}\"\n\n truncated_key = self._truncate(key)\n if (tag, truncated_key) in key2str:\n raise ValueError(\n f\"Key '{key}' truncated to '{truncated_key}' that already exists. 
Consider increasing `max_length`.\"\n )\n key2str[(tag, truncated_key)] = self._truncate(value_str)\n\n # Find max widths\n if len(key2str) == 0:\n warnings.warn(\"Tried to write empty key-value dict\")\n return\n else:\n tagless_keys = map(lambda x: x[1], key2str.keys())\n key_width = max(map(len, tagless_keys))\n val_width = max(map(len, key2str.values()))\n\n # Write out the data\n dashes = \"-\" * (key_width + val_width + 7)\n lines = [dashes]\n for (_, key), value in key2str.items():\n key_space = \" \" * (key_width - len(key))\n val_space = \" \" * (val_width - len(value))\n lines.append(f\"| {key}{key_space} | {value}{val_space} |\")\n lines.append(dashes)\n\n if tqdm is not None and hasattr(self.file, \"name\") and self.file.name == \"\":\n # Do not mess up with progress bar\n tqdm.write(\"\\n\".join(lines) + \"\\n\", file=sys.stdout, end=\"\")\n else:\n self.file.write(\"\\n\".join(lines) + \"\\n\")\n\n # Flush the output to the file\n self.file.flush()\n\n def _truncate(self, string: str) -> str:\n if len(string) > self.max_length:\n string = string[: self.max_length - 3] + \"...\"\n return string\n\n def write_sequence(self, sequence: list[str]) -> None:\n for i, elem in enumerate(sequence):\n self.file.write(elem)\n if i < len(sequence) - 1: # add space unless this is the last one\n self.file.write(\" \")\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.own_file:\n self.file.close()\n\nclass CSVOutputFormat(KVWriter):\n \"\"\"\n Log to a file, in a CSV format\n\n :param filename: the file to write the log to\n \"\"\"\n\n def __init__(self, filename: str):\n self.file = open(filename, \"w+\")\n self.keys: list[str] = []\n self.separator = \",\"\n self.quotechar = '\"'\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Add our current row to the history\n key_values = filter_excluded_keys(key_values, key_excluded, \"csv\")\n extra_keys = key_values.keys() - self.keys\n if extra_keys:\n self.keys.extend(extra_keys)\n self.file.seek(0)\n lines = self.file.readlines()\n self.file.seek(0)\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n self.file.write(key)\n self.file.write(\"\\n\")\n for line in lines[1:]:\n self.file.write(line[:-1])\n self.file.write(self.separator * len(extra_keys))\n self.file.write(\"\\n\")\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n value = key_values.get(key)\n\n if isinstance(value, Video):\n raise FormatUnsupportedError([\"csv\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"csv\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"csv\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"csv\"], \"hparam\")\n\n elif isinstance(value, str):\n # escape quotechars by prepending them with another quotechar\n value = value.replace(self.quotechar, self.quotechar + self.quotechar)\n\n # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers\n self.file.write(self.quotechar + value + self.quotechar)\n\n elif value is not None:\n self.file.write(str(value))\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n self.file.close()\n\nclass TensorBoardOutputFormat(KVWriter):\n \"\"\"\n Dumps key/value pairs into TensorBoard's numeric format.\n\n :param folder: the folder 
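An illustration of the header-rewrite behaviour in `CSVOutputFormat.write` above: when a later row introduces a new key, the header is rewritten and earlier rows are padded with separators.

```python
# First write:  keys = ["r"]        -> "r\n1.0\n"
# Second write: "l" appears         -> header becomes "r,l",
#               old rows padded     -> "r,l\n1.0,\n2.0,10\n"
rows = [{"r": 1.0}, {"r": 2.0, "l": 10}]
```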
to write the log to\n \"\"\"\n\n def __init__(self, folder: str):\n assert SummaryWriter is not None, \"tensorboard is not installed, you can use `pip install tensorboard` to do so\"\n self.writer = SummaryWriter(log_dir=folder)\n self._is_closed = False\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n assert not self._is_closed, \"The SummaryWriter was closed, please re-create one.\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and \"tensorboard\" in excluded:\n continue\n\n if isinstance(value, np.ScalarType):\n if isinstance(value, str):\n # str is considered a np.ScalarType\n self.writer.add_text(key, value, step)\n else:\n self.writer.add_scalar(key, value, step)\n\n if isinstance(value, (th.Tensor, np.ndarray)):\n # Convert to Torch so it works with numpy<1.24 and torch<2.0\n self.writer.add_histogram(key, th.as_tensor(value), step)\n\n if isinstance(value, Video):\n self.writer.add_video(key, value.frames, step, value.fps)\n\n if isinstance(value, Figure):\n self.writer.add_figure(key, value.figure, step, close=value.close)\n\n if isinstance(value, Image):\n self.writer.add_image(key, value.image, step, dataformats=value.dataformats)\n\n if isinstance(value, HParam):\n # we don't use `self.writer.add_hparams` to have control over the log_dir\n experiment, session_start_info, session_end_info = hparams(value.hparam_dict, metric_dict=value.metric_dict)\n self.writer.file_writer.add_summary(experiment)\n self.writer.file_writer.add_summary(session_start_info)\n self.writer.file_writer.add_summary(session_end_info)\n\n # Flush the output to the file\n self.writer.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.writer:\n self.writer.close()\n self._is_closed = True\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 11330}, "tests/test_preprocessing.py::16": {"resolved_imports": ["stable_baselines3/common/preprocessing.py"], "used_names": ["get_obs_shape", "spaces"], "enclosing_function": "test_get_obs_shape_multibinary", "extracted_code": "# Source: stable_baselines3/common/preprocessing.py\ndef get_obs_shape(\n observation_space: spaces.Space,\n) -> tuple[int, ...] 
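Using `configure` as defined above (the `tensorboard` format additionally requires the tensorboard package to be installed):

```python
from stable_baselines3.common.logger import configure

logger = configure(folder="./logs", format_strings=["stdout", "csv", "tensorboard"])
logger.record("train/reward", 1.0)
logger.dump(step=0)
```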
| dict[str, tuple[int, ...]]:\n \"\"\"\n Get the shape of the observation (useful for the buffers).\n\n :param observation_space:\n :return:\n \"\"\"\n if isinstance(observation_space, spaces.Box):\n return observation_space.shape\n elif isinstance(observation_space, spaces.Discrete):\n # Observation is an int\n return (1,)\n elif isinstance(observation_space, spaces.MultiDiscrete):\n # Number of discrete features\n return (len(observation_space.nvec),)\n elif isinstance(observation_space, spaces.MultiBinary):\n # Number of binary features\n return observation_space.shape\n elif isinstance(observation_space, spaces.Dict):\n return {key: get_obs_shape(subspace) for (key, subspace) in observation_space.spaces.items()} # type: ignore[misc]\n\n else:\n raise NotImplementedError(f\"{observation_space} observation space is not supported\")", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 1041}, "tests/test_preprocessing.py::12": {"resolved_imports": ["stable_baselines3/common/preprocessing.py"], "used_names": ["get_obs_shape", "spaces"], "enclosing_function": "test_get_obs_shape_multidiscrete", "extracted_code": "# Source: stable_baselines3/common/preprocessing.py\ndef get_obs_shape(\n observation_space: spaces.Space,\n) -> tuple[int, ...] | dict[str, tuple[int, ...]]:\n \"\"\"\n Get the shape of the observation (useful for the buffers).\n\n :param observation_space:\n :return:\n \"\"\"\n if isinstance(observation_space, spaces.Box):\n return observation_space.shape\n elif isinstance(observation_space, spaces.Discrete):\n # Observation is an int\n return (1,)\n elif isinstance(observation_space, spaces.MultiDiscrete):\n # Number of discrete features\n return (len(observation_space.nvec),)\n elif isinstance(observation_space, spaces.MultiBinary):\n # Number of binary features\n return observation_space.shape\n elif isinstance(observation_space, spaces.Dict):\n return {key: get_obs_shape(subspace) for (key, subspace) in observation_space.spaces.items()} # type: ignore[misc]\n\n else:\n raise NotImplementedError(f\"{observation_space} observation space is not supported\")", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 1041}, "tests/test_train_eval_mode.py::374": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/preprocessing.py", "stable_baselines3/common/torch_layers.py"], "used_names": ["DQN", "SAC", "TD3", "pytest"], "enclosing_function": "test_predict_with_dropout_batch_norm", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom 
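Sample outputs of `get_obs_shape` above for common spaces:

```python
from gymnasium import spaces
from stable_baselines3.common.preprocessing import get_obs_shape

get_obs_shape(spaces.Discrete(5))            # (1,)  -- the int observation
get_obs_shape(spaces.MultiDiscrete([3, 2]))  # (2,)  -- one entry per sub-space
get_obs_shape(spaces.MultiBinary(4))         # (4,)
```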
stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 1493}, "tests/test_save_load.py::272": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "os", "pytest"], "enclosing_function": "test_exclude_include_saved_params", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 1159}, "tests/test_monitor.py::46": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_monitor", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 5236}, "tests/test_her.py::219": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "DDPG", "DQN", "HerReplayBuffer", "SAC", "TD3", "deepcopy", "make_vec_env", "os", "pytest"], "enclosing_function": "test_save_load", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored 
``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n 
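A usage sketch for the ``make_vec_env`` helper shown above (assumes ``CartPole-v1`` is available; the ``./logs`` directory is hypothetical):

```python
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.vec_env import SubprocVecEnv

# Two CartPole copies, each wrapped in Monitor; files go to ./logs/0.monitor.csv etc.
vec_env = make_vec_env("CartPole-v1", n_envs=2, seed=0, monitor_dir="./logs")
obs = vec_env.reset()
print(obs.shape)  # (2, 4): one row per environment
vec_env.close()

# The vectorization backend can be swapped without touching the rest of the code
# (on some platforms SubprocVecEnv must be created under `if __name__ == "__main__":`)
subproc_env = make_vec_env("CartPole-v1", n_envs=2, vec_env_cls=SubprocVecEnv)
subproc_env.close()
```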
IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 7184}, "tests/test_her.py::262": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "HerReplayBuffer", "SAC", "deepcopy", "make_vec_env", "pathlib", "pytest", "warnings"], "enclosing_function": "test_save_load_replay_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 6318}, "tests/test_her.py::25": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", 
"stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["HER", "pytest"], "enclosing_function": "test_import_error", "extracted_code": "", "n_imports_parsed": 16, "n_files_resolved": 8, "n_chars_extracted": 0}, "tests/test_vec_monitor.py::145": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "PPO", "VecMonitor", "evaluate_policy", "warnings"], "enclosing_function": "test_vec_monitor_ppo", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/evaluation.py\ndef evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: gym.Env | VecEnv,\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Callable[[dict[str, Any], dict[str, Any]], None] | None = None,\n reward_threshold: float | None = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> tuple[float, float] | tuple[list[float], list[int]]:\n \"\"\"\n Runs the policy for ``n_eval_episodes`` episodes and outputs the average return\n per episode (sum of undiscounted rewards).\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. 
This can be any object\n that implements a ``predict`` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episode to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to perform additional checks,\n called ``n_envs`` times after each step.\n Gets locals() and globals() passed as parameters.\n See https://github.com/DLR-RM/stable-baselines3/issues/1912 for more details.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean return per episode (sum of rewards), std of reward per episode.\n Returns (list[float], list[int]) when ``return_episode_rewards`` is True, first\n list containing per-episode return and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env]) # type: ignore[list-item, return-value]\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. \"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(\n observations, # type: ignore[arg-type]\n state=states,\n episode_start=episode_starts,\n deterministic=deterministic,\n )\n new_observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. 
Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n observations = new_observations\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 8995}, "tests/test_vec_normalize.py::164": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "check_vec_norm_equal", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_buffers.py::153": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", 
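The ``evaluate_policy`` helper quoted above is typically used as follows (a minimal sketch; the short training budget is illustrative only):

```python
import gymnasium as gym

from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.monitor import Monitor

# Wrap with Monitor first so evaluation reports the true episode statistics
eval_env = Monitor(gym.make("CartPole-v1"))
model = PPO("MlpPolicy", "CartPole-v1").learn(1_000)

mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10)
print(f"{mean_reward:.2f} +/- {std_reward:.2f}")

# Per-episode values instead of aggregates
rewards, lengths = evaluate_policy(model, eval_env, n_eval_episodes=5, return_episode_rewards=True)
```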
"stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DictReplayBuffer", "DictRolloutBuffer", "ReplayBuffer", "RolloutBuffer", "get_device", "make_vec_env", "pytest"], "enclosing_function": "test_device_buffer", "extracted_code": "# Source: stable_baselines3/common/buffers.py\nclass ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observations: np.ndarray\n next_observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n dones: np.ndarray\n timeouts: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n if not optimize_memory_usage:\n # When optimizing memory, `observations` contains also the next observation\n self.next_observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage: float = (\n self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n )\n\n if not optimize_memory_usage:\n 
total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> None:\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n next_obs = next_obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs)\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs)\n else:\n self.next_observations[self.pos] = np.array(next_obs)\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: VecNormalize | None = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: VecNormalize | None = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))\n\n @staticmethod\n 
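A usage sketch for the ``ReplayBuffer`` quoted above, adding one transition and sampling it back (spaces and values are illustrative):

```python
import numpy as np
from gymnasium import spaces

from stable_baselines3.common.buffers import ReplayBuffer

obs_space = spaces.Box(-1, 1, shape=(3,), dtype=np.float32)
action_space = spaces.Box(-1, 1, shape=(2,), dtype=np.float32)
buffer = ReplayBuffer(buffer_size=100, observation_space=obs_space, action_space=action_space, n_envs=1)

obs = obs_space.sample()[None]        # add the leading env dimension (n_envs, *obs_shape)
next_obs = obs_space.sample()[None]
action = action_space.sample()[None]
buffer.add(obs, next_obs, action, reward=np.array([1.0]), done=np.array([False]), infos=[{}])

samples = buffer.sample(batch_size=1)  # ReplayBufferSamples of torch tensors
print(samples.observations.shape)      # torch.Size([1, 3])
```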
def _maybe_cast_dtype(dtype: np.typing.DTypeLike | None) -> np.typing.DTypeLike | None:\n \"\"\"\n Cast `np.float64` action datatype to `np.float32`,\n keep the others dtype unchanged.\n See GH#1572 for more information.\n\n :param dtype: The original action space dtype\n :return: ``np.float32`` if the dtype was float64,\n the original dtype otherwise.\n \"\"\"\n if dtype == np.float64:\n return np.float32\n return dtype\n\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. 
To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield 
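The GAE recursion documented in ``compute_returns_and_advantage`` can be reproduced standalone; a NumPy sketch of the same update for a single environment (the function name and inputs are illustrative, not part of the library):

```python
import numpy as np

def gae(rewards, values, episode_starts, last_value, last_done, gamma=0.99, gae_lambda=0.95):
    # Mirrors RolloutBuffer.compute_returns_and_advantage for n_envs == 1
    n = len(rewards)
    advantages = np.zeros(n)
    last_gae_lam = 0.0
    for step in reversed(range(n)):
        if step == n - 1:
            next_non_terminal = 1.0 - float(last_done)
            next_values = last_value
        else:
            next_non_terminal = 1.0 - episode_starts[step + 1]
            next_values = values[step + 1]
        # TD error, then the exponentially weighted accumulator
        delta = rewards[step] + gamma * next_values * next_non_terminal - values[step]
        last_gae_lam = delta + gamma * gae_lambda * next_non_terminal * last_gae_lam
        advantages[step] = last_gae_lam
    returns = advantages + values  # TD(lambda) estimate
    return advantages, returns

adv, ret = gae(
    rewards=np.array([1.0, 1.0, 1.0]),
    values=np.array([0.5, 0.5, 0.5]),
    episode_starts=np.array([1.0, 0.0, 0.0]),
    last_value=0.5,
    last_done=False,
)
```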
self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictReplayBuffer(ReplayBuffer):\n \"\"\"\n Dict Replay buffer used in off-policy algorithms like SAC/TD3.\n Extends the ReplayBuffer to use dictionary observations\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702)\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n next_observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictReplayBuffer must be used with Dict obs space only\"\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n assert not optimize_memory_usage, \"DictReplayBuffer does not support optimize_memory_usage\"\n # disabling as this adds quite a bit of complexity\n # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n self.next_observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n obs_nbytes = 0\n for _, obs in 
self.observations.items():\n obs_nbytes += obs.nbytes\n\n total_memory_usage: float = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n if not optimize_memory_usage:\n next_obs_nbytes = 0\n for _, obs in self.observations.items():\n next_obs_nbytes += obs.nbytes\n total_memory_usage += next_obs_nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n next_obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> None:\n # Copy to avoid modification by reference\n for key in self.observations.keys():\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs[key] = obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = np.array(obs[key])\n\n for key in self.next_observations.keys():\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n next_obs[key] = next_obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.next_observations[key][self.pos] = np.array(next_obs[key])\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample( # type: ignore[override]\n self,\n batch_size: int,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env)\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n # Normalize if needed and remove extra dimension (we are using only one env for now)\n obs_ = self._normalize_obs({key: obs[batch_inds, env_indices, :] for key, obs in self.observations.items()}, env)\n next_obs_ = self._normalize_obs(\n {key: obs[batch_inds, env_indices, :] for key, obs in self.next_observations.items()}, env\n )\n\n assert isinstance(obs_, dict)\n assert isinstance(next_obs_, dict)\n # Convert to torch tensor\n observations = {key: self.to_torch(obs) for key, obs in obs_.items()}\n next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}\n\n return DictReplayBufferSamples(\n observations=observations,\n actions=self.to_torch(self.actions[batch_inds, env_indices]),\n next_observations=next_observations,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of 
False)\n dones=self.to_torch(self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(\n -1, 1\n ),\n rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env)),\n )\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = 
log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n returns=self.to_torch(self.returns[batch_inds].flatten()),\n )\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, 
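A usage sketch for the on-policy ``RolloutBuffer`` quoted above: fill it, compute returns/advantages, then iterate over flattened minibatches via ``get()`` (values are illustrative):

```python
import numpy as np
import torch as th
from gymnasium import spaces

from stable_baselines3.common.buffers import RolloutBuffer

obs_space = spaces.Box(-1, 1, shape=(3,), dtype=np.float32)
action_space = spaces.Box(-1, 1, shape=(2,), dtype=np.float32)
buffer = RolloutBuffer(buffer_size=4, observation_space=obs_space, action_space=action_space, n_envs=1)

for _ in range(4):
    buffer.add(
        obs=obs_space.sample()[None],
        action=action_space.sample()[None],
        reward=np.array([1.0]),
        episode_start=np.array([0.0]),
        value=th.tensor([0.5]),
        log_prob=th.tensor([-0.7]),
    )
buffer.compute_returns_and_advantage(last_values=th.tensor([0.5]), dones=np.array([False]))

# Data is flattened to (buffer_size * n_envs, ...) before minibatching
for rollout_data in buffer.get(batch_size=2):
    print(rollout_data.observations.shape)  # torch.Size([2, 3])
```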
however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/utils.py\ndef get_device(device: th.device | str = \"auto\") -> th.device:\n \"\"\"\n Retrieve PyTorch device.\n It checks that the requested device is available first.\n For now, it supports only cpu and cuda.\n By default, it tries to use the gpu.\n\n :param device: One for 'auto', 'cuda', 'cpu'\n :return: Supported Pytorch device\n \"\"\"\n # Cuda by default\n if device == \"auto\":\n device = \"cuda\"\n # Force conversion to th.device\n device = th.device(device)\n\n # Cuda not available\n if device.type == th.device(\"cuda\").type and not th.cuda.is_available():\n return th.device(\"cpu\")\n\n return device", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 35251}, 
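The ``get_device`` helper quoted at the end of this context is straightforward to exercise (a minimal sketch):

```python
import torch as th

from stable_baselines3.common.utils import get_device

device = get_device("auto")  # cuda if available, otherwise cpu
assert isinstance(device, th.device)
# Requesting cuda on a CPU-only machine silently falls back to cpu
assert get_device("cuda").type in ("cuda", "cpu")
```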
"tests/test_vec_monitor.py::76": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "DummyVecEnv", "VecMonitor", "csv", "os", "uuid"], "enclosing_function": "test_vec_monitor_info_keywords", "extracted_code": "# Source: stable_baselines3/common/envs/bit_flipping_env.py\nclass BitFlippingEnv(Env):\n \"\"\"\n Simple bit flipping env, useful to test HER.\n The goal is to flip all the bits to get a vector of ones.\n In the continuous variant, if the ith action component has a value > 0,\n then the ith bit will be flipped. Uses a ``MultiBinary`` observation space\n by default.\n\n :param n_bits: Number of bits to flip\n :param continuous: Whether to use the continuous actions version or not,\n by default, it uses the discrete one\n :param max_steps: Max number of steps, by default, equal to n_bits\n :param discrete_obs_space: Whether to use the discrete observation\n version or not, ie a one-hot encoding of all possible states\n :param image_obs_space: Whether to use an image observation version\n or not, ie a greyscale image of the state\n :param channel_first: Whether to use channel-first or last image.\n \"\"\"\n\n spec = EnvSpec(\"BitFlippingEnv-v0\", \"no-entry-point\")\n state: np.ndarray\n\n def __init__(\n self,\n n_bits: int = 10,\n continuous: bool = False,\n max_steps: int | None = None,\n discrete_obs_space: bool = False,\n image_obs_space: bool = False,\n channel_first: bool = True,\n render_mode: str = \"human\",\n ):\n super().__init__()\n self.render_mode = render_mode\n # Shape of the observation when using image space\n self.image_shape = (1, 36, 36) if channel_first else (36, 36, 1)\n # The achieved goal is determined by the current state\n # here, it is a special where they are equal\n\n # observation space for observations given to the model\n self.observation_space = self._make_observation_space(discrete_obs_space, image_obs_space, n_bits)\n # observation space used to update internal state\n self._obs_space = spaces.MultiBinary(n_bits)\n\n if continuous:\n self.action_space = spaces.Box(-1, 1, shape=(n_bits,), dtype=np.float32)\n else:\n self.action_space = spaces.Discrete(n_bits)\n self.continuous = continuous\n self.discrete_obs_space = discrete_obs_space\n self.image_obs_space = image_obs_space\n self.desired_goal = np.ones((n_bits,), dtype=self.observation_space[\"desired_goal\"].dtype)\n if max_steps is None:\n max_steps = n_bits\n self.max_steps = max_steps\n self.current_step = 0\n\n def seed(self, seed: int) -> None:\n self._obs_space.seed(seed)\n\n def convert_if_needed(self, state: np.ndarray) -> int | np.ndarray:\n \"\"\"\n Convert to discrete space if needed.\n\n :param state:\n :return:\n \"\"\"\n\n if self.discrete_obs_space:\n # Convert from int8 to int32 for NumPy 2.0\n state = state.astype(np.int32)\n # The internal state is the binary representation of the\n # observed one\n return int(sum(state[i] * 2**i for i in range(len(state))))\n\n if self.image_obs_space:\n size = np.prod(self.image_shape)\n image = np.concatenate((state.astype(np.uint8) * 255, np.zeros(size - len(state), dtype=np.uint8)))\n return image.reshape(self.image_shape).astype(np.uint8)\n return state\n\n def convert_to_bit_vector(self, state: int | np.ndarray, batch_size: int) -> np.ndarray:\n \"\"\"\n Convert to bit vector if needed.\n\n :param state: The state to be 
converted, which can be either an integer or a numpy array.\n :param batch_size: The batch size.\n :return: The state converted into a bit vector.\n \"\"\"\n # Convert back to bit vector\n if isinstance(state, int):\n bit_vector = np.array(state).reshape(batch_size, -1)\n # Convert to binary representation\n bit_vector = ((bit_vector[:, :] & (1 << np.arange(len(self.state)))) > 0).astype(int)\n elif self.image_obs_space:\n bit_vector = state.reshape(batch_size, -1)[:, : len(self.state)] / 255 # type: ignore[assignment]\n else:\n bit_vector = np.array(state).reshape(batch_size, -1)\n return bit_vector\n\n def _make_observation_space(self, discrete_obs_space: bool, image_obs_space: bool, n_bits: int) -> spaces.Dict:\n \"\"\"\n Helper to create observation space\n\n :param discrete_obs_space: Whether to use the discrete observation version\n :param image_obs_space: Whether to use the image observation version\n :param n_bits: The number of bits used to represent the state\n :return: the environment observation space\n \"\"\"\n if discrete_obs_space and image_obs_space:\n raise ValueError(\"Cannot use both discrete and image observation spaces\")\n\n if discrete_obs_space:\n # In the discrete case, the agent act on the binary\n # representation of the observation\n return spaces.Dict(\n {\n \"observation\": spaces.Discrete(2**n_bits),\n \"achieved_goal\": spaces.Discrete(2**n_bits),\n \"desired_goal\": spaces.Discrete(2**n_bits),\n }\n )\n\n if image_obs_space:\n # When using image as input,\n # one image contains the bits 0 -> 0, 1 -> 255\n # and the rest is filled with zeros\n return spaces.Dict(\n {\n \"observation\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"achieved_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"desired_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n }\n )\n\n return spaces.Dict(\n {\n \"observation\": spaces.MultiBinary(n_bits),\n \"achieved_goal\": spaces.MultiBinary(n_bits),\n \"desired_goal\": spaces.MultiBinary(n_bits),\n }\n )\n\n def _get_obs(self) -> dict[str, int | np.ndarray]:\n \"\"\"\n Helper to create the observation.\n\n :return: The current observation.\n \"\"\"\n return OrderedDict(\n [\n (\"observation\", self.convert_if_needed(self.state.copy())),\n (\"achieved_goal\", self.convert_if_needed(self.state.copy())),\n (\"desired_goal\", self.convert_if_needed(self.desired_goal.copy())),\n ]\n )\n\n def reset(self, *, seed: int | None = None, options: dict | None = None) -> tuple[dict[str, int | np.ndarray], dict]:\n if seed is not None:\n self._obs_space.seed(seed)\n self.current_step = 0\n self.state = self._obs_space.sample()\n return self._get_obs(), {}\n\n def step(self, action: np.ndarray | int) -> GymStepReturn:\n \"\"\"\n Step into the env.\n\n :param action:\n :return:\n \"\"\"\n if self.continuous:\n self.state[action > 0] = 1 - self.state[action > 0]\n else:\n self.state[action] = 1 - self.state[action]\n obs = self._get_obs()\n reward = float(self.compute_reward(obs[\"achieved_goal\"], obs[\"desired_goal\"], None).item())\n terminated = reward == 0\n self.current_step += 1\n # Episode terminate when we reached the goal or the max number of steps\n info = {\"is_success\": terminated}\n truncated = self.current_step >= self.max_steps\n return obs, reward, terminated, truncated, info\n\n def compute_reward(\n self, achieved_goal: int | np.ndarray, desired_goal: int | np.ndarray, _info: dict[str, Any] | None\n ) -> 
np.float32:\n # As we are using a vectorized version, we need to keep track of the `batch_size`\n if isinstance(achieved_goal, int):\n batch_size = 1\n elif self.image_obs_space:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 3 else 1\n else:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 1 else 1\n\n desired_goal = self.convert_to_bit_vector(desired_goal, batch_size)\n achieved_goal = self.convert_to_bit_vector(achieved_goal, batch_size)\n\n # Deceptive reward: it is positive only when the goal is achieved\n # Here we are using a vectorized version\n distance = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n return -(distance > 0).astype(np.float32)\n\n def render(self) -> np.ndarray | None: # type: ignore[override]\n if self.render_mode == \"rgb_array\":\n return self.state.copy()\n print(self.state)\n return None\n\n def close(self) -> None:\n pass\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 11130}, "tests/test_monitor.py::63": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_monitor", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param 
filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 5236}, "tests/test_logger.py::536": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["HumanOutputFormat", "TextIOBase", "pytest"], "enclosing_function": "test_human_out_custom_text_io", "extracted_code": "# Source: stable_baselines3/common/logger.py\nclass HumanOutputFormat(KVWriter, SeqWriter):\n \"\"\"A human-readable output format producing ASCII tables of key-value pairs.\n\n Set attribute ``max_length`` to change the maximum length of keys and values\n to write to output (or specify it when calling ``__init__``).\n\n :param filename_or_file: the file to write the log to\n :param max_length: the maximum length of keys and values to write to output.\n Outputs longer than this will be truncated. An error will be raised\n if multiple keys are truncated to the same value. The maximum output\n width will be ``2*max_length + 7``. 
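The Monitor extraction for this context ends here. A minimal sketch of how the wrapper is exercised, assuming gymnasium's "CartPole-v1" (not part of the extraction): run one random episode and read the record that step() attaches under info["episode"] on termination or truncation.

import gymnasium as gym
from stable_baselines3.common.monitor import Monitor

env = Monitor(gym.make("CartPole-v1"))  # filename=None: nothing written to disk
obs, _ = env.reset()
done = False
while not done:
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    done = terminated or truncated
# "r" is the rounded episode return, "l" its length, "t" the wall-clock offset
print(info["episode"])
print(env.get_episode_lengths())  # stats accumulate in memory even without a log file
env.close()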
The default of 36 produces output\n no longer than 79 characters wide.\n \"\"\"\n\n def __init__(self, filename_or_file: str | TextIO, max_length: int = 36):\n self.max_length = max_length\n if isinstance(filename_or_file, str):\n self.file = open(filename_or_file, \"w\")\n self.own_file = True\n elif isinstance(filename_or_file, TextIOBase) or hasattr(filename_or_file, \"write\"):\n # Note: in theory `TextIOBase` check should be sufficient,\n # in practice, libraries don't always inherit from it, see GH#1598\n self.file = filename_or_file # type: ignore[assignment]\n self.own_file = False\n else:\n raise ValueError(f\"Expected file or str, got {filename_or_file}\")\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Create strings for printing\n key2str = {}\n tag = \"\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and (\"stdout\" in excluded or \"log\" in excluded):\n continue\n\n elif isinstance(value, Video):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"hparam\")\n\n elif isinstance(value, float):\n # Align left\n value_str = f\"{value:<8.3g}\"\n else:\n value_str = str(value)\n\n if key.find(\"/\") > 0: # Find tag and add it to the dict\n tag = key[: key.find(\"/\") + 1]\n key2str[(tag, self._truncate(tag))] = \"\"\n # Remove tag from key and indent the key\n if len(tag) > 0 and tag in key:\n key = f\"{'':3}{key[len(tag) :]}\"\n\n truncated_key = self._truncate(key)\n if (tag, truncated_key) in key2str:\n raise ValueError(\n f\"Key '{key}' truncated to '{truncated_key}' that already exists. 
Consider increasing `max_length`.\"\n )\n key2str[(tag, truncated_key)] = self._truncate(value_str)\n\n # Find max widths\n if len(key2str) == 0:\n warnings.warn(\"Tried to write empty key-value dict\")\n return\n else:\n tagless_keys = map(lambda x: x[1], key2str.keys())\n key_width = max(map(len, tagless_keys))\n val_width = max(map(len, key2str.values()))\n\n # Write out the data\n dashes = \"-\" * (key_width + val_width + 7)\n lines = [dashes]\n for (_, key), value in key2str.items():\n key_space = \" \" * (key_width - len(key))\n val_space = \" \" * (val_width - len(value))\n lines.append(f\"| {key}{key_space} | {value}{val_space} |\")\n lines.append(dashes)\n\n if tqdm is not None and hasattr(self.file, \"name\") and self.file.name == \"\":\n # Do not mess up with progress bar\n tqdm.write(\"\\n\".join(lines) + \"\\n\", file=sys.stdout, end=\"\")\n else:\n self.file.write(\"\\n\".join(lines) + \"\\n\")\n\n # Flush the output to the file\n self.file.flush()\n\n def _truncate(self, string: str) -> str:\n if len(string) > self.max_length:\n string = string[: self.max_length - 3] + \"...\"\n return string\n\n def write_sequence(self, sequence: list[str]) -> None:\n for i, elem in enumerate(sequence):\n self.file.write(elem)\n if i < len(sequence) - 1: # add space unless this is the last one\n self.file.write(\" \")\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.own_file:\n self.file.close()", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 4801}, "tests/test_cnn.py::154": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/preprocessing.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "FakeImageEnv", "SAC", "TD3", "deepcopy", "pytest"], "enclosing_function": "test_features_extractor_target_net", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom 
stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 2115}, "tests/test_her.py::218": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "DDPG", "DQN", "HerReplayBuffer", "SAC", "TD3", "deepcopy", "make_vec_env", "os", "pytest"], "enclosing_function": "test_save_load", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/env_util.py\ndef 
make_vec_env(\n    env_id: str | Callable[..., gym.Env],\n    n_envs: int = 1,\n    seed: int | None = None,\n    start_index: int = 0,\n    monitor_dir: str | None = None,\n    wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n    env_kwargs: dict[str, Any] | None = None,\n    vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n    vec_env_kwargs: dict[str, Any] | None = None,\n    monitor_kwargs: dict[str, Any] | None = None,\n    wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n    \"\"\"\n    Create a wrapped, monitored ``VecEnv``.\n    By default it uses a ``DummyVecEnv`` which is usually faster\n    than a ``SubprocVecEnv``.\n\n    :param env_id: either the env ID, the env class or a callable returning an env\n    :param n_envs: the number of environments you wish to have in parallel\n    :param seed: the initial seed for the random number generator\n    :param start_index: start rank index\n    :param monitor_dir: Path to a folder where the monitor files will be saved.\n        If None, no file will be written, however, the env will still be wrapped\n        in a Monitor wrapper to provide additional information about training.\n    :param wrapper_class: Additional wrapper to use on the environment.\n        This can also be a function with single argument that wraps the environment in many things.\n        Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n        In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n        See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n    :param env_kwargs: Optional keyword arguments to pass to the env constructor\n    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n    :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n    :return: The wrapped environment\n    \"\"\"\n    env_kwargs = env_kwargs or {}\n    vec_env_kwargs = vec_env_kwargs or {}\n    monitor_kwargs = monitor_kwargs or {}\n    wrapper_kwargs = wrapper_kwargs or {}\n    assert vec_env_kwargs is not None  # for mypy\n\n    def make_env(rank: int) -> Callable[[], gym.Env]:\n        def _init() -> gym.Env:\n            # For type checker:\n            assert monitor_kwargs is not None\n            assert wrapper_kwargs is not None\n            assert env_kwargs is not None\n\n            if isinstance(env_id, str):\n                # if the render mode was not specified, we set it to `rgb_array` as default.\n                kwargs = {\"render_mode\": \"rgb_array\"}\n                kwargs.update(env_kwargs)\n                try:\n                    env = gym.make(env_id, **kwargs)  # type: ignore[arg-type]\n                except TypeError:\n                    env = gym.make(env_id, **env_kwargs)\n            else:\n                env = env_id(**env_kwargs)\n            # Patch to support gym 0.21/0.26 and gymnasium\n            env = _patch_env(env)\n\n            if seed is not None:\n                # Note: here we only seed the action space\n                # We will seed the env at the next reset\n                env.action_space.seed(seed + rank)\n            # Wrap the env in a Monitor wrapper\n            # to have additional training information\n            monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n            # Create the monitor folder if needed\n            if monitor_path is not None and monitor_dir is not None:\n                os.makedirs(monitor_dir, exist_ok=True)\n            env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n            # Optionally, wrap the environment with the provided wrapper\n            if wrapper_class is not None:\n                env = wrapper_class(env, **wrapper_kwargs)\n            return env\n\n        return _init\n\n    # No custom VecEnv is 
passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 7184}, "tests/test_vec_envs.py::87": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "pytest", "spaces"], "enclosing_function": "test_vecenv_func_checker", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 1159}, "tests/test_vec_envs.py::315": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "check_vecenv_obs", "extracted_code": "", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 0}, "tests/test_vec_stacked_obs.py::38": {"resolved_imports": ["stable_baselines3/common/vec_env/stacked_observations.py"], "used_names": ["spaces"], "enclosing_function": "test_compute_stacking_multidim_box_channel_first", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/test_utils.py::116": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", 
"stable_baselines3/common/vec_env/__init__.py"], "used_names": ["make_atari_env", "pytest"], "enclosing_function": "test_make_atari_env", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_atari_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv] | type[SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored VecEnv for Atari.\n It is a wrapper around ``make_vec_env`` that includes common preprocessing for Atari games.\n\n .. note::\n By default, the ``AtariWrapper`` uses ``terminal_on_life_loss=True``, which causes\n ``env.reset()`` to perform a no-op step instead of truly resetting when the environment\n terminates due to a loss of life (but not game over). To ensure ``reset()`` always\n resets the env, pass ``wrapper_kwargs=dict(terminal_on_life_loss=False)``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_kwargs: Optional keyword argument to pass to the ``AtariWrapper``\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :return: The wrapped environment\n \"\"\"\n return make_vec_env(\n env_id,\n n_envs=n_envs,\n seed=seed,\n start_index=start_index,\n monitor_dir=monitor_dir,\n wrapper_class=AtariWrapper,\n env_kwargs=env_kwargs,\n vec_env_cls=vec_env_cls,\n vec_env_kwargs=vec_env_kwargs,\n monitor_kwargs=monitor_kwargs,\n wrapper_kwargs=wrapper_kwargs,\n )", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 2378}, "tests/test_logger.py::161": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["DEBUG", "INFO", "configure"], "enclosing_function": "test_main", "extracted_code": "# Source: stable_baselines3/common/logger.py\nDEBUG = 10\n\nINFO = 20\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1278}, "tests/test_vec_normalize.py::276": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecNormalize", "pytest"], "enclosing_function": "test_vec_env", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import 
VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 2800}, "tests/test_cnn.py::152": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/preprocessing.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "FakeImageEnv", "SAC", "TD3", "deepcopy", "pytest"], "enclosing_function": "test_features_extractor_target_net", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith 
open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 2115}, "tests/test_logger.py::124": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["A2C", "CSVOutputFormat", "HumanOutputFormat", "TensorBoardOutputFormat", "configure", "os"], "enclosing_function": "test_set_logger", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/logger.py\nclass HumanOutputFormat(KVWriter, SeqWriter):\n \"\"\"A human-readable output format producing ASCII tables of key-value pairs.\n\n Set attribute ``max_length`` to change the maximum length of keys and values\n to write to output (or specify it when calling ``__init__``).\n\n :param filename_or_file: the file to write the log to\n :param max_length: the maximum length of keys and values to write to output.\n Outputs longer than this will be truncated. An error will be raised\n if multiple keys are truncated to the same value. The maximum output\n width will be ``2*max_length + 7``. 
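Before the HumanOutputFormat docstring continues, a hedged sketch of the file handling that test_human_out_custom_text_io targets: io.StringIO passes the TextIOBase check, and any other object with a write method falls into the hasattr branch noted above (GH#1598).

import io
from stable_baselines3.common.logger import HumanOutputFormat

stream = io.StringIO()
writer = HumanOutputFormat(stream, max_length=36)  # own_file stays False
writer.write(
    {"train/loss": 0.25, "train/n_updates": 10},
    key_excluded={"train/loss": None, "train/n_updates": None},
)
print(stream.getvalue())  # ASCII table, "train/" tag printed once, keys indented
writer.close()  # does not close the stream, since own_file is False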
The default of 36 produces output\n no longer than 79 characters wide.\n \"\"\"\n\n def __init__(self, filename_or_file: str | TextIO, max_length: int = 36):\n self.max_length = max_length\n if isinstance(filename_or_file, str):\n self.file = open(filename_or_file, \"w\")\n self.own_file = True\n elif isinstance(filename_or_file, TextIOBase) or hasattr(filename_or_file, \"write\"):\n # Note: in theory `TextIOBase` check should be sufficient,\n # in practice, libraries don't always inherit from it, see GH#1598\n self.file = filename_or_file # type: ignore[assignment]\n self.own_file = False\n else:\n raise ValueError(f\"Expected file or str, got {filename_or_file}\")\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Create strings for printing\n key2str = {}\n tag = \"\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and (\"stdout\" in excluded or \"log\" in excluded):\n continue\n\n elif isinstance(value, Video):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"hparam\")\n\n elif isinstance(value, float):\n # Align left\n value_str = f\"{value:<8.3g}\"\n else:\n value_str = str(value)\n\n if key.find(\"/\") > 0: # Find tag and add it to the dict\n tag = key[: key.find(\"/\") + 1]\n key2str[(tag, self._truncate(tag))] = \"\"\n # Remove tag from key and indent the key\n if len(tag) > 0 and tag in key:\n key = f\"{'':3}{key[len(tag) :]}\"\n\n truncated_key = self._truncate(key)\n if (tag, truncated_key) in key2str:\n raise ValueError(\n f\"Key '{key}' truncated to '{truncated_key}' that already exists. 
Consider increasing `max_length`.\"\n )\n key2str[(tag, truncated_key)] = self._truncate(value_str)\n\n # Find max widths\n if len(key2str) == 0:\n warnings.warn(\"Tried to write empty key-value dict\")\n return\n else:\n tagless_keys = map(lambda x: x[1], key2str.keys())\n key_width = max(map(len, tagless_keys))\n val_width = max(map(len, key2str.values()))\n\n # Write out the data\n dashes = \"-\" * (key_width + val_width + 7)\n lines = [dashes]\n for (_, key), value in key2str.items():\n key_space = \" \" * (key_width - len(key))\n val_space = \" \" * (val_width - len(value))\n lines.append(f\"| {key}{key_space} | {value}{val_space} |\")\n lines.append(dashes)\n\n if tqdm is not None and hasattr(self.file, \"name\") and self.file.name == \"\":\n # Do not mess up with progress bar\n tqdm.write(\"\\n\".join(lines) + \"\\n\", file=sys.stdout, end=\"\")\n else:\n self.file.write(\"\\n\".join(lines) + \"\\n\")\n\n # Flush the output to the file\n self.file.flush()\n\n def _truncate(self, string: str) -> str:\n if len(string) > self.max_length:\n string = string[: self.max_length - 3] + \"...\"\n return string\n\n def write_sequence(self, sequence: list[str]) -> None:\n for i, elem in enumerate(sequence):\n self.file.write(elem)\n if i < len(sequence) - 1: # add space unless this is the last one\n self.file.write(\" \")\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.own_file:\n self.file.close()\n\nclass CSVOutputFormat(KVWriter):\n \"\"\"\n Log to a file, in a CSV format\n\n :param filename: the file to write the log to\n \"\"\"\n\n def __init__(self, filename: str):\n self.file = open(filename, \"w+\")\n self.keys: list[str] = []\n self.separator = \",\"\n self.quotechar = '\"'\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Add our current row to the history\n key_values = filter_excluded_keys(key_values, key_excluded, \"csv\")\n extra_keys = key_values.keys() - self.keys\n if extra_keys:\n self.keys.extend(extra_keys)\n self.file.seek(0)\n lines = self.file.readlines()\n self.file.seek(0)\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n self.file.write(key)\n self.file.write(\"\\n\")\n for line in lines[1:]:\n self.file.write(line[:-1])\n self.file.write(self.separator * len(extra_keys))\n self.file.write(\"\\n\")\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n value = key_values.get(key)\n\n if isinstance(value, Video):\n raise FormatUnsupportedError([\"csv\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"csv\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"csv\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"csv\"], \"hparam\")\n\n elif isinstance(value, str):\n # escape quotechars by prepending them with another quotechar\n value = value.replace(self.quotechar, self.quotechar + self.quotechar)\n\n # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers\n self.file.write(self.quotechar + value + self.quotechar)\n\n elif value is not None:\n self.file.write(str(value))\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n self.file.close()\n\nclass TensorBoardOutputFormat(KVWriter):\n \"\"\"\n Dumps key/value pairs into TensorBoard's numeric format.\n\n :param folder: the folder 
to write the log to\n \"\"\"\n\n def __init__(self, folder: str):\n assert SummaryWriter is not None, \"tensorboard is not installed, you can use `pip install tensorboard` to do so\"\n self.writer = SummaryWriter(log_dir=folder)\n self._is_closed = False\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n assert not self._is_closed, \"The SummaryWriter was closed, please re-create one.\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and \"tensorboard\" in excluded:\n continue\n\n if isinstance(value, np.ScalarType):\n if isinstance(value, str):\n # str is considered a np.ScalarType\n self.writer.add_text(key, value, step)\n else:\n self.writer.add_scalar(key, value, step)\n\n if isinstance(value, (th.Tensor, np.ndarray)):\n # Convert to Torch so it works with numpy<1.24 and torch<2.0\n self.writer.add_histogram(key, th.as_tensor(value), step)\n\n if isinstance(value, Video):\n self.writer.add_video(key, value.frames, step, value.fps)\n\n if isinstance(value, Figure):\n self.writer.add_figure(key, value.figure, step, close=value.close)\n\n if isinstance(value, Image):\n self.writer.add_image(key, value.image, step, dataformats=value.dataformats)\n\n if isinstance(value, HParam):\n # we don't use `self.writer.add_hparams` to have control over the log_dir\n experiment, session_start_info, session_end_info = hparams(value.hparam_dict, metric_dict=value.metric_dict)\n self.writer.file_writer.add_summary(experiment)\n self.writer.file_writer.add_summary(session_start_info)\n self.writer.file_writer.add_summary(session_end_info)\n\n # Flush the output to the file\n self.writer.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.writer:\n self.writer.close()\n self._is_closed = True\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 11330}, "tests/test_n_step_replay.py::104": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["pytest"], "enclosing_function": "test_nstep_early_termination", "extracted_code": "", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/test_custom_policy.py::76": {"resolved_imports": 
["stable_baselines3/__init__.py", "stable_baselines3/common/sb2_compat/rmsprop_tf_like.py", "stable_baselines3/common/torch_layers.py"], "used_names": ["create_mlp", "nn"], "enclosing_function": "test_create_mlp", "extracted_code": "# Source: stable_baselines3/common/torch_layers.py\ndef create_mlp(\n input_dim: int,\n output_dim: int,\n net_arch: list[int],\n activation_fn: type[nn.Module] = nn.ReLU,\n squash_output: bool = False,\n with_bias: bool = True,\n pre_linear_modules: list[type[nn.Module]] | None = None,\n post_linear_modules: list[type[nn.Module]] | None = None,\n) -> list[nn.Module]:\n \"\"\"\n Create a multi layer perceptron (MLP), which is\n a collection of fully-connected layers each followed by an activation function.\n\n :param input_dim: Dimension of the input vector\n :param output_dim: Dimension of the output (last layer, for instance, the number of actions)\n :param net_arch: Architecture of the neural net\n It represents the number of units per layer.\n The length of this list is the number of layers.\n :param activation_fn: The activation function\n to use after each layer.\n :param squash_output: Whether to squash the output using a Tanh\n activation function\n :param with_bias: If set to False, the layers will not learn an additive bias\n :param pre_linear_modules: List of nn.Module to add before the linear layers.\n These modules should maintain the input tensor dimension (e.g. BatchNorm).\n The number of input features is passed to the module's constructor.\n Compared to post_linear_modules, they are used before the output layer (output_dim > 0).\n :param post_linear_modules: List of nn.Module to add after the linear layers\n (and before the activation function). These modules should maintain the input\n tensor dimension (e.g. Dropout, LayerNorm). They are not used after the\n output layer (output_dim > 0). 
The number of input features is passed to\n the module's constructor.\n :return: The list of layers of the neural network\n \"\"\"\n\n pre_linear_modules = pre_linear_modules or []\n post_linear_modules = post_linear_modules or []\n\n modules = []\n if len(net_arch) > 0:\n # BatchNorm maintains input dim\n for module in pre_linear_modules:\n modules.append(module(input_dim))\n\n modules.append(nn.Linear(input_dim, net_arch[0], bias=with_bias))\n\n # LayerNorm, Dropout maintain output dim\n for module in post_linear_modules:\n modules.append(module(net_arch[0]))\n\n modules.append(activation_fn())\n\n for idx in range(len(net_arch) - 1):\n for module in pre_linear_modules:\n modules.append(module(net_arch[idx]))\n\n modules.append(nn.Linear(net_arch[idx], net_arch[idx + 1], bias=with_bias))\n\n for module in post_linear_modules:\n modules.append(module(net_arch[idx + 1]))\n\n modules.append(activation_fn())\n\n if output_dim > 0:\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else input_dim\n # Only add BatchNorm before output layer\n for module in pre_linear_modules:\n modules.append(module(last_layer_dim))\n\n modules.append(nn.Linear(last_layer_dim, output_dim, bias=with_bias))\n if squash_output:\n modules.append(nn.Tanh())\n return modules", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 3147}, "tests/test_save_load.py::881": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["ConstantSchedule", "DummyVecEnv", "FloatSchedule", "IdentityEnvBox", "PPO", "pytest"], "enclosing_function": "test_save_load_clip_range_portable", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/utils.py\nclass FloatSchedule:\n \"\"\"\n Wrapper that ensures the output of a Schedule is cast to float.\n Can wrap either a constant value or an existing callable Schedule.\n\n :param value_schedule: Constant value or callable schedule\n (e.g. 
LinearSchedule, ConstantSchedule)\"\n    \"\"\"\n\n    def __init__(self, value_schedule: Schedule | float):\n        if isinstance(value_schedule, FloatSchedule):\n            self.value_schedule: Schedule = value_schedule.value_schedule\n        elif isinstance(value_schedule, (float, int)):\n            self.value_schedule = ConstantSchedule(float(value_schedule))\n        else:\n            assert callable(value_schedule), f\"The learning rate schedule must be a float or a callable, not {value_schedule}\"\n            self.value_schedule = value_schedule\n\n    def __call__(self, progress_remaining: float) -> float:\n        # Cast to float to avoid unpickling errors to enable weights_only=True, see GH#1900\n        # Some types have odd behaviors when part of a Schedule, like numpy floats\n        return float(self.value_schedule(progress_remaining))\n\n    def __repr__(self) -> str:\n        return f\"FloatSchedule({self.value_schedule})\"\n\nclass ConstantSchedule:\n    \"\"\"\n    Constant schedule that always returns the same value.\n    Useful for fixed learning rates or clip ranges.\n\n    :param val: constant value\n    \"\"\"\n\n    def __init__(self, val: float):\n        self.val = val\n\n    def __call__(self, _: float) -> float:\n        return self.val\n\n    def __repr__(self) -> str:\n        return f\"ConstantSchedule(val={self.val})\"\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n    \"CloudpickleWrapper\",\n    \"DummyVecEnv\",\n    \"StackedObservations\",\n    \"SubprocVecEnv\",\n    \"VecCheckNan\",\n    \"VecEnv\",\n    \"VecEnvWrapper\",\n    \"VecExtractDictObs\",\n    \"VecFrameStack\",\n    \"VecMonitor\",\n    \"VecNormalize\",", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 3834}, "tests/test_vec_normalize.py::287": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecNormalize", "pytest"], "enclosing_function": "test_vec_env", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor 
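The two schedule classes above compose in one direction: ``FloatSchedule`` normalizes whatever it is given (a bare constant becomes a ``ConstantSchedule``) and guarantees a plain ``float`` on every call. A minimal sketch, assuming the classes are importable from ``stable_baselines3.common.utils`` as in the extraction; ``progress_remaining`` runs from 1.0 at the start of training down to 0.0 at the end:

from stable_baselines3.common.utils import FloatSchedule

clip_range = FloatSchedule(0.2)  # constant -> wrapped in a ConstantSchedule internally
lr = FloatSchedule(lambda progress_remaining: 1e-3 * progress_remaining)  # hand-written linear decay

assert clip_range(0.5) == 0.2  # same value at any progress
assert lr(1.0) == 1e-3 and lr(0.0) == 0.0
print(repr(clip_range))  # FloatSchedule(ConstantSchedule(val=0.2))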
import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 2800}, "tests/test_vec_normalize.py::389": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "HerReplayBuffer", "SAC", "VecNormalize"], "enclosing_function": "test_her_normalization", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\ndef 
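A short sketch of the unwrapping helpers extracted above: ``unwrap_vec_normalize`` walks the ``VecEnvWrapper`` chain and returns the ``VecNormalize`` layer if one is present, else ``None``. The imports below are those exported by ``stable_baselines3.common.vec_env`` per the ``__all__`` list in this record; the CartPole env id assumes gymnasium's classic-control extras:

import gymnasium as gym

from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize, unwrap_vec_normalize

venv = DummyVecEnv([lambda: gym.make("CartPole-v1")])
assert unwrap_vec_normalize(venv) is None  # no VecNormalize in the wrapper chain yet

venv = VecNormalize(venv)
assert unwrap_vec_normalize(venv) is venv  # found at the top of the wrapper chain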
HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 4161}, "tests/test_vec_normalize.py::391": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", 
"stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "HerReplayBuffer", "SAC", "VecNormalize"], "enclosing_function": "test_her_normalization", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef 
unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 4161}, "tests/test_run.py::222": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/noise.py"], "used_names": ["PPO", "pytest"], "enclosing_function": "test_ppo_warnings", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 549}, "tests/test_vec_monitor.py::44": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_vec_monitor", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom 
stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 2041}, "tests/test_dict_env.py::342": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecNormalize", "spaces"], "enclosing_function": "test_vec_normalize_image", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None 
otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 10, "n_files_resolved": 6, "n_chars_extracted": 2800}, "tests/test_preprocessing.py::8": {"resolved_imports": ["stable_baselines3/common/preprocessing.py"], "used_names": ["get_obs_shape", "spaces"], "enclosing_function": "test_get_obs_shape_discrete", "extracted_code": "# Source: stable_baselines3/common/preprocessing.py\ndef get_obs_shape(\n observation_space: spaces.Space,\n) -> tuple[int, ...] | dict[str, tuple[int, ...]]:\n \"\"\"\n Get the shape of the observation (useful for the buffers).\n\n :param observation_space:\n :return:\n \"\"\"\n if isinstance(observation_space, spaces.Box):\n return observation_space.shape\n elif isinstance(observation_space, spaces.Discrete):\n # Observation is an int\n return (1,)\n elif isinstance(observation_space, spaces.MultiDiscrete):\n # Number of discrete features\n return (len(observation_space.nvec),)\n elif isinstance(observation_space, spaces.MultiBinary):\n # Number of binary features\n return observation_space.shape\n elif isinstance(observation_space, spaces.Dict):\n return {key: get_obs_shape(subspace) for (key, subspace) in observation_space.spaces.items()} # type: ignore[misc]\n\n else:\n raise NotImplementedError(f\"{observation_space} observation space is not supported\")", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 1041}, "tests/test_her.py::470": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "DQN", "HerReplayBuffer", "make_vec_env", "pytest"], "enclosing_function": "test_performance_her", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise 
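A quick check of the ``get_obs_shape`` branches quoted above, one assertion per supported space type:

from gymnasium import spaces

from stable_baselines3.common.preprocessing import get_obs_shape

assert get_obs_shape(spaces.Box(low=-1, high=1, shape=(3, 4))) == (3, 4)
assert get_obs_shape(spaces.Discrete(5)) == (1,)            # a single int observation
assert get_obs_shape(spaces.MultiDiscrete([3, 2])) == (2,)  # one entry per discrete feature
assert get_obs_shape(spaces.MultiBinary(4)) == (4,)
assert get_obs_shape(spaces.Dict({"pos": spaces.Box(low=-1, high=1, shape=(2,))})) == {"pos": (2,)}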
ImportError(\n        \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n        \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n    )\n\n\n__all__ = [\n    \"A2C\",\n    \"DDPG\",\n    \"DQN\",\n    \"PPO\",\n\n    \"A2C\",\n    \"DDPG\",\n    \"DQN\",\n    \"PPO\",\n    \"SAC\",\n    \"TD3\",\n    \"HerReplayBuffer\",\n    \"get_system_info\",\n]\n\n    \"SAC\",\n    \"TD3\",\n    \"HerReplayBuffer\",\n    \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n    env_id: str | Callable[..., gym.Env],\n    n_envs: int = 1,\n    seed: int | None = None,\n    start_index: int = 0,\n    monitor_dir: str | None = None,\n    wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n    env_kwargs: dict[str, Any] | None = None,\n    vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n    vec_env_kwargs: dict[str, Any] | None = None,\n    monitor_kwargs: dict[str, Any] | None = None,\n    wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n    \"\"\"\n    Create a wrapped, monitored ``VecEnv``.\n    By default it uses a ``DummyVecEnv`` which is usually faster\n    than a ``SubprocVecEnv``.\n\n    :param env_id: either the env ID, the env class or a callable returning an env\n    :param n_envs: the number of environments you wish to have in parallel\n    :param seed: the initial seed for the random number generator\n    :param start_index: start rank index\n    :param monitor_dir: Path to a folder where the monitor files will be saved.\n        If None, no file will be written, however, the env will still be wrapped\n        in a Monitor wrapper to provide additional information about training.\n    :param wrapper_class: Additional wrapper to use on the environment.\n        This can also be a function with single argument that wraps the environment in many things.\n        Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n        In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n        See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n    :param env_kwargs: Optional keyword argument to pass to the env constructor\n    :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 6450}, "tests/test_utils.py::137": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["make_atari_env", "make_vec_env"], "enclosing_function": "test_vec_env_monitor_kwargs", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], 
gym.Env] | None = None,\n    env_kwargs: dict[str, Any] | None = None,\n    vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n    vec_env_kwargs: dict[str, Any] | None = None,\n    monitor_kwargs: dict[str, Any] | None = None,\n    wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n    \"\"\"\n    Create a wrapped, monitored ``VecEnv``.\n    By default it uses a ``DummyVecEnv`` which is usually faster\n    than a ``SubprocVecEnv``.\n\n    :param env_id: either the env ID, the env class or a callable returning an env\n    :param n_envs: the number of environments you wish to have in parallel\n    :param seed: the initial seed for the random number generator\n    :param start_index: start rank index\n    :param monitor_dir: Path to a folder where the monitor files will be saved.\n        If None, no file will be written, however, the env will still be wrapped\n        in a Monitor wrapper to provide additional information about training.\n    :param wrapper_class: Additional wrapper to use on the environment.\n        This can also be a function with single argument that wraps the environment in many things.\n        Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n        In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n        See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n    :param env_kwargs: Optional keyword argument to pass to the env constructor\n    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n    :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n    :return: The wrapped environment\n    \"\"\"\n    env_kwargs = env_kwargs or {}\n    vec_env_kwargs = vec_env_kwargs or {}\n    monitor_kwargs = monitor_kwargs or {}\n    wrapper_kwargs = wrapper_kwargs or {}\n    assert vec_env_kwargs is not None  # for mypy\n\n    def make_env(rank: int) -> Callable[[], gym.Env]:\n        def _init() -> gym.Env:\n            # For type checker:\n            assert monitor_kwargs is not None\n            assert wrapper_kwargs is not None\n            assert env_kwargs is not None\n\n            if isinstance(env_id, str):\n                # if the render mode was not specified, we set it to `rgb_array` as default.\n                kwargs = {\"render_mode\": \"rgb_array\"}\n                kwargs.update(env_kwargs)\n                try:\n                    env = gym.make(env_id, **kwargs)  # type: ignore[arg-type]\n                except TypeError:\n                    env = gym.make(env_id, **env_kwargs)\n            else:\n                env = env_id(**env_kwargs)\n                # Patch to support gym 0.21/0.26 and gymnasium\n                env = _patch_env(env)\n\n            if seed is not None:\n                # Note: here we only seed the action space\n                # We will seed the env at the next reset\n                env.action_space.seed(seed + rank)\n            # Wrap the env in a Monitor wrapper\n            # to have additional training information\n            monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n            # Create the monitor folder if needed\n            if monitor_path is not None and monitor_dir is not None:\n                os.makedirs(monitor_dir, exist_ok=True)\n            env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n            # Optionally, wrap the environment with the provided wrapper\n            if wrapper_class is not None:\n                env = wrapper_class(env, **wrapper_kwargs)\n            return env\n\n        return _init\n\n    # No custom VecEnv is passed\n    if vec_env_cls is None:\n        # Default: use a DummyVecEnv\n        vec_env_cls = DummyVecEnv\n\n    vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n    # Prepare 
the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\ndef make_atari_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv] | type[SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored VecEnv for Atari.\n It is a wrapper around ``make_vec_env`` that includes common preprocessing for Atari games.\n\n .. note::\n By default, the ``AtariWrapper`` uses ``terminal_on_life_loss=True``, which causes\n ``env.reset()`` to perform a no-op step instead of truly resetting when the environment\n terminates due to a loss of life (but not game over). To ensure ``reset()`` always\n resets the env, pass ``wrapper_kwargs=dict(terminal_on_life_loss=False)``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_kwargs: Optional keyword argument to pass to the ``AtariWrapper``\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :return: The wrapped environment\n \"\"\"\n return make_vec_env(\n env_id,\n n_envs=n_envs,\n seed=seed,\n start_index=start_index,\n monitor_dir=monitor_dir,\n wrapper_class=AtariWrapper,\n env_kwargs=env_kwargs,\n vec_env_cls=vec_env_cls,\n vec_env_kwargs=vec_env_kwargs,\n monitor_kwargs=monitor_kwargs,\n wrapper_kwargs=wrapper_kwargs,\n )", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 6701}, "tests/test_vec_monitor.py::112": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "get_monitor_files", "load_results", "os", "uuid"], "enclosing_function": "test_vec_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as 
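A minimal sketch of ``make_vec_env`` as documented above: four monitored CartPole copies in the default ``DummyVecEnv``, with per-rank action-space seeding (the env id assumes gymnasium's classic-control extras):

from stable_baselines3.common.env_util import make_vec_env

venv = make_vec_env("CartPole-v1", n_envs=4, seed=0)
obs = venv.reset()
assert obs.shape == (4, 4)  # (n_envs, obs_dim) for CartPole

# Passing vec_env_cls=SubprocVecEnv would run the copies in subprocesses instead;
# make_atari_env is the same entry point with AtariWrapper preprocessing pre-wired.
venv.close()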
file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 3717}, "tests/test_vec_envs.py::418": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "pytest", "spaces"], "enclosing_function": "test_vecenv_wrapper_getattr", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom 
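The monitor-log round trip behind ``get_monitor_files``/``load_results`` above, as a sketch: run one episode under ``Monitor`` with a filename, then aggregate the resulting ``*monitor.csv``. The ``run`` filename stem is arbitrary; ``Monitor``'s writer is expected to append the ``monitor.csv`` suffix, so treat the exact resulting file name as an assumption:

import tempfile

import gymnasium as gym

from stable_baselines3.common.monitor import Monitor, get_monitor_files, load_results

with tempfile.TemporaryDirectory() as log_dir:
    env = Monitor(gym.make("CartPole-v1"), filename=f"{log_dir}/run")
    env.reset(seed=0)
    done = False
    while not done:
        _, _, terminated, truncated, _ = env.step(env.action_space.sample())
        done = terminated or truncated
    env.close()

    assert len(get_monitor_files(log_dir)) == 1  # expected: run.monitor.csv
    df = load_results(log_dir)  # columns: r (return), l (length), t (time since t_start)
    assert len(df) == 1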
stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 1159}, "tests/test_save_load.py::764": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "make_vec_env", "os"], "enclosing_function": "test_dqn_target_update_interval", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the 
``Monitor`` wrapper.\n        In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n        See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n    :param env_kwargs: Optional keyword argument to pass to the env constructor\n    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n    :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n    :return: The wrapped environment\n    \"\"\"\n    env_kwargs = env_kwargs or {}\n    vec_env_kwargs = vec_env_kwargs or {}\n    monitor_kwargs = monitor_kwargs or {}\n    wrapper_kwargs = wrapper_kwargs or {}\n    assert vec_env_kwargs is not None  # for mypy\n\n    def make_env(rank: int) -> Callable[[], gym.Env]:\n        def _init() -> gym.Env:\n            # For type checker:\n            assert monitor_kwargs is not None\n            assert wrapper_kwargs is not None\n            assert env_kwargs is not None\n\n            if isinstance(env_id, str):\n                # if the render mode was not specified, we set it to `rgb_array` as default.\n                kwargs = {\"render_mode\": \"rgb_array\"}\n                kwargs.update(env_kwargs)\n                try:\n                    env = gym.make(env_id, **kwargs)  # type: ignore[arg-type]\n                except TypeError:\n                    env = gym.make(env_id, **env_kwargs)\n            else:\n                env = env_id(**env_kwargs)\n                # Patch to support gym 0.21/0.26 and gymnasium\n                env = _patch_env(env)\n\n            if seed is not None:\n                # Note: here we only seed the action space\n                # We will seed the env at the next reset\n                env.action_space.seed(seed + rank)\n            # Wrap the env in a Monitor wrapper\n            # to have additional training information\n            monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n            # Create the monitor folder if needed\n            if monitor_path is not None and monitor_dir is not None:\n                os.makedirs(monitor_dir, exist_ok=True)\n            env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n            # Optionally, wrap the environment with the provided wrapper\n            if wrapper_class is not None:\n                env = wrapper_class(env, **wrapper_kwargs)\n            return env\n\n        return _init\n\n    # No custom VecEnv is passed\n    if vec_env_cls is None:\n        # Default: use a DummyVecEnv\n        vec_env_cls = DummyVecEnv\n\n    vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n    # Prepare the seeds for the first reset\n    vec_env.seed(seed)\n    return vec_env", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 5028}, "tests/test_save_load.py::860": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "FloatSchedule", "IdentityEnvBox", "PPO", "pytest"], "enclosing_function": "test_save_load_backward_compatible", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n    __version__ = file_handler.read().strip()\n\n\n\n    \"DDPG\",\n    \"DQN\",\n    \"PPO\",\n    \"SAC\",\n    
\"TD3\",\n    \"HerReplayBuffer\",\n    \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\n    FakeImageEnv,\n    IdentityEnv,\n    IdentityEnvBox,\n    IdentityEnvMultiBinary,\n    IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n    \"BitFlippingEnv\",\n    \"FakeImageEnv\",\n    \"IdentityEnv\",\n\n    \"FakeImageEnv\",\n    \"IdentityEnv\",\n    \"IdentityEnvBox\",\n    \"IdentityEnvMultiBinary\",\n    \"IdentityEnvMultiDiscrete\",\n    \"SimpleMultiObsEnv\",\n    \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/utils.py\nclass FloatSchedule:\n    \"\"\"\n    Wrapper that ensures the output of a Schedule is cast to float.\n    Can wrap either a constant value or an existing callable Schedule.\n\n    :param value_schedule: Constant value or callable schedule\n        (e.g. LinearSchedule, ConstantSchedule)\n    \"\"\"\n\n    def __init__(self, value_schedule: Schedule | float):\n        if isinstance(value_schedule, FloatSchedule):\n            self.value_schedule: Schedule = value_schedule.value_schedule\n        elif isinstance(value_schedule, (float, int)):\n            self.value_schedule = ConstantSchedule(float(value_schedule))\n        else:\n            assert callable(value_schedule), f\"The learning rate schedule must be a float or a callable, not {value_schedule}\"\n            self.value_schedule = value_schedule\n\n    def __call__(self, progress_remaining: float) -> float:\n        # Cast to float to avoid unpickling errors to enable weights_only=True, see GH#1900\n        # Some types have odd behaviors when part of a Schedule, like numpy floats\n        return float(self.value_schedule(progress_remaining))\n\n    def __repr__(self) -> str:\n        return f\"FloatSchedule({self.value_schedule})\"\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n    \"CloudpickleWrapper\",\n    \"DummyVecEnv\",\n    \"StackedObservations\",\n    \"SubprocVecEnv\",\n    \"VecCheckNan\",\n    \"VecEnv\",\n    \"VecEnvWrapper\",\n    \"VecExtractDictObs\",\n    \"VecFrameStack\",\n    \"VecMonitor\",\n    \"VecNormalize\",", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 3440}, "tests/test_utils.py::44": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "Monitor", "SubprocVecEnv", "make_vec_env", "pytest"], "enclosing_function": "test_make_vec_env", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n    env_id: str | 
Callable[..., gym.Env],\n    n_envs: int = 1,\n    seed: int | None = None,\n    start_index: int = 0,\n    monitor_dir: str | None = None,\n    wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n    env_kwargs: dict[str, Any] | None = None,\n    vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n    vec_env_kwargs: dict[str, Any] | None = None,\n    monitor_kwargs: dict[str, Any] | None = None,\n    wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n    \"\"\"\n    Create a wrapped, monitored ``VecEnv``.\n    By default it uses a ``DummyVecEnv`` which is usually faster\n    than a ``SubprocVecEnv``.\n\n    :param env_id: either the env ID, the env class or a callable returning an env\n    :param n_envs: the number of environments you wish to have in parallel\n    :param seed: the initial seed for the random number generator\n    :param start_index: start rank index\n    :param monitor_dir: Path to a folder where the monitor files will be saved.\n        If None, no file will be written, however, the env will still be wrapped\n        in a Monitor wrapper to provide additional information about training.\n    :param wrapper_class: Additional wrapper to use on the environment.\n        This can also be a function with single argument that wraps the environment in many things.\n        Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n        In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n        See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n    :param env_kwargs: Optional keyword argument to pass to the env constructor\n    :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n    :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n    :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n    :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n    :return: The wrapped environment\n    \"\"\"\n    env_kwargs = env_kwargs or {}\n    vec_env_kwargs = vec_env_kwargs or {}\n    monitor_kwargs = monitor_kwargs or {}\n    wrapper_kwargs = wrapper_kwargs or {}\n    assert vec_env_kwargs is not None  # for mypy\n\n    def make_env(rank: int) -> Callable[[], gym.Env]:\n        def _init() -> gym.Env:\n            # For type checker:\n            assert monitor_kwargs is not None\n            assert wrapper_kwargs is not None\n            assert env_kwargs is not None\n\n            if isinstance(env_id, str):\n                # if the render mode was not specified, we set it to `rgb_array` as default.\n                kwargs = {\"render_mode\": \"rgb_array\"}\n                kwargs.update(env_kwargs)\n                try:\n                    env = gym.make(env_id, **kwargs)  # type: ignore[arg-type]\n                except TypeError:\n                    env = gym.make(env_id, **env_kwargs)\n            else:\n                env = env_id(**env_kwargs)\n                # Patch to support gym 0.21/0.26 and gymnasium\n                env = _patch_env(env)\n\n            if seed is not None:\n                # Note: here we only seed the action space\n                # We will seed the env at the next reset\n                env.action_space.seed(seed + rank)\n            # Wrap the env in a Monitor wrapper\n            # to have additional training information\n            monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n            # Create the monitor folder if needed\n            if monitor_path is not None and monitor_dir is not None:\n                os.makedirs(monitor_dir, exist_ok=True)\n            env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n            # Optionally, wrap the environment with the provided wrapper\n            if wrapper_class is not None:\n                env = wrapper_class(env, **wrapper_kwargs)\n            return env\n\n        return _init\n\n    # No custom VecEnv is passed\n    if vec_env_cls is None:\n        # 
Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs 
import VecExtractDictObs\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 11850}, "tests/test_utils.py::195": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "dummy_callback", "extracted_code": "", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 0}, "tests/test_vec_normalize.py::244": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["RunningMeanStd"], "enclosing_function": "test_combining_stats", "extracted_code": "# Source: stable_baselines3/common/running_mean_std.py\nclass RunningMeanStd:\n def __init__(self, epsilon: float = 1e-4, shape: tuple[int, ...] = ()):\n \"\"\"\n Calculates the running mean and std of a data stream\n https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n\n :param epsilon: helps with arithmetic issues\n :param shape: the shape of the data stream's output\n \"\"\"\n self.mean = np.zeros(shape, np.float64)\n self.var = np.ones(shape, np.float64)\n self.count = epsilon\n\n def copy(self) -> \"RunningMeanStd\":\n \"\"\"\n :return: Return a copy of the current object.\n \"\"\"\n new_object = RunningMeanStd(shape=self.mean.shape)\n new_object.mean = self.mean.copy()\n new_object.var = self.var.copy()\n new_object.count = float(self.count)\n return new_object\n\n def combine(self, other: \"RunningMeanStd\") -> None:\n \"\"\"\n Combine stats from another ``RunningMeanStd`` object.\n\n :param other: The other object to combine with.\n \"\"\"\n self.update_from_moments(other.mean, other.var, other.count)\n\n def update(self, arr: np.ndarray) -> None:\n batch_mean = np.mean(arr, axis=0)\n batch_var = np.var(arr, axis=0)\n batch_count = arr.shape[0]\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean: np.ndarray, batch_var: np.ndarray, batch_count: float) -> None:\n delta = batch_mean - self.mean\n tot_count = self.count + batch_count\n\n new_mean = self.mean + delta * batch_count / tot_count\n m_a = self.var * self.count\n m_b = batch_var * batch_count\n m_2 = m_a + m_b + np.square(delta) * self.count * batch_count / (self.count + batch_count)\n new_var = m_2 / (self.count + batch_count)\n\n new_count = batch_count + self.count\n\n self.mean = new_mean\n self.var = new_var\n self.count = new_count
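\n\n# --- Illustrative sketch (editor's addition, not in the extracted SB3 source; assumes numpy as np) ---\n# combine() merges two streams via the parallel-variance formula above, so the result should\n# match the statistics of the concatenated data up to the small epsilon initial count:\n# rms_a, rms_b = RunningMeanStd(shape=()), RunningMeanStd(shape=())\n# rms_a.update(np.array([1.0, 2.0, 3.0]))\n# rms_b.update(np.array([4.0, 5.0, 6.0]))\n# rms_a.combine(rms_b)\n# assert np.isclose(rms_a.mean, 3.5, atol=1e-3)  # np.mean([1, 2, 3, 4, 5, 6]) == 3.5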
", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 2020}, "tests/test_vec_monitor.py::34": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_vec_monitor", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None: ...\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 2041}, "tests/test_vec_envs.py::517": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "Monitor", "SubprocVecEnv", "VecFrameStack", "spaces"], "enclosing_function": "test_vec_env_is_wrapped", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs 
import VecExtractDictObs\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None: ...\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 8178}, "tests/test_deterministic.py::40": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/noise.py"], "used_names": ["A2C", "DQN", "NormalActionNoise", "PPO", "SAC", "TD3", "pytest"], "enclosing_function": "test_deterministic_training_common", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n# Source: stable_baselines3/common/noise.py\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n A Gaussian action noise.\n\n :param mean: Mean value of the noise\n :param sigma: Scale of the noise (std here)\n :param dtype: Type of the output noise\n \"\"\"\n\n def __init__(self, mean: np.ndarray, sigma: np.ndarray, dtype: DTypeLike = np.float32) -> None:\n self._mu = mean\n self._sigma = sigma\n self._dtype = dtype\n super().__init__()\n\n def __call__(self) -> np.ndarray:\n return np.random.normal(self._mu, self._sigma).astype(self._dtype)\n\n def __repr__(self) -> str:\n return f\"NormalActionNoise(mu={self._mu}, sigma={self._sigma})\"
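\n\n# --- Illustrative sketch (editor's addition, not in the extracted SB3 source; assumes numpy as np) ---\n# NormalActionNoise draws one Gaussian sample per action dimension on each call:\n# noise = NormalActionNoise(mean=np.zeros(2), sigma=0.1 * np.ones(2))\n# sample = noise()  # shape (2,) float32 array, centered on mean with std sigma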
"test_save_load_replay_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 6318}, "tests/test_env_checker.py::153": {"resolved_imports": ["stable_baselines3/common/env_checker.py"], "used_names": [], "enclosing_function": "__init__", "extracted_code": "", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/test_her.py::273": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "HerReplayBuffer", "SAC", "deepcopy", "make_vec_env", "pathlib", "pytest", 
"warnings"], "enclosing_function": "test_save_load_replay_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 6318}, "tests/test_logger.py::163": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["DEBUG", "INFO", "configure"], "enclosing_function": "test_main", "extracted_code": "# Source: stable_baselines3/common/logger.py\nDEBUG = 10\n\nINFO = 20\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: 
The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1278}, "tests/test_utils.py::398": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["get_parameters_by_name"], "enclosing_function": "test_get_parameters_by_name", "extracted_code": "# Source: stable_baselines3/common/utils.py\ndef get_parameters_by_name(model: th.nn.Module, included_names: Iterable[str]) -> list[th.Tensor]:\n \"\"\"\n Extract parameters from the state dict of ``model``\n if the name contains one of the strings in ``included_names``.\n\n :param model: the model where the parameters come from.\n :param included_names: substrings of names to include.\n :return: List of parameters values (Pytorch tensors)\n that matches the queried names.\n \"\"\"\n return [param for name, param in model.state_dict().items() if any([key in name for key in included_names])]", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 611}, "tests/test_gae.py::185": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/policies.py"], "used_names": ["A2C", "SAC", "pytest"], "enclosing_function": "test_infinite_horizon", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 9, "n_files_resolved": 4, "n_chars_extracted": 1049}, "tests/test_vec_stacked_obs.py::16": 
{"resolved_imports": ["stable_baselines3/common/vec_env/stacked_observations.py"], "used_names": ["spaces"], "enclosing_function": "test_compute_stacking_box", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/test_vec_monitor.py::93": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "get_monitor_files", "load_results", "os", "uuid"], "enclosing_function": "test_vec_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import 
VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 3717}, "tests/test_vec_monitor.py::66": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "DummyVecEnv", "VecMonitor", "csv", "os", "uuid"], "enclosing_function": "test_vec_monitor_info_keywords", "extracted_code": "# Source: stable_baselines3/common/envs/bit_flipping_env.py\nclass BitFlippingEnv(Env):\n \"\"\"\n Simple bit flipping env, useful to test HER.\n The goal is to flip all the bits to get a vector of ones.\n In the continuous variant, if the ith action component has a value > 0,\n then the ith bit will be flipped. Uses a ``MultiBinary`` observation space\n by default.\n\n :param n_bits: Number of bits to flip\n :param continuous: Whether to use the continuous actions version or not,\n by default, it uses the discrete one\n :param max_steps: Max number of steps, by default, equal to n_bits\n :param discrete_obs_space: Whether to use the discrete observation\n version or not, ie a one-hot encoding of all possible states\n :param image_obs_space: Whether to use an image observation version\n or not, ie a greyscale image of the state\n :param channel_first: Whether to use channel-first or last image.\n \"\"\"\n\n spec = EnvSpec(\"BitFlippingEnv-v0\", \"no-entry-point\")\n state: np.ndarray\n\n def __init__(\n self,\n n_bits: int = 10,\n continuous: bool = False,\n max_steps: int | None = None,\n discrete_obs_space: bool = False,\n image_obs_space: bool = False,\n channel_first: bool = True,\n render_mode: str = \"human\",\n ):\n super().__init__()\n self.render_mode = render_mode\n # Shape of the observation when using image space\n self.image_shape = (1, 36, 36) if channel_first else (36, 36, 1)\n # The achieved goal is determined by the current state\n # here, it is a special where they are equal\n\n # observation space for observations given to the model\n self.observation_space = self._make_observation_space(discrete_obs_space, image_obs_space, n_bits)\n # observation space used to update internal state\n self._obs_space = spaces.MultiBinary(n_bits)\n\n if continuous:\n self.action_space = spaces.Box(-1, 1, shape=(n_bits,), dtype=np.float32)\n else:\n self.action_space = spaces.Discrete(n_bits)\n self.continuous = continuous\n self.discrete_obs_space = discrete_obs_space\n self.image_obs_space = image_obs_space\n self.desired_goal = np.ones((n_bits,), dtype=self.observation_space[\"desired_goal\"].dtype)\n if max_steps is None:\n max_steps = n_bits\n self.max_steps = max_steps\n self.current_step = 0\n\n def seed(self, seed: int) 
-> None:\n self._obs_space.seed(seed)\n\n def convert_if_needed(self, state: np.ndarray) -> int | np.ndarray:\n \"\"\"\n Convert to discrete space if needed.\n\n :param state:\n :return:\n \"\"\"\n\n if self.discrete_obs_space:\n # Convert from int8 to int32 for NumPy 2.0\n state = state.astype(np.int32)\n # The internal state is the binary representation of the\n # observed one\n return int(sum(state[i] * 2**i for i in range(len(state))))\n\n if self.image_obs_space:\n size = np.prod(self.image_shape)\n image = np.concatenate((state.astype(np.uint8) * 255, np.zeros(size - len(state), dtype=np.uint8)))\n return image.reshape(self.image_shape).astype(np.uint8)\n return state\n\n def convert_to_bit_vector(self, state: int | np.ndarray, batch_size: int) -> np.ndarray:\n \"\"\"\n Convert to bit vector if needed.\n\n :param state: The state to be converted, which can be either an integer or a numpy array.\n :param batch_size: The batch size.\n :return: The state converted into a bit vector.\n \"\"\"\n # Convert back to bit vector\n if isinstance(state, int):\n bit_vector = np.array(state).reshape(batch_size, -1)\n # Convert to binary representation\n bit_vector = ((bit_vector[:, :] & (1 << np.arange(len(self.state)))) > 0).astype(int)\n elif self.image_obs_space:\n bit_vector = state.reshape(batch_size, -1)[:, : len(self.state)] / 255 # type: ignore[assignment]\n else:\n bit_vector = np.array(state).reshape(batch_size, -1)\n return bit_vector\n\n def _make_observation_space(self, discrete_obs_space: bool, image_obs_space: bool, n_bits: int) -> spaces.Dict:\n \"\"\"\n Helper to create observation space\n\n :param discrete_obs_space: Whether to use the discrete observation version\n :param image_obs_space: Whether to use the image observation version\n :param n_bits: The number of bits used to represent the state\n :return: the environment observation space\n \"\"\"\n if discrete_obs_space and image_obs_space:\n raise ValueError(\"Cannot use both discrete and image observation spaces\")\n\n if discrete_obs_space:\n # In the discrete case, the agent act on the binary\n # representation of the observation\n return spaces.Dict(\n {\n \"observation\": spaces.Discrete(2**n_bits),\n \"achieved_goal\": spaces.Discrete(2**n_bits),\n \"desired_goal\": spaces.Discrete(2**n_bits),\n }\n )\n\n if image_obs_space:\n # When using image as input,\n # one image contains the bits 0 -> 0, 1 -> 255\n # and the rest is filled with zeros\n return spaces.Dict(\n {\n \"observation\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"achieved_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"desired_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n }\n )\n\n return spaces.Dict(\n {\n \"observation\": spaces.MultiBinary(n_bits),\n \"achieved_goal\": spaces.MultiBinary(n_bits),\n \"desired_goal\": spaces.MultiBinary(n_bits),\n }\n )\n\n def _get_obs(self) -> dict[str, int | np.ndarray]:\n \"\"\"\n Helper to create the observation.\n\n :return: The current observation.\n \"\"\"\n return OrderedDict(\n [\n (\"observation\", self.convert_if_needed(self.state.copy())),\n (\"achieved_goal\", self.convert_if_needed(self.state.copy())),\n (\"desired_goal\", self.convert_if_needed(self.desired_goal.copy())),\n ]\n )\n\n def reset(self, *, seed: int | None = None, options: dict | None = None) -> tuple[dict[str, int | np.ndarray], dict]:\n if seed is not None:\n self._obs_space.seed(seed)\n self.current_step = 
0\n self.state = self._obs_space.sample()\n return self._get_obs(), {}\n\n def step(self, action: np.ndarray | int) -> GymStepReturn:\n \"\"\"\n Step into the env.\n\n :param action:\n :return:\n \"\"\"\n if self.continuous:\n self.state[action > 0] = 1 - self.state[action > 0]\n else:\n self.state[action] = 1 - self.state[action]\n obs = self._get_obs()\n reward = float(self.compute_reward(obs[\"achieved_goal\"], obs[\"desired_goal\"], None).item())\n terminated = reward == 0\n self.current_step += 1\n # Episode terminate when we reached the goal or the max number of steps\n info = {\"is_success\": terminated}\n truncated = self.current_step >= self.max_steps\n return obs, reward, terminated, truncated, info\n\n def compute_reward(\n self, achieved_goal: int | np.ndarray, desired_goal: int | np.ndarray, _info: dict[str, Any] | None\n ) -> np.float32:\n # As we are using a vectorized version, we need to keep track of the `batch_size`\n if isinstance(achieved_goal, int):\n batch_size = 1\n elif self.image_obs_space:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 3 else 1\n else:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 1 else 1\n\n desired_goal = self.convert_to_bit_vector(desired_goal, batch_size)\n achieved_goal = self.convert_to_bit_vector(achieved_goal, batch_size)\n\n # Deceptive reward: it is positive only when the goal is achieved\n # Here we are using a vectorized version\n distance = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n return -(distance > 0).astype(np.float32)\n\n def render(self) -> np.ndarray | None: # type: ignore[override]\n if self.render_mode == \"rgb_array\":\n return self.state.copy()\n print(self.state)\n return None\n\n def close(self) -> None:\n pass\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n 
\"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 11130}, "tests/test_predict.py::89": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "DummyVecEnv", "SAC", "TD3", "get_device", "pytest"], "enclosing_function": "test_predict", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/utils.py\ndef get_device(device: th.device | str = \"auto\") -> th.device:\n \"\"\"\n Retrieve PyTorch device.\n It checks that the requested device is available first.\n For now, it supports only cpu and cuda.\n By default, it tries to use the gpu.\n\n :param device: One for 'auto', 'cuda', 'cpu'\n :return: Supported Pytorch device\n \"\"\"\n # Cuda by default\n if device == \"auto\":\n device = \"cuda\"\n # Force conversion to th.device\n device = th.device(device)\n\n # Cuda not available\n if device.type == th.device(\"cuda\").type and not th.cuda.is_available():\n return th.device(\"cpu\")\n\n return device\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom 
stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 10, "n_files_resolved": 5, "n_chars_extracted": 3334}, "tests/test_callbacks.py::81": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "CallbackList", "CheckpointCallback", "DDPG", "DQN", "EvalCallback", "EveryNTimesteps", "LogEveryNTimesteps", "PPO", "SAC", "StopTrainingOnMaxEpisodes", "StopTrainingOnNoModelImprovement", "StopTrainingOnRewardThreshold", "TD3", "make_vec_env", "os", "pytest", "shutil"], "enclosing_function": "test_callbacks", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = 
file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass CallbackList(BaseCallback):\n \"\"\"\n Class for chaining callbacks.\n\n :param callbacks: A list of callbacks that will be called\n sequentially.\n \"\"\"\n\n def __init__(self, callbacks: list[BaseCallback]):\n super().__init__()\n assert isinstance(callbacks, list)\n self.callbacks = callbacks\n\n def _init_callback(self) -> None:\n for callback in self.callbacks:\n callback.init_callback(self.model)\n\n # Fix for https://github.com/DLR-RM/stable-baselines3/issues/1791\n # pass through the parent callback to all children\n callback.parent = self.parent\n\n def _on_training_start(self) -> None:\n for callback in self.callbacks:\n callback.on_training_start(self.locals, self.globals)\n\n def _on_rollout_start(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_start()\n\n def _on_step(self) -> bool:\n continue_training = True\n for callback in self.callbacks:\n # Return False (stop training) if at least one callback returns False\n continue_training = callback.on_step() and continue_training\n return continue_training\n\n def _on_rollout_end(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_end()\n\n def _on_training_end(self) -> None:\n for callback in self.callbacks:\n callback.on_training_end()\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n for callback in self.callbacks:\n callback.update_locals(locals_)\n\nclass CheckpointCallback(BaseCallback):\n \"\"\"\n Callback for saving a model every ``save_freq`` calls\n to ``env.step()``.\n By default, it only saves model checkpoints,\n you need to pass ``save_replay_buffer=True``,\n and ``save_vecnormalize=True`` to also save replay buffer checkpoints\n and normalization statistics checkpoints.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``save_freq = max(save_freq // n_envs, 1)``\n\n :param save_freq: Save checkpoints every ``save_freq`` call of the callback.\n :param save_path: Path to the folder where the model will be saved.\n :param name_prefix: Common prefix to the saved models\n :param save_replay_buffer: Save the model replay buffer\n :param save_vecnormalize: Save the ``VecNormalize`` statistics\n :param verbose: Verbosity level: 0 for no output, 2 for indicating when saving model checkpoint\n \"\"\"\n\n def __init__(\n self,\n save_freq: int,\n save_path: str,\n name_prefix: str = \"rl_model\",\n save_replay_buffer: bool = False,\n save_vecnormalize: bool = False,\n verbose: int = 0,\n ):\n super().__init__(verbose)\n self.save_freq = save_freq\n self.save_path = save_path\n self.name_prefix = name_prefix\n self.save_replay_buffer = save_replay_buffer\n self.save_vecnormalize = save_vecnormalize\n\n def _init_callback(self) -> None:\n # Create folder if needed\n if self.save_path is not None:\n os.makedirs(self.save_path, exist_ok=True)\n\n def _checkpoint_path(self, checkpoint_type: str = \"\", extension: str = \"\") -> str:\n \"\"\"\n Helper to get checkpoint path for each type of checkpoint.\n\n :param checkpoint_type: empty for the model, \"replay_buffer_\"\n or \"vecnormalize_\" for the other checkpoints.\n :param extension: Checkpoint file extension (zip for model, pkl for others)\n :return: Path to the checkpoint\n \"\"\"\n return os.path.join(self.save_path, f\"{self.name_prefix}_{checkpoint_type}{self.num_timesteps}_steps.{extension}\")\n\n def _on_step(self) -> bool:\n if self.n_calls % self.save_freq == 0:\n model_path = self._checkpoint_path(extension=\"zip\")\n self.model.save(model_path)\n if self.verbose >= 2:\n print(f\"Saving model checkpoint to {model_path}\")\n\n if self.save_replay_buffer and hasattr(self.model, \"replay_buffer\") and self.model.replay_buffer is not None:\n # If model has a replay buffer, save it too\n replay_buffer_path = self._checkpoint_path(\"replay_buffer_\", extension=\"pkl\")\n self.model.save_replay_buffer(replay_buffer_path) # type: ignore[attr-defined]\n if self.verbose > 1:\n print(f\"Saving model replay buffer checkpoint to {replay_buffer_path}\")\n\n if self.save_vecnormalize and self.model.get_vec_normalize_env() is not None:\n # Save the VecNormalize statistics\n vec_normalize_path = self._checkpoint_path(\"vecnormalize_\", extension=\"pkl\")\n self.model.get_vec_normalize_env().save(vec_normalize_path) # type: ignore[union-attr]\n if self.verbose >= 2:\n print(f\"Saving model VecNormalize to {vec_normalize_path}\")\n\n return True\n\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use stochastic or deterministic actions.\n :param render: Whether or not to render the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type: \" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to 
the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, 
locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\nclass StopTrainingOnRewardThreshold(BaseCallback):\n \"\"\"\n Stop the training once a threshold in episodic reward\n has been reached (i.e. when the model is good enough).\n\n It must be used with the ``EvalCallback``.\n\n :param reward_threshold: Minimum expected reward per episode\n to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because episodic reward\n threshold reached\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, reward_threshold: float, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.reward_threshold = reward_threshold\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnRewardThreshold`` callback must be used with an ``EvalCallback``\"\n continue_training = bool(self.parent.best_mean_reward < self.reward_threshold)\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because the mean reward {self.parent.best_mean_reward:.2f} \"\n f\"is above the threshold {self.reward_threshold}\"\n )\n return continue_training\n\nclass EveryNTimesteps(EventCallback):\n \"\"\"\n Trigger a callback every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two triggers.\n :param callback: Callback that will be called\n when the event is triggered.\n \"\"\"\n\n def __init__(self, n_steps: int, callback: BaseCallback):\n super().__init__(callback)\n self.n_steps = n_steps\n self.last_time_trigger = 0\n\n def _on_step(self) -> bool:\n if (self.num_timesteps - self.last_time_trigger) >= self.n_steps:\n self.last_time_trigger = self.num_timesteps\n return self._on_event()\n return True\n\nclass LogEveryNTimesteps(EveryNTimesteps):\n \"\"\"\n Log data every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two triggers.\n \"\"\"\n\n def __init__(self, n_steps: int):\n super().__init__(n_steps, callback=ConvertCallback(self._log_data))\n\n def _log_data(self, _locals: dict[str, Any], _globals: dict[str, Any]) -> bool:\n self.model.dump_logs()\n return True\n\nclass StopTrainingOnMaxEpisodes(BaseCallback):\n \"\"\"\n Stop the training once a maximum number of episodes have been played.\n\n For multiple environments, it presumes that the desired behavior is for the agent to train on each env for ``max_episodes``\n and in total for ``max_episodes * n_envs`` episodes.\n\n :param max_episodes: Maximum number of episodes before stopping training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about when training ended by\n reaching ``max_episodes``\n \"\"\"\n\n def __init__(self, max_episodes: int, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_episodes = max_episodes\n self._total_max_episodes = max_episodes\n self.n_episodes = 0\n\n def _init_callback(self) -> None:\n # At start set total max according to number of environments\n self._total_max_episodes = self.max_episodes * self.training_env.num_envs\n\n def _on_step(self) -> bool:\n # Check that the `dones` local variable is defined\n assert \"dones\" in self.locals, \"`dones` variable is not defined, please check your code next to `callback.on_step()`\"\n self.n_episodes += np.sum(self.locals[\"dones\"]).item()\n\n continue_training = self.n_episodes < self._total_max_episodes\n\n if self.verbose >= 1 and not 
continue_training:\n mean_episodes_per_env = self.n_episodes / self.training_env.num_envs\n mean_ep_str = (\n f\"with an average of {mean_episodes_per_env:.2f} episodes per env\" if self.training_env.num_envs > 1 else \"\"\n )\n\n print(\n f\"Stopping training with a total of {self.num_timesteps} steps because the \"\n f\"{self.locals.get('tb_log_name')} model reached max_episodes={self.max_episodes}, \"\n f\"by playing for {self.n_episodes} episodes \"\n f\"{mean_ep_str}\"\n )\n return continue_training\n\nclass StopTrainingOnNoModelImprovement(BaseCallback):\n \"\"\"\n Stop the training early if there is no new best model (new best mean reward) after more than N consecutive evaluations.\n\n It is possible to define a minimum number of evaluations before starting to count evaluations without improvement.\n\n It must be used with the ``EvalCallback``.\n\n :param max_no_improvement_evals: Maximum number of consecutive evaluations without a new best model.\n :param min_evals: Number of evaluations before starting to count evaluations without improvement.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because no new best model\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, max_no_improvement_evals: int, min_evals: int = 0, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_no_improvement_evals = max_no_improvement_evals\n self.min_evals = min_evals\n self.last_best_mean_reward = -np.inf\n self.no_improvement_evals = 0\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnNoModelImprovement`` callback must be used with an ``EvalCallback``\"\n\n continue_training = True\n\n if self.n_calls > self.min_evals:\n if self.parent.best_mean_reward > self.last_best_mean_reward:\n self.no_improvement_evals = 0\n else:\n self.no_improvement_evals += 1\n if self.no_improvement_evals > self.max_no_improvement_evals:\n continue_training = False\n\n self.last_best_mean_reward = self.parent.best_mean_reward\n\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because there was no new best model in the last {self.no_improvement_evals:d} evaluations\"\n )\n\n return continue_training\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with a single argument that wraps the environment in many 
things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword arguments to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 26817}, "tests/test_save_load.py::211": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DDPG", "DQN", "DummyVecEnv", "PPO", "SAC", "TD3", "os", "pytest"], "enclosing_function": "test_set_env", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from 
file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 3393}, "tests/test_callbacks.py::185": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "DQN", "DummyVecEnv", "EvalCallback", "HerReplayBuffer"], "enclosing_function": "test_eval_success_logging", "extracted_code": "# Source: 
stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. 
It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use stochastic or deterministic actions.\n :param render: Whether or not to render the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type: \" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise 
AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n 
\"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 12142}, "tests/test_vec_normalize.py::317": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "test_get_original", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_identity.py::30": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DQN", "DummyVecEnv", "IdentityEnv", "IdentityEnvMultiBinary", "IdentityEnvMultiDiscrete", "PPO", "evaluate_policy", "pytest"], "enclosing_function": "test_discrete", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = 
os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/evaluation.py\ndef evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: gym.Env | VecEnv,\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Callable[[dict[str, Any], dict[str, Any]], None] | None = None,\n reward_threshold: float | None = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> tuple[float, float] | tuple[list[float], list[int]]:\n \"\"\"\n Runs the policy for ``n_eval_episodes`` episodes and outputs the average return\n per episode (sum of undiscounted rewards).\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. 
This can be any object\n that implements a ``predict`` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episodes to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to perform additional checks,\n called ``n_envs`` times after each step.\n Gets locals() and globals() passed as parameters.\n See https://github.com/DLR-RM/stable-baselines3/issues/1912 for more details.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean return per episode (sum of rewards), std of reward per episode.\n Returns (list[float], list[int]) when ``return_episode_rewards`` is True, first\n list containing per-episode return and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env]) # type: ignore[list-item, return-value]\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. \"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(\n observations, # type: ignore[arg-type]\n state=states,\n episode_start=episode_starts,\n deterministic=deterministic,\n )\n new_observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. 
Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n observations = new_observations\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 7, "n_files_resolved": 5, "n_chars_extracted": 10529}, "tests/test_utils.py::331": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DummyVecEnv", "Monitor", "SubprocVecEnv", "evaluate_policy", "pytest"], "enclosing_function": "test_evaluate_policy_monitors", "extracted_code": "# Source: stable_baselines3/common/evaluation.py\ndef evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: gym.Env | VecEnv,\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Callable[[dict[str, Any], dict[str, Any]], None] | None = None,\n reward_threshold: float | None = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> tuple[float, float] | tuple[list[float], list[int]]:\n \"\"\"\n Runs the policy for ``n_eval_episodes`` episodes and outputs the average return\n per episode (sum of undiscounted rewards).\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. 
note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. This can be any object\n that implements a ``predict`` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episodes to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to perform additional checks,\n called ``n_envs`` times after each step.\n Gets locals() and globals() passed as parameters.\n See https://github.com/DLR-RM/stable-baselines3/issues/1912 for more details.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean return per episode (sum of rewards), std of reward per episode.\n Returns (list[float], list[int]) when ``return_episode_rewards`` is True, first\n list containing per-episode return and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env]) # type: ignore[list-item, return-value]\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. 
\"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(\n observations, # type: ignore[arg-type]\n state=states,\n episode_start=episode_starts,\n deterministic=deterministic,\n )\n new_observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n observations = new_observations\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward\n\n\n# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 13881}, "tests/test_vec_stacked_obs.py::91": {"resolved_imports": ["stable_baselines3/common/vec_env/stacked_observations.py"], "used_names": ["StackedObservations", "spaces"], "enclosing_function": "test_reset_update_box", "extracted_code": "# Source: stable_baselines3/common/vec_env/stacked_observations.py\nclass StackedObservations(Generic[TObs]):\n \"\"\"\n Frame stacking wrapper for data.\n\n Dimension to stack over is either first (channels-first) or last (channels-last), which is detected automatically using\n ``common.preprocessing.is_image_space_channels_first`` if observation is an image space.\n\n :param num_envs: Number of environments\n :param n_stack: Number of frames to stack\n :param observation_space: Environment observation space\n :param channels_order: If \"first\", stack on first image dimension. 
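To make the ``channels_order`` parameter concrete (the ``\"last\"`` case follows below): with ``channels_order="first"`` the channel axis is the repeat axis, so it grows by a factor of ``n_stack``. A hedged sketch, with an arbitrary image shape:

```python
import numpy as np
from gymnasium import spaces

from stable_baselines3.common.vec_env import StackedObservations

# Channels-first image space: (C, H, W) = (3, 84, 84)
space = spaces.Box(low=0, high=255, shape=(3, 84, 84), dtype=np.uint8)
stacked = StackedObservations(num_envs=2, n_stack=4, observation_space=space, channels_order="first")
# The channel axis is repeated n_stack times: 3 * 4 = 12
print(stacked.stacked_observation_space.shape)  # (12, 84, 84)
```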
If \"last\", stack on last dimension.\n If None, automatically detect channel to stack over in case of image observation or default to \"last\".\n For Dict space, channels_order can also be a dictionary.\n \"\"\"\n\n def __init__(\n self,\n num_envs: int,\n n_stack: int,\n observation_space: spaces.Box | spaces.Dict,\n channels_order: str | Mapping[str, str | None] | None = None,\n ) -> None:\n self.n_stack = n_stack\n self.observation_space = observation_space\n if isinstance(observation_space, spaces.Dict):\n if not isinstance(channels_order, Mapping):\n channels_order = {key: channels_order for key in observation_space.spaces.keys()}\n self.sub_stacked_observations = {\n key: StackedObservations(num_envs, n_stack, subspace, channels_order[key]) # type: ignore[arg-type]\n for key, subspace in observation_space.spaces.items()\n }\n self.stacked_observation_space = spaces.Dict(\n {key: substack_obs.stacked_observation_space for key, substack_obs in self.sub_stacked_observations.items()}\n ) # type: spaces.Dict | spaces.Box # make mypy happy\n elif isinstance(observation_space, spaces.Box):\n if isinstance(channels_order, Mapping):\n raise TypeError(\"When the observation space is Box, channels_order can't be a dict.\")\n\n self.channels_first, self.stack_dimension, self.stacked_shape, self.repeat_axis = self.compute_stacking(\n n_stack, observation_space, channels_order\n )\n low = np.repeat(observation_space.low, n_stack, axis=self.repeat_axis)\n high = np.repeat(observation_space.high, n_stack, axis=self.repeat_axis)\n self.stacked_observation_space = spaces.Box(\n low=low,\n high=high,\n dtype=observation_space.dtype, # type: ignore[arg-type]\n )\n self.stacked_obs = np.zeros((num_envs, *self.stacked_shape), dtype=observation_space.dtype)\n else:\n raise TypeError(\n f\"StackedObservations only supports Box and Dict as observation spaces. 
{observation_space} was provided.\"\n )\n\n @staticmethod\n def compute_stacking(\n n_stack: int, observation_space: spaces.Box, channels_order: str | None = None\n ) -> tuple[bool, int, tuple[int, ...], int]:\n \"\"\"\n Calculates the parameters in order to stack observations\n\n :param n_stack: Number of observations to stack\n :param observation_space: Observation space\n :param channels_order: Order of the channels\n :return: Tuple of channels_first, stack_dimension, stackedobs, repeat_axis\n \"\"\"\n\n if channels_order is None:\n # Detect channel location automatically for images\n if is_image_space(observation_space):\n channels_first = is_image_space_channels_first(observation_space)\n else:\n # Default behavior for non-image space, stack on the last axis\n channels_first = False\n else:\n assert channels_order in {\n \"last\",\n \"first\",\n }, \"`channels_order` must be one of following: 'last', 'first'\"\n\n channels_first = channels_order == \"first\"\n\n # This includes the vec-env dimension (first)\n stack_dimension = 1 if channels_first else -1\n repeat_axis = 0 if channels_first else -1\n stacked_shape = list(observation_space.shape)\n stacked_shape[repeat_axis] *= n_stack\n return channels_first, stack_dimension, tuple(stacked_shape), repeat_axis\n\n def reset(self, observation: TObs) -> TObs:\n \"\"\"\n Reset the stacked_obs, add the reset observation to the stack, and return the stack.\n\n :param observation: Reset observation\n :return: The stacked reset observation\n \"\"\"\n if isinstance(observation, dict):\n return {key: self.sub_stacked_observations[key].reset(obs) for key, obs in observation.items()} # type: ignore[return-value]\n\n self.stacked_obs[...] = 0\n if self.channels_first:\n self.stacked_obs[:, -observation.shape[self.stack_dimension] :, ...] 
= observation\n else:\n self.stacked_obs[..., -observation.shape[self.stack_dimension] :] = observation\n return self.stacked_obs # type: ignore[return-value]\n\n def update(\n self,\n observations: TObs,\n dones: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> tuple[TObs, list[dict[str, Any]]]:\n \"\"\"\n Add the observations to the stack and use the dones to update the infos.\n\n :param observations: Observations\n :param dones: Dones\n :param infos: Infos\n :return: Tuple of the stacked observations and the updated infos\n \"\"\"\n if isinstance(observations, dict):\n # From [{}, {terminal_obs: {key1: ..., key2: ...}}]\n # to {key1: [{}, {terminal_obs: ...}], key2: [{}, {terminal_obs: ...}]}\n sub_infos = {\n key: [\n {\"terminal_observation\": info[\"terminal_observation\"][key]} if \"terminal_observation\" in info else {}\n for info in infos\n ]\n for key in observations.keys()\n }\n\n stacked_obs = {}\n stacked_infos = {}\n for key, obs in observations.items():\n stacked_obs[key], stacked_infos[key] = self.sub_stacked_observations[key].update(obs, dones, sub_infos[key])\n\n # From {key1: [{}, {terminal_obs: ...}], key2: [{}, {terminal_obs: ...}]}\n # to [{}, {terminal_obs: {key1: ..., key2: ...}}]\n for key in stacked_infos.keys():\n for env_idx in range(len(infos)):\n if \"terminal_observation\" in infos[env_idx]:\n infos[env_idx][\"terminal_observation\"][key] = stacked_infos[key][env_idx][\"terminal_observation\"]\n return stacked_obs, infos # type: ignore[return-value]\n\n shift = -observations.shape[self.stack_dimension]\n self.stacked_obs = np.roll(self.stacked_obs, shift, axis=self.stack_dimension)\n for env_idx, done in enumerate(dones):\n if done:\n if \"terminal_observation\" in infos[env_idx]:\n old_terminal = infos[env_idx][\"terminal_observation\"]\n if self.channels_first:\n previous_stack = self.stacked_obs[env_idx, :shift, ...]\n else:\n previous_stack = self.stacked_obs[env_idx, ..., :shift]\n\n new_terminal = np.concatenate((previous_stack, old_terminal), axis=self.repeat_axis)\n infos[env_idx][\"terminal_observation\"] = new_terminal\n else:\n warnings.warn(\"VecFrameStack wrapping a VecEnv without terminal_observation info\")\n self.stacked_obs[env_idx] = 0\n if self.channels_first:\n self.stacked_obs[:, shift:, ...] 
= observations\n else:\n self.stacked_obs[..., shift:] = observations\n return self.stacked_obs, infos", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 7882}, "tests/test_save_load.py::428": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "SAC", "TD3", "pytest", "warnings"], "enclosing_function": "test_warn_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 1493}, "tests/test_her.py::21": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["HER", "pytest"], "enclosing_function": "test_import_error", "extracted_code": "", "n_imports_parsed": 16, "n_files_resolved": 8, "n_chars_extracted": 0}, "tests/test_spaces.py::108": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py"], "used_names": ["A2C", "DDPG", "DQN", "PPO", "SAC", "TD3", "pytest"], "enclosing_function": "test_action_spaces", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 
import TD3\n\n# Read version from file\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 2231}, "tests/test_vec_envs.py::264": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["VecFrameStack", "VecNormalize", "functools", "pytest"], "enclosing_function": "test_vecenv_terminal_obs", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef 
unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 2637}, "tests/test_n_step_replay.py::120": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["pytest"], "enclosing_function": "test_nstep_early_truncation", "extracted_code": "", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/test_custom_policy.py::74": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/sb2_compat/rmsprop_tf_like.py", "stable_baselines3/common/torch_layers.py"], "used_names": ["create_mlp", "nn"], "enclosing_function": "test_create_mlp", "extracted_code": "# Source: stable_baselines3/common/torch_layers.py\ndef create_mlp(\n input_dim: int,\n output_dim: int,\n net_arch: list[int],\n activation_fn: type[nn.Module] = nn.ReLU,\n squash_output: bool = False,\n with_bias: bool = True,\n pre_linear_modules: list[type[nn.Module]] | None = None,\n post_linear_modules: list[type[nn.Module]] | None = None,\n) -> list[nn.Module]:\n \"\"\"\n Create a multi layer perceptron (MLP), which is\n a collection of fully-connected layers each followed by an activation function.\n\n :param input_dim: Dimension of the input vector\n :param output_dim: Dimension of the output (last layer, for instance, the number of actions)\n :param net_arch: Architecture of the neural net\n It represents the number of units per layer.\n The length of this list is the number of layers.\n :param activation_fn: The activation function\n to use after each layer.\n :param squash_output: Whether to squash the output using a Tanh\n activation function\n :param with_bias: If set to False, the layers will not learn an additive bias\n :param pre_linear_modules: List of nn.Module to add before the linear layers.\n These modules should maintain the input tensor dimension (e.g. 
BatchNorm).\n The number of input features is passed to the module's constructor.\n Compared to post_linear_modules, they are used before the output layer (output_dim > 0).\n :param post_linear_modules: List of nn.Module to add after the linear layers\n (and before the activation function). These modules should maintain the input\n tensor dimension (e.g. Dropout, LayerNorm). They are not used after the\n output layer (output_dim > 0). The number of input features is passed to\n the module's constructor.\n :return: The list of layers of the neural network\n \"\"\"\n\n pre_linear_modules = pre_linear_modules or []\n post_linear_modules = post_linear_modules or []\n\n modules = []\n if len(net_arch) > 0:\n # BatchNorm maintains input dim\n for module in pre_linear_modules:\n modules.append(module(input_dim))\n\n modules.append(nn.Linear(input_dim, net_arch[0], bias=with_bias))\n\n # LayerNorm, Dropout maintain output dim\n for module in post_linear_modules:\n modules.append(module(net_arch[0]))\n\n modules.append(activation_fn())\n\n for idx in range(len(net_arch) - 1):\n for module in pre_linear_modules:\n modules.append(module(net_arch[idx]))\n\n modules.append(nn.Linear(net_arch[idx], net_arch[idx + 1], bias=with_bias))\n\n for module in post_linear_modules:\n modules.append(module(net_arch[idx + 1]))\n\n modules.append(activation_fn())\n\n if output_dim > 0:\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else input_dim\n # Only add BatchNorm before output layer\n for module in pre_linear_modules:\n modules.append(module(last_layer_dim))\n\n modules.append(nn.Linear(last_layer_dim, output_dim, bias=with_bias))\n if squash_output:\n modules.append(nn.Tanh())\n return modules", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 3147}, "tests/test_logger.py::141": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["A2C", "CSVOutputFormat", "HumanOutputFormat", "TensorBoardOutputFormat", "configure", "os"], "enclosing_function": "test_set_logger", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/logger.py\nclass HumanOutputFormat(KVWriter, SeqWriter):\n \"\"\"A human-readable output format producing ASCII tables of key-value pairs.\n\n Set attribute ``max_length`` to change the maximum length of keys and values\n to write to output (or specify it when calling ``__init__``).\n\n :param filename_or_file: the file to write the log to\n :param max_length: the maximum length of keys and values to write to output.\n Outputs longer than this will be truncated. An error will be raised\n if multiple keys are truncated to the same value. The maximum output\n width will be ``2*max_length + 7``. 
The default of 36 produces output\n no longer than 79 characters wide.\n \"\"\"\n\n def __init__(self, filename_or_file: str | TextIO, max_length: int = 36):\n self.max_length = max_length\n if isinstance(filename_or_file, str):\n self.file = open(filename_or_file, \"w\")\n self.own_file = True\n elif isinstance(filename_or_file, TextIOBase) or hasattr(filename_or_file, \"write\"):\n # Note: in theory `TextIOBase` check should be sufficient,\n # in practice, libraries don't always inherit from it, see GH#1598\n self.file = filename_or_file # type: ignore[assignment]\n self.own_file = False\n else:\n raise ValueError(f\"Expected file or str, got {filename_or_file}\")\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Create strings for printing\n key2str = {}\n tag = \"\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and (\"stdout\" in excluded or \"log\" in excluded):\n continue\n\n elif isinstance(value, Video):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"hparam\")\n\n elif isinstance(value, float):\n # Align left\n value_str = f\"{value:<8.3g}\"\n else:\n value_str = str(value)\n\n if key.find(\"/\") > 0: # Find tag and add it to the dict\n tag = key[: key.find(\"/\") + 1]\n key2str[(tag, self._truncate(tag))] = \"\"\n # Remove tag from key and indent the key\n if len(tag) > 0 and tag in key:\n key = f\"{'':3}{key[len(tag) :]}\"\n\n truncated_key = self._truncate(key)\n if (tag, truncated_key) in key2str:\n raise ValueError(\n f\"Key '{key}' truncated to '{truncated_key}' that already exists. 
Consider increasing `max_length`.\"\n )\n key2str[(tag, truncated_key)] = self._truncate(value_str)\n\n # Find max widths\n if len(key2str) == 0:\n warnings.warn(\"Tried to write empty key-value dict\")\n return\n else:\n tagless_keys = map(lambda x: x[1], key2str.keys())\n key_width = max(map(len, tagless_keys))\n val_width = max(map(len, key2str.values()))\n\n # Write out the data\n dashes = \"-\" * (key_width + val_width + 7)\n lines = [dashes]\n for (_, key), value in key2str.items():\n key_space = \" \" * (key_width - len(key))\n val_space = \" \" * (val_width - len(value))\n lines.append(f\"| {key}{key_space} | {value}{val_space} |\")\n lines.append(dashes)\n\n if tqdm is not None and hasattr(self.file, \"name\") and self.file.name == \"\":\n # Do not mess up with progress bar\n tqdm.write(\"\\n\".join(lines) + \"\\n\", file=sys.stdout, end=\"\")\n else:\n self.file.write(\"\\n\".join(lines) + \"\\n\")\n\n # Flush the output to the file\n self.file.flush()\n\n def _truncate(self, string: str) -> str:\n if len(string) > self.max_length:\n string = string[: self.max_length - 3] + \"...\"\n return string\n\n def write_sequence(self, sequence: list[str]) -> None:\n for i, elem in enumerate(sequence):\n self.file.write(elem)\n if i < len(sequence) - 1: # add space unless this is the last one\n self.file.write(\" \")\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.own_file:\n self.file.close()\n\nclass CSVOutputFormat(KVWriter):\n \"\"\"\n Log to a file, in a CSV format\n\n :param filename: the file to write the log to\n \"\"\"\n\n def __init__(self, filename: str):\n self.file = open(filename, \"w+\")\n self.keys: list[str] = []\n self.separator = \",\"\n self.quotechar = '\"'\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Add our current row to the history\n key_values = filter_excluded_keys(key_values, key_excluded, \"csv\")\n extra_keys = key_values.keys() - self.keys\n if extra_keys:\n self.keys.extend(extra_keys)\n self.file.seek(0)\n lines = self.file.readlines()\n self.file.seek(0)\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n self.file.write(key)\n self.file.write(\"\\n\")\n for line in lines[1:]:\n self.file.write(line[:-1])\n self.file.write(self.separator * len(extra_keys))\n self.file.write(\"\\n\")\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n value = key_values.get(key)\n\n if isinstance(value, Video):\n raise FormatUnsupportedError([\"csv\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"csv\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"csv\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"csv\"], \"hparam\")\n\n elif isinstance(value, str):\n # escape quotechars by prepending them with another quotechar\n value = value.replace(self.quotechar, self.quotechar + self.quotechar)\n\n # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers\n self.file.write(self.quotechar + value + self.quotechar)\n\n elif value is not None:\n self.file.write(str(value))\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n self.file.close()\n\nclass TensorBoardOutputFormat(KVWriter):\n \"\"\"\n Dumps key/value pairs into TensorBoard's numeric format.\n\n :param folder: the folder 
to write the log to\n \"\"\"\n\n def __init__(self, folder: str):\n assert SummaryWriter is not None, \"tensorboard is not installed, you can use `pip install tensorboard` to do so\"\n self.writer = SummaryWriter(log_dir=folder)\n self._is_closed = False\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n assert not self._is_closed, \"The SummaryWriter was closed, please re-create one.\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and \"tensorboard\" in excluded:\n continue\n\n if isinstance(value, np.ScalarType):\n if isinstance(value, str):\n # str is considered a np.ScalarType\n self.writer.add_text(key, value, step)\n else:\n self.writer.add_scalar(key, value, step)\n\n if isinstance(value, (th.Tensor, np.ndarray)):\n # Convert to Torch so it works with numpy<1.24 and torch<2.0\n self.writer.add_histogram(key, th.as_tensor(value), step)\n\n if isinstance(value, Video):\n self.writer.add_video(key, value.frames, step, value.fps)\n\n if isinstance(value, Figure):\n self.writer.add_figure(key, value.figure, step, close=value.close)\n\n if isinstance(value, Image):\n self.writer.add_image(key, value.image, step, dataformats=value.dataformats)\n\n if isinstance(value, HParam):\n # we don't use `self.writer.add_hparams` to have control over the log_dir\n experiment, session_start_info, session_end_info = hparams(value.hparam_dict, metric_dict=value.metric_dict)\n self.writer.file_writer.add_summary(experiment)\n self.writer.file_writer.add_summary(session_start_info)\n self.writer.file_writer.add_summary(session_end_info)\n\n # Flush the output to the file\n self.writer.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.writer:\n self.writer.close()\n self._is_closed = True\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 11330}, "tests/test_utils.py::131": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["make_atari_env", 
"make_vec_env"], "enclosing_function": "test_vec_env_monitor_kwargs", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\ndef make_atari_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv] | type[SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored VecEnv for Atari.\n It is a wrapper around ``make_vec_env`` that includes common preprocessing for Atari games.\n\n .. note::\n By default, the ``AtariWrapper`` uses ``terminal_on_life_loss=True``, which causes\n ``env.reset()`` to perform a no-op step instead of truly resetting when the environment\n terminates due to a loss of life (but not game over). 
To ensure ``reset()`` always\n resets the env, pass ``wrapper_kwargs=dict(terminal_on_life_loss=False)``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_kwargs: Optional keyword argument to pass to the ``AtariWrapper``\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :return: The wrapped environment\n \"\"\"\n return make_vec_env(\n env_id,\n n_envs=n_envs,\n seed=seed,\n start_index=start_index,\n monitor_dir=monitor_dir,\n wrapper_class=AtariWrapper,\n env_kwargs=env_kwargs,\n vec_env_cls=vec_env_cls,\n vec_env_kwargs=vec_env_kwargs,\n monitor_kwargs=monitor_kwargs,\n wrapper_kwargs=wrapper_kwargs,\n )", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 6701}, "tests/test_dict_env.py::98": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "PPO", "pytest"], "enclosing_function": "test_policy_hint", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 10, "n_files_resolved": 6, "n_chars_extracted": 1138}, "tests/test_custom_policy.py::82": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/sb2_compat/rmsprop_tf_like.py", "stable_baselines3/common/torch_layers.py"], "used_names": ["create_mlp", "nn"], "enclosing_function": "test_create_mlp", "extracted_code": "# Source: stable_baselines3/common/torch_layers.py\ndef create_mlp(\n input_dim: int,\n output_dim: int,\n net_arch: list[int],\n activation_fn: type[nn.Module] = nn.ReLU,\n squash_output: 
bool = False,\n with_bias: bool = True,\n pre_linear_modules: list[type[nn.Module]] | None = None,\n post_linear_modules: list[type[nn.Module]] | None = None,\n) -> list[nn.Module]:\n \"\"\"\n Create a multi layer perceptron (MLP), which is\n a collection of fully-connected layers each followed by an activation function.\n\n :param input_dim: Dimension of the input vector\n :param output_dim: Dimension of the output (last layer, for instance, the number of actions)\n :param net_arch: Architecture of the neural net\n It represents the number of units per layer.\n The length of this list is the number of layers.\n :param activation_fn: The activation function\n to use after each layer.\n :param squash_output: Whether to squash the output using a Tanh\n activation function\n :param with_bias: If set to False, the layers will not learn an additive bias\n :param pre_linear_modules: List of nn.Module to add before the linear layers.\n These modules should maintain the input tensor dimension (e.g. BatchNorm).\n The number of input features is passed to the module's constructor.\n Compared to post_linear_modules, they are used before the output layer (output_dim > 0).\n :param post_linear_modules: List of nn.Module to add after the linear layers\n (and before the activation function). These modules should maintain the input\n tensor dimension (e.g. Dropout, LayerNorm). They are not used after the\n output layer (output_dim > 0). The number of input features is passed to\n the module's constructor.\n :return: The list of layers of the neural network\n \"\"\"\n\n pre_linear_modules = pre_linear_modules or []\n post_linear_modules = post_linear_modules or []\n\n modules = []\n if len(net_arch) > 0:\n # BatchNorm maintains input dim\n for module in pre_linear_modules:\n modules.append(module(input_dim))\n\n modules.append(nn.Linear(input_dim, net_arch[0], bias=with_bias))\n\n # LayerNorm, Dropout maintain output dim\n for module in post_linear_modules:\n modules.append(module(net_arch[0]))\n\n modules.append(activation_fn())\n\n for idx in range(len(net_arch) - 1):\n for module in pre_linear_modules:\n modules.append(module(net_arch[idx]))\n\n modules.append(nn.Linear(net_arch[idx], net_arch[idx + 1], bias=with_bias))\n\n for module in post_linear_modules:\n modules.append(module(net_arch[idx + 1]))\n\n modules.append(activation_fn())\n\n if output_dim > 0:\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else input_dim\n # Only add BatchNorm before output layer\n for module in pre_linear_modules:\n modules.append(module(last_layer_dim))\n\n modules.append(nn.Linear(last_layer_dim, output_dim, bias=with_bias))\n if squash_output:\n modules.append(nn.Tanh())\n return modules", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 3147}, "tests/test_vec_envs.py::167": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "Monitor", "VecFrameStack", "pytest", "spaces"], "enclosing_function": "test_vecenv_custom_calls", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment 
before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import 
VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 7364}, "tests/test_callbacks.py::127": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DummyVecEnv", "EvalCallback", "IdentityEnv"], "enclosing_function": "test_eval_callback_vec_env", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. 
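The ``eval_freq`` warning in the docstring above matters in practice: with ``n_envs`` parallel environments, each callback call corresponds to ``n_envs`` timesteps. A hedged sketch applying the suggested correction (environment and timestep budget are placeholders):

```python
from stable_baselines3 import A2C
from stable_baselines3.common.callbacks import EvalCallback
from stable_baselines3.common.env_util import make_vec_env

n_envs = 4
train_env = make_vec_env("CartPole-v1", n_envs=n_envs)
eval_env = make_vec_env("CartPole-v1", n_envs=1)

# Evaluate roughly every 10_000 timesteps regardless of n_envs
callback = EvalCallback(eval_env, eval_freq=max(10_000 // n_envs, 1), n_eval_episodes=5)
A2C("MlpPolicy", train_env).learn(total_timesteps=50_000, callback=callback)
```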
It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use stochastic or deterministic actions.\n :param render: Whether or not to render the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type: \" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise 
AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 12003}, "tests/test_sde.py::103": {"resolved_imports": ["stable_baselines3/__init__.py"], "used_names": ["A2C", "PPO", "SAC", "pytest"], "enclosing_function": "test_state_dependent_noise", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 1467}, "tests/test_utils.py::394": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["get_parameters_by_name"], "enclosing_function": "test_get_parameters_by_name", "extracted_code": "# Source: stable_baselines3/common/utils.py\ndef get_parameters_by_name(model: th.nn.Module, included_names: Iterable[str]) -> list[th.Tensor]:\n \"\"\"\n Extract parameters from the state dict of ``model``\n if the name contains one of the strings in ``included_names``.\n\n :param model: the model where the parameters come from.\n :param included_names: substrings of names to include.\n :return: List of parameters values (Pytorch tensors)\n that matches the queried names.\n \"\"\"\n return [param for name, param in model.state_dict().items() if any([key in name for key in included_names])]", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 611}, "tests/test_save_load.py::433": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "SAC", "TD3", "pytest", "warnings"], "enclosing_function": "test_warn_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]",
"n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 1493}, "tests/test_monitor.py::53": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_monitor", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 5236}, "tests/test_callbacks.py::108": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "CallbackList", "CheckpointCallback", "DDPG", "DQN", "EvalCallback", "EveryNTimesteps", "LogEveryNTimesteps", "PPO", "SAC", "StopTrainingOnMaxEpisodes", "StopTrainingOnNoModelImprovement", "StopTrainingOnRewardThreshold", "TD3", "make_vec_env", "os", "pytest", "shutil"], "enclosing_function": "test_callbacks", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass CallbackList(BaseCallback):\n \"\"\"\n Class for chaining callbacks.\n\n :param callbacks: A list of callbacks that will be called\n sequentially.\n \"\"\"\n\n def __init__(self, callbacks: list[BaseCallback]):\n super().__init__()\n assert isinstance(callbacks, list)\n self.callbacks = callbacks\n\n def _init_callback(self) -> None:\n for callback in self.callbacks:\n callback.init_callback(self.model)\n\n # Fix for https://github.com/DLR-RM/stable-baselines3/issues/1791\n # pass through the parent callback to all children\n callback.parent = self.parent\n\n def _on_training_start(self) -> None:\n for callback in self.callbacks:\n callback.on_training_start(self.locals, self.globals)\n\n def _on_rollout_start(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_start()\n\n def _on_step(self) -> bool:\n continue_training = True\n for callback in self.callbacks:\n # Return False (stop training) if at least one callback returns False\n continue_training = callback.on_step() and continue_training\n return continue_training\n\n def _on_rollout_end(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_end()\n\n def _on_training_end(self) -> None:\n for callback in self.callbacks:\n callback.on_training_end()\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n for callback in self.callbacks:\n callback.update_locals(locals_)\n\nclass CheckpointCallback(BaseCallback):\n \"\"\"\n Callback for saving a model every ``save_freq`` calls\n to ``env.step()``.\n By default, it only saves model checkpoints,\n you need to pass ``save_replay_buffer=True``,\n and ``save_vecnormalize=True`` to also save replay buffer
checkpoints\n and normalization statistics checkpoints.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``save_freq = max(save_freq // n_envs, 1)``\n\n :param save_freq: Save checkpoints every ``save_freq`` call of the callback.\n :param save_path: Path to the folder where the model will be saved.\n :param name_prefix: Common prefix to the saved models\n :param save_replay_buffer: Save the model replay buffer\n :param save_vecnormalize: Save the ``VecNormalize`` statistics\n :param verbose: Verbosity level: 0 for no output, 2 for indicating when saving model checkpoint\n \"\"\"\n\n def __init__(\n self,\n save_freq: int,\n save_path: str,\n name_prefix: str = \"rl_model\",\n save_replay_buffer: bool = False,\n save_vecnormalize: bool = False,\n verbose: int = 0,\n ):\n super().__init__(verbose)\n self.save_freq = save_freq\n self.save_path = save_path\n self.name_prefix = name_prefix\n self.save_replay_buffer = save_replay_buffer\n self.save_vecnormalize = save_vecnormalize\n\n def _init_callback(self) -> None:\n # Create folder if needed\n if self.save_path is not None:\n os.makedirs(self.save_path, exist_ok=True)\n\n def _checkpoint_path(self, checkpoint_type: str = \"\", extension: str = \"\") -> str:\n \"\"\"\n Helper to get checkpoint path for each type of checkpoint.\n\n :param checkpoint_type: empty for the model, \"replay_buffer_\"\n or \"vecnormalize_\" for the other checkpoints.\n :param extension: Checkpoint file extension (zip for model, pkl for others)\n :return: Path to the checkpoint\n \"\"\"\n return os.path.join(self.save_path, f\"{self.name_prefix}_{checkpoint_type}{self.num_timesteps}_steps.{extension}\")\n\n def _on_step(self) -> bool:\n if self.n_calls % self.save_freq == 0:\n model_path = self._checkpoint_path(extension=\"zip\")\n self.model.save(model_path)\n if self.verbose >= 2:\n print(f\"Saving model checkpoint to {model_path}\")\n\n if self.save_replay_buffer and hasattr(self.model, \"replay_buffer\") and self.model.replay_buffer is not None:\n # If model has a replay buffer, save it too\n replay_buffer_path = self._checkpoint_path(\"replay_buffer_\", extension=\"pkl\")\n self.model.save_replay_buffer(replay_buffer_path) # type: ignore[attr-defined]\n if self.verbose > 1:\n print(f\"Saving model replay buffer checkpoint to {replay_buffer_path}\")\n\n if self.save_vecnormalize and self.model.get_vec_normalize_env() is not None:\n # Save the VecNormalize statistics\n vec_normalize_path = self._checkpoint_path(\"vecnormalize_\", extension=\"pkl\")\n self.model.get_vec_normalize_env().save(vec_normalize_path) # type: ignore[union-attr]\n if self.verbose >= 2:\n print(f\"Saving model VecNormalize to {vec_normalize_path}\")\n\n return True\n\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type\" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to 
the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, 
locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\nclass StopTrainingOnRewardThreshold(BaseCallback):\n \"\"\"\n Stop the training once a threshold in episodic reward\n has been reached (i.e. when the model is good enough).\n\n It must be used with the ``EvalCallback``.\n\n :param reward_threshold: Minimum expected reward per episode\n to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because episodic reward\n threshold reached\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, reward_threshold: float, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.reward_threshold = reward_threshold\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnMinimumReward`` callback must be used with an ``EvalCallback``\"\n continue_training = bool(self.parent.best_mean_reward < self.reward_threshold)\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because the mean reward {self.parent.best_mean_reward:.2f} \"\n f\"is above the threshold {self.reward_threshold}\"\n )\n return continue_training\n\nclass EveryNTimesteps(EventCallback):\n \"\"\"\n Trigger a callback every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two trigger.\n :param callback: Callback that will be called\n when the event is triggered.\n \"\"\"\n\n def __init__(self, n_steps: int, callback: BaseCallback):\n super().__init__(callback)\n self.n_steps = n_steps\n self.last_time_trigger = 0\n\n def _on_step(self) -> bool:\n if (self.num_timesteps - self.last_time_trigger) >= self.n_steps:\n self.last_time_trigger = self.num_timesteps\n return self._on_event()\n return True\n\nclass LogEveryNTimesteps(EveryNTimesteps):\n \"\"\"\n Log data every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two trigger.\n \"\"\"\n\n def __init__(self, n_steps: int):\n super().__init__(n_steps, callback=ConvertCallback(self._log_data))\n\n def _log_data(self, _locals: dict[str, Any], _globals: dict[str, Any]) -> bool:\n self.model.dump_logs()\n return True\n\nclass StopTrainingOnMaxEpisodes(BaseCallback):\n \"\"\"\n Stop the training once a maximum number of episodes are played.\n\n For multiple environments presumes that, the desired behavior is that the agent trains on each env for ``max_episodes``\n and in total for ``max_episodes * n_envs`` episodes.\n\n :param max_episodes: Maximum number of episodes to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about when training ended by\n reaching ``max_episodes``\n \"\"\"\n\n def __init__(self, max_episodes: int, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_episodes = max_episodes\n self._total_max_episodes = max_episodes\n self.n_episodes = 0\n\n def _init_callback(self) -> None:\n # At start set total max according to number of environments\n self._total_max_episodes = self.max_episodes * self.training_env.num_envs\n\n def _on_step(self) -> bool:\n # Check that the `dones` local variable is defined\n assert \"dones\" in self.locals, \"`dones` variable is not defined, please check your code next to `callback.on_step()`\"\n self.n_episodes += np.sum(self.locals[\"dones\"]).item()\n\n continue_training = self.n_episodes < self._total_max_episodes\n\n if self.verbose >= 1 and not 
continue_training:\n mean_episodes_per_env = self.n_episodes / self.training_env.num_envs\n mean_ep_str = (\n f\"with an average of {mean_episodes_per_env:.2f} episodes per env\" if self.training_env.num_envs > 1 else \"\"\n )\n\n print(\n f\"Stopping training with a total of {self.num_timesteps} steps because the \"\n f\"{self.locals.get('tb_log_name')} model reached max_episodes={self.max_episodes}, \"\n f\"by playing for {self.n_episodes} episodes \"\n f\"{mean_ep_str}\"\n )\n return continue_training\n\nclass StopTrainingOnNoModelImprovement(BaseCallback):\n \"\"\"\n Stop the training early if there is no new best model (new best mean reward) after more than N consecutive evaluations.\n\n It is possible to define a minimum number of evaluations before start to count evaluations without improvement.\n\n It must be used with the ``EvalCallback``.\n\n :param max_no_improvement_evals: Maximum number of consecutive evaluations without a new best model.\n :param min_evals: Number of evaluations before start to count evaluations without improvements.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because no new best model\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, max_no_improvement_evals: int, min_evals: int = 0, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_no_improvement_evals = max_no_improvement_evals\n self.min_evals = min_evals\n self.last_best_mean_reward = -np.inf\n self.no_improvement_evals = 0\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnNoModelImprovement`` callback must be used with an ``EvalCallback``\"\n\n continue_training = True\n\n if self.n_calls > self.min_evals:\n if self.parent.best_mean_reward > self.last_best_mean_reward:\n self.no_improvement_evals = 0\n else:\n self.no_improvement_evals += 1\n if self.no_improvement_evals > self.max_no_improvement_evals:\n continue_training = False\n\n self.last_best_mean_reward = self.parent.best_mean_reward\n\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because there was no new best model in the last {self.no_improvement_evals:d} evaluations\"\n )\n\n return continue_training\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many 
things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 26817}, "tests/test_vec_envs.py::132": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "Monitor", "VecFrameStack", "pytest", "spaces"], "enclosing_function": "test_vecenv_custom_calls", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to 
log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n 
Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 7364}, "tests/test_logger.py::261": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["FormatUnsupportedError", "Video", "make_output_format", "pytest"], "enclosing_function": "test_unsupported_video_format", "extracted_code": "# Source: stable_baselines3/common/logger.py\nclass Video:\n \"\"\"\n Video data class storing the video frames and the frame per seconds\n\n :param frames: frames to create the video from\n :param fps: frames per second\n \"\"\"\n\n def __init__(self, frames: th.Tensor, fps: float):\n self.frames = frames\n self.fps = fps\n\nclass FormatUnsupportedError(NotImplementedError):\n \"\"\"\n Custom error to display informative message when\n a value is not supported by some formats.\n\n :param unsupported_formats: A sequence of
unsupported formats,\n for instance ``[\"stdout\"]``.\n :param value_description: Description of the value that cannot be logged by this format.\n \"\"\"\n\n def __init__(self, unsupported_formats: Sequence[str], value_description: str):\n if len(unsupported_formats) > 1:\n format_str = f\"formats {', '.join(unsupported_formats)} are\"\n else:\n format_str = f\"format {unsupported_formats[0]} is\"\n super().__init__(\n f\"The {format_str} not supported for the {value_description} value logged.\\n\"\n f\"You can exclude formats via the `exclude` parameter of the logger's `record` function.\"\n )\n\ndef make_output_format(_format: str, log_dir: str, log_suffix: str = \"\") -> KVWriter:\n \"\"\"\n return a logger for the requested format\n\n :param _format: the requested format to log to ('stdout', 'log', 'json' or 'csv' or 'tensorboard')\n :param log_dir: the logging directory\n :param log_suffix: the suffix for the log file\n :return: the logger\n \"\"\"\n os.makedirs(log_dir, exist_ok=True)\n if _format == \"stdout\":\n return HumanOutputFormat(sys.stdout)\n elif _format == \"log\":\n return HumanOutputFormat(os.path.join(log_dir, f\"log{log_suffix}.txt\"))\n elif _format == \"json\":\n return JSONOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.json\"))\n elif _format == \"csv\":\n return CSVOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.csv\"))\n elif _format == \"tensorboard\":\n return TensorBoardOutputFormat(log_dir)\n else:\n raise ValueError(f\"Unknown format specified: {_format}\")", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 2178}, "tests/test_vec_envs.py::420": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "pytest", "spaces"], "enclosing_function": "test_vecenv_wrapper_getattr", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 1159}, "tests/test_vec_envs.py::419": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "pytest", "spaces"], "enclosing_function": "test_vecenv_wrapper_getattr", "extracted_code": "# Source: 
stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 1159}, "tests/test_vec_monitor.py::94": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "get_monitor_files", "load_results", "os", "uuid"], "enclosing_function": "test_vec_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import 
StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 3717}, "tests/test_vec_normalize.py::350": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "test_normalize_external", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_deterministic.py::39": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/noise.py"], "used_names": ["A2C", "DQN", "NormalActionNoise", "PPO", "SAC", "TD3", "pytest"], "enclosing_function": "test_deterministic_training_common", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/noise.py\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n A Gaussian action noise.\n\n :param mean: Mean value of the noise\n :param sigma: Scale of the noise (std here)\n :param dtype: Type of the output noise\n \"\"\"\n\n def __init__(self, mean: np.ndarray, sigma: np.ndarray, dtype: DTypeLike = np.float32) -> None:\n self._mu = mean\n self._sigma = sigma\n self._dtype = dtype\n super().__init__()\n\n def __call__(self) -> np.ndarray:\n return np.random.normal(self._mu, self._sigma).astype(self._dtype)\n\n def __repr__(self) -> str:\n return f\"NormalActionNoise(mu={self._mu}, sigma={self._sigma})\"", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 2773}, "tests/test_custom_policy.py::81": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/sb2_compat/rmsprop_tf_like.py", "stable_baselines3/common/torch_layers.py"], "used_names": ["create_mlp", "nn"], "enclosing_function": "test_create_mlp", "extracted_code": "# Source: stable_baselines3/common/torch_layers.py\ndef create_mlp(\n input_dim: int,\n output_dim: int,\n net_arch: list[int],\n activation_fn: type[nn.Module] = nn.ReLU,\n squash_output: bool = False,\n with_bias: bool = True,\n pre_linear_modules: list[type[nn.Module]] | None = None,\n post_linear_modules: list[type[nn.Module]] | None = None,\n) -> list[nn.Module]:\n \"\"\"\n Create a multi layer perceptron (MLP), which is\n a collection of fully-connected layers each followed by an activation function.\n\n :param input_dim: Dimension of the input vector\n :param output_dim: Dimension of the output (last layer, for instance, the number of actions)\n :param net_arch: Architecture of the neural net\n It represents the number of units per layer.\n The length of this list is the number of layers.\n :param activation_fn: The activation function\n to use after each layer.\n :param squash_output: Whether to squash the output using a Tanh\n activation function\n :param with_bias: If set to False, the layers will not learn an additive bias\n :param pre_linear_modules: List of nn.Module to add before the linear layers.\n These modules should maintain the input tensor dimension (e.g.
BatchNorm).\n The number of input features is passed to the module's constructor.\n Compared to post_linear_modules, they are used before the output layer (output_dim > 0).\n :param post_linear_modules: List of nn.Module to add after the linear layers\n (and before the activation function). These modules should maintain the input\n tensor dimension (e.g. Dropout, LayerNorm). They are not used after the\n output layer (output_dim > 0). The number of input features is passed to\n the module's constructor.\n :return: The list of layers of the neural network\n \"\"\"\n\n pre_linear_modules = pre_linear_modules or []\n post_linear_modules = post_linear_modules or []\n\n modules = []\n if len(net_arch) > 0:\n # BatchNorm maintains input dim\n for module in pre_linear_modules:\n modules.append(module(input_dim))\n\n modules.append(nn.Linear(input_dim, net_arch[0], bias=with_bias))\n\n # LayerNorm, Dropout maintain output dim\n for module in post_linear_modules:\n modules.append(module(net_arch[0]))\n\n modules.append(activation_fn())\n\n for idx in range(len(net_arch) - 1):\n for module in pre_linear_modules:\n modules.append(module(net_arch[idx]))\n\n modules.append(nn.Linear(net_arch[idx], net_arch[idx + 1], bias=with_bias))\n\n for module in post_linear_modules:\n modules.append(module(net_arch[idx + 1]))\n\n modules.append(activation_fn())\n\n if output_dim > 0:\n last_layer_dim = net_arch[-1] if len(net_arch) > 0 else input_dim\n # Only add BatchNorm before output layer\n for module in pre_linear_modules:\n modules.append(module(last_layer_dim))\n\n modules.append(nn.Linear(last_layer_dim, output_dim, bias=with_bias))\n if squash_output:\n modules.append(nn.Tanh())\n return modules", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 3147}, "tests/test_her.py::194": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "DDPG", "DQN", "HerReplayBuffer", "SAC", "TD3", "deepcopy", "make_vec_env", "os", "pytest"], "enclosing_function": "test_save_load", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 7184}, "tests/test_buffers.py::241": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DictRolloutBuffer", "RolloutBuffer", "pytest"], "enclosing_function": "test_custom_rollout_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom 
stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/buffers.py\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. 
To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield 
self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n 
\"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n returns=self.to_torch(self.returns[batch_inds].flatten()),\n )", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 14971}, "tests/test_vec_stacked_obs.py::36": {"resolved_imports": ["stable_baselines3/common/vec_env/stacked_observations.py"], "used_names": ["spaces"], "enclosing_function": "test_compute_stacking_multidim_box_channel_first", "extracted_code": "", "n_imports_parsed": 3, "n_files_resolved": 1, "n_chars_extracted": 0}, "tests/test_utils.py::126": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", 
"stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["MaxAndSkipEnv", "make_vec_env"], "enclosing_function": "test_vec_env_wrapper_kwargs", "extracted_code": "# Source: stable_baselines3/common/atari_wrappers.py\nclass MaxAndSkipEnv(gym.Wrapper[np.ndarray, int, np.ndarray, int]):\n \"\"\"\n Return only every ``skip``-th frame (frameskipping)\n and return the max between the two last frames.\n\n :param env: Environment to wrap\n :param skip: Number of ``skip``-th frame\n The same action will be taken ``skip`` times.\n \"\"\"\n\n def __init__(self, env: gym.Env, skip: int = 4) -> None:\n super().__init__(env)\n # most recent raw observations (for max pooling across time steps)\n assert env.observation_space.dtype is not None, \"No dtype specified for the observation space\"\n assert env.observation_space.shape is not None, \"No shape defined for the observation space\"\n self._obs_buffer = np.zeros((2, *env.observation_space.shape), dtype=env.observation_space.dtype)\n self._skip = skip\n\n def step(self, action: int) -> AtariStepReturn:\n \"\"\"\n Step the environment with the given action\n Repeat action, sum reward, and max over last observations.\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n total_reward = 0.0\n terminated = truncated = False\n for i in range(self._skip):\n obs, reward, terminated, truncated, info = self.env.step(action)\n done = terminated or truncated\n if i == self._skip - 2:\n self._obs_buffer[0] = obs\n if i == self._skip - 1:\n self._obs_buffer[1] = obs\n total_reward += float(reward)\n if done:\n break\n # Note that the observation on the done=True frame\n # doesn't matter\n max_frame = self._obs_buffer.max(axis=0)\n\n return max_frame, total_reward, terminated, truncated, info\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. 
with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 6222}, "tests/test_vec_normalize.py::322": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/running_mean_std.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": [], "enclosing_function": "test_get_original", "extracted_code": "", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 0}, "tests/test_buffers.py::207": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DictReplayBuffer", "DictRolloutBuffer", "ReplayBuffer", "RolloutBuffer", "numpy", "pytest", "spaces"], "enclosing_function": "test_buffer_dtypes", "extracted_code": "# Source: stable_baselines3/common/buffers.py\nclass ReplayBuffer(BaseBuffer):\n \"\"\"\n Replay buffer used 
in off-policy algorithms like SAC/TD3.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n of the replay buffer which reduces by almost a factor two the memory used,\n at a cost of more complexity.\n See https://github.com/DLR-RM/stable-baselines3/issues/37#issuecomment-637501195\n and https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n Cannot be used in combination with handle_timeout_termination.\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observations: np.ndarray\n next_observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n dones: np.ndarray\n timeouts: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n # Adjust buffer size\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n # there is a bug if both optimize_memory_usage and handle_timeout_termination are true\n # see https://github.com/DLR-RM/stable-baselines3/issues/934\n if optimize_memory_usage and handle_timeout_termination:\n raise ValueError(\n \"ReplayBuffer does not support optimize_memory_usage = True \"\n \"and handle_timeout_termination = True simultaneously.\"\n )\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n if not optimize_memory_usage:\n # When optimizing memory, `observations` contains also the next observation\n self.next_observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=observation_space.dtype)\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n total_memory_usage: float = (\n self.observations.nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n )\n\n if not optimize_memory_usage:\n total_memory_usage += self.next_observations.nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add(\n self,\n obs: np.ndarray,\n next_obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: 
list[dict[str, Any]],\n ) -> None:\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n next_obs = next_obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n # Copy to avoid modification by reference\n self.observations[self.pos] = np.array(obs)\n\n if self.optimize_memory_usage:\n self.observations[(self.pos + 1) % self.buffer_size] = np.array(next_obs)\n else:\n self.next_observations[self.pos] = np.array(next_obs)\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample(self, batch_size: int, env: VecNormalize | None = None) -> ReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n Custom sampling when using memory efficient variant,\n as we should not sample the element with index `self.pos`\n See https://github.com/DLR-RM/stable-baselines3/pull/28#issuecomment-637559274\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n if not self.optimize_memory_usage:\n return super().sample(batch_size=batch_size, env=env)\n # Do not sample the element with index `self.pos` as the transitions is invalid\n # (we use only one array to store `obs` and `next_obs`)\n if self.full:\n batch_inds = (np.random.randint(1, self.buffer_size, size=batch_size) + self.pos) % self.buffer_size\n else:\n batch_inds = np.random.randint(0, self.pos, size=batch_size)\n return self._get_samples(batch_inds, env=env)\n\n def _get_samples(self, batch_inds: np.ndarray, env: VecNormalize | None = None) -> ReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n if self.optimize_memory_usage:\n next_obs = self._normalize_obs(self.observations[(batch_inds + 1) % self.buffer_size, env_indices, :], env)\n else:\n next_obs = self._normalize_obs(self.next_observations[batch_inds, env_indices, :], env)\n\n data = (\n self._normalize_obs(self.observations[batch_inds, env_indices, :], env),\n self.actions[batch_inds, env_indices, :],\n next_obs,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of False)\n (self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(-1, 1),\n self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env),\n )\n return ReplayBufferSamples(*tuple(map(self.to_torch, data)))\n\n @staticmethod\n def _maybe_cast_dtype(dtype: np.typing.DTypeLike | None) -> np.typing.DTypeLike | None:\n \"\"\"\n Cast `np.float64` action datatype to `np.float32`,\n keep the others dtype unchanged.\n See GH#1572 for more information.\n\n :param dtype: The original action space dtype\n :return: ``np.float32`` if the dtype was float64,\n the original dtype otherwise.\n \"\"\"\n if dtype == np.float64:\n return np.float32\n return dtype\n\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n 
Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. 
To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield 
self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictReplayBuffer(ReplayBuffer):\n \"\"\"\n Dict Replay buffer used in off-policy algorithms like SAC/TD3.\n Extends the ReplayBuffer to use dictionary observations\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Enable a memory efficient variant\n Disabled for now (see https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702)\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n next_observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n n_envs: int = 1,\n optimize_memory_usage: bool = False,\n handle_timeout_termination: bool = True,\n ):\n super(ReplayBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictReplayBuffer must be used with Dict obs space only\"\n self.buffer_size = max(buffer_size // n_envs, 1)\n\n # Check that the replay buffer can fit into the memory\n if psutil is not None:\n mem_available = psutil.virtual_memory().available\n\n assert not optimize_memory_usage, \"DictReplayBuffer does not support optimize_memory_usage\"\n # disabling as this adds quite a bit of complexity\n # https://github.com/DLR-RM/stable-baselines3/pull/243#discussion_r531535702\n self.optimize_memory_usage = optimize_memory_usage\n\n self.observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n self.next_observations = {\n key: np.zeros((self.buffer_size, self.n_envs, *_obs_shape), dtype=observation_space[key].dtype)\n for key, _obs_shape in self.obs_shape.items()\n }\n\n self.actions = np.zeros(\n (self.buffer_size, self.n_envs, self.action_dim), dtype=self._maybe_cast_dtype(action_space.dtype)\n )\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.dones = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n # Handle timeouts termination properly if needed\n # see https://github.com/DLR-RM/stable-baselines3/issues/284\n self.handle_timeout_termination = handle_timeout_termination\n self.timeouts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n\n if psutil is not None:\n obs_nbytes = 0\n for _, obs in 
self.observations.items():\n obs_nbytes += obs.nbytes\n\n total_memory_usage: float = obs_nbytes + self.actions.nbytes + self.rewards.nbytes + self.dones.nbytes\n if not optimize_memory_usage:\n next_obs_nbytes = 0\n for _, obs in self.observations.items():\n next_obs_nbytes += obs.nbytes\n total_memory_usage += next_obs_nbytes\n\n if total_memory_usage > mem_available:\n # Convert to GB\n total_memory_usage /= 1e9\n mem_available /= 1e9\n warnings.warn(\n \"This system does not have apparently enough memory to store the complete \"\n f\"replay buffer {total_memory_usage:.2f}GB > {mem_available:.2f}GB\"\n )\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n next_obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n done: np.ndarray,\n infos: list[dict[str, Any]],\n ) -> None:\n # Copy to avoid modification by reference\n for key in self.observations.keys():\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs[key] = obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = np.array(obs[key])\n\n for key in self.next_observations.keys():\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n next_obs[key] = next_obs[key].reshape((self.n_envs,) + self.obs_shape[key])\n self.next_observations[key][self.pos] = np.array(next_obs[key])\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.dones[self.pos] = np.array(done)\n\n if self.handle_timeout_termination:\n self.timeouts[self.pos] = np.array([info.get(\"TimeLimit.truncated\", False) for info in infos])\n\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n self.pos = 0\n\n def sample( # type: ignore[override]\n self,\n batch_size: int,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n \"\"\"\n Sample elements from the replay buffer.\n\n :param batch_size: Number of element to sample\n :param env: associated gym VecEnv\n to normalize the observations/rewards when sampling\n :return:\n \"\"\"\n return super(ReplayBuffer, self).sample(batch_size=batch_size, env=env)\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictReplayBufferSamples:\n # Sample randomly the env idx\n env_indices = np.random.randint(0, high=self.n_envs, size=(len(batch_inds),))\n\n # Normalize if needed and remove extra dimension (we are using only one env for now)\n obs_ = self._normalize_obs({key: obs[batch_inds, env_indices, :] for key, obs in self.observations.items()}, env)\n next_obs_ = self._normalize_obs(\n {key: obs[batch_inds, env_indices, :] for key, obs in self.next_observations.items()}, env\n )\n\n assert isinstance(obs_, dict)\n assert isinstance(next_obs_, dict)\n # Convert to torch tensor\n observations = {key: self.to_torch(obs) for key, obs in obs_.items()}\n next_observations = {key: self.to_torch(obs) for key, obs in next_obs_.items()}\n\n return DictReplayBufferSamples(\n observations=observations,\n actions=self.to_torch(self.actions[batch_inds, env_indices]),\n next_observations=next_observations,\n # Only use dones that are not due to timeouts\n # deactivated by default (timeouts is initialized as an array of 
False)\n dones=self.to_torch(self.dones[batch_inds, env_indices] * (1 - self.timeouts[batch_inds, env_indices])).reshape(\n -1, 1\n ),\n rewards=self.to_torch(self._normalize_reward(self.rewards[batch_inds, env_indices].reshape(-1, 1), env)),\n )\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = 
log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n returns=self.to_torch(self.returns[batch_inds].flatten()),\n )", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 30201}, "tests/test_sde.py::60": {"resolved_imports": ["stable_baselines3/__init__.py"], "used_names": ["PPO", "pytest"], "enclosing_function": "test_sde_check", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 549}, "tests/test_spaces.py::174": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", 
"stable_baselines3/common/evaluation.py"], "used_names": ["A2C", "pytest", "spaces"], "enclosing_function": "test_multidim_binary_not_supported", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 562}, "tests/test_cnn.py::354": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/preprocessing.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DQN", "DummyVecEnv", "FakeImageEnv", "PPO", "SAC", "TD3", "VecNormalize", "pytest"], "enclosing_function": "test_image_like_input", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n 
FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 10, "n_files_resolved": 4, "n_chars_extracted": 5510}, "tests/test_logger.py::114": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], 
"used_names": ["A2C", "CSVOutputFormat", "HumanOutputFormat", "TensorBoardOutputFormat", "configure", "os"], "enclosing_function": "test_set_logger", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/logger.py\nclass HumanOutputFormat(KVWriter, SeqWriter):\n \"\"\"A human-readable output format producing ASCII tables of key-value pairs.\n\n Set attribute ``max_length`` to change the maximum length of keys and values\n to write to output (or specify it when calling ``__init__``).\n\n :param filename_or_file: the file to write the log to\n :param max_length: the maximum length of keys and values to write to output.\n Outputs longer than this will be truncated. An error will be raised\n if multiple keys are truncated to the same value. The maximum output\n width will be ``2*max_length + 7``. The default of 36 produces output\n no longer than 79 characters wide.\n \"\"\"\n\n def __init__(self, filename_or_file: str | TextIO, max_length: int = 36):\n self.max_length = max_length\n if isinstance(filename_or_file, str):\n self.file = open(filename_or_file, \"w\")\n self.own_file = True\n elif isinstance(filename_or_file, TextIOBase) or hasattr(filename_or_file, \"write\"):\n # Note: in theory `TextIOBase` check should be sufficient,\n # in practice, libraries don't always inherit from it, see GH#1598\n self.file = filename_or_file # type: ignore[assignment]\n self.own_file = False\n else:\n raise ValueError(f\"Expected file or str, got {filename_or_file}\")\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Create strings for printing\n key2str = {}\n tag = \"\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and (\"stdout\" in excluded or \"log\" in excluded):\n continue\n\n elif isinstance(value, Video):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"stdout\", \"log\"], \"hparam\")\n\n elif isinstance(value, float):\n # Align left\n value_str = f\"{value:<8.3g}\"\n else:\n value_str = str(value)\n\n if key.find(\"/\") > 0: # Find tag and add it to the dict\n tag = key[: key.find(\"/\") + 1]\n key2str[(tag, self._truncate(tag))] = \"\"\n # Remove tag from key and indent the key\n if len(tag) > 0 and tag in key:\n key = f\"{'':3}{key[len(tag) :]}\"\n\n truncated_key = self._truncate(key)\n if (tag, truncated_key) in key2str:\n raise ValueError(\n f\"Key '{key}' truncated to '{truncated_key}' that already exists. 
Consider increasing `max_length`.\"\n )\n key2str[(tag, truncated_key)] = self._truncate(value_str)\n\n # Find max widths\n if len(key2str) == 0:\n warnings.warn(\"Tried to write empty key-value dict\")\n return\n else:\n tagless_keys = map(lambda x: x[1], key2str.keys())\n key_width = max(map(len, tagless_keys))\n val_width = max(map(len, key2str.values()))\n\n # Write out the data\n dashes = \"-\" * (key_width + val_width + 7)\n lines = [dashes]\n for (_, key), value in key2str.items():\n key_space = \" \" * (key_width - len(key))\n val_space = \" \" * (val_width - len(value))\n lines.append(f\"| {key}{key_space} | {value}{val_space} |\")\n lines.append(dashes)\n\n if tqdm is not None and hasattr(self.file, \"name\") and self.file.name == \"\":\n # Do not mess up with progress bar\n tqdm.write(\"\\n\".join(lines) + \"\\n\", file=sys.stdout, end=\"\")\n else:\n self.file.write(\"\\n\".join(lines) + \"\\n\")\n\n # Flush the output to the file\n self.file.flush()\n\n def _truncate(self, string: str) -> str:\n if len(string) > self.max_length:\n string = string[: self.max_length - 3] + \"...\"\n return string\n\n def write_sequence(self, sequence: list[str]) -> None:\n for i, elem in enumerate(sequence):\n self.file.write(elem)\n if i < len(sequence) - 1: # add space unless this is the last one\n self.file.write(\" \")\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.own_file:\n self.file.close()\n\nclass CSVOutputFormat(KVWriter):\n \"\"\"\n Log to a file, in a CSV format\n\n :param filename: the file to write the log to\n \"\"\"\n\n def __init__(self, filename: str):\n self.file = open(filename, \"w+\")\n self.keys: list[str] = []\n self.separator = \",\"\n self.quotechar = '\"'\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n # Add our current row to the history\n key_values = filter_excluded_keys(key_values, key_excluded, \"csv\")\n extra_keys = key_values.keys() - self.keys\n if extra_keys:\n self.keys.extend(extra_keys)\n self.file.seek(0)\n lines = self.file.readlines()\n self.file.seek(0)\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n self.file.write(key)\n self.file.write(\"\\n\")\n for line in lines[1:]:\n self.file.write(line[:-1])\n self.file.write(self.separator * len(extra_keys))\n self.file.write(\"\\n\")\n for i, key in enumerate(self.keys):\n if i > 0:\n self.file.write(\",\")\n value = key_values.get(key)\n\n if isinstance(value, Video):\n raise FormatUnsupportedError([\"csv\"], \"video\")\n\n elif isinstance(value, Figure):\n raise FormatUnsupportedError([\"csv\"], \"figure\")\n\n elif isinstance(value, Image):\n raise FormatUnsupportedError([\"csv\"], \"image\")\n\n elif isinstance(value, HParam):\n raise FormatUnsupportedError([\"csv\"], \"hparam\")\n\n elif isinstance(value, str):\n # escape quotechars by prepending them with another quotechar\n value = value.replace(self.quotechar, self.quotechar + self.quotechar)\n\n # additionally wrap text with quotechars so that any delimiters in the text are ignored by csv readers\n self.file.write(self.quotechar + value + self.quotechar)\n\n elif value is not None:\n self.file.write(str(value))\n self.file.write(\"\\n\")\n self.file.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n self.file.close()\n\nclass TensorBoardOutputFormat(KVWriter):\n \"\"\"\n Dumps key/value pairs into TensorBoard's numeric format.\n\n :param folder: the folder 
to write the log to\n \"\"\"\n\n def __init__(self, folder: str):\n assert SummaryWriter is not None, \"tensorboard is not installed, you can use `pip install tensorboard` to do so\"\n self.writer = SummaryWriter(log_dir=folder)\n self._is_closed = False\n\n def write(self, key_values: dict[str, Any], key_excluded: dict[str, tuple[str, ...]], step: int = 0) -> None:\n assert not self._is_closed, \"The SummaryWriter was closed, please re-create one.\"\n for (key, value), (_, excluded) in zip(sorted(key_values.items()), sorted(key_excluded.items()), strict=True):\n if excluded is not None and \"tensorboard\" in excluded:\n continue\n\n if isinstance(value, np.ScalarType):\n if isinstance(value, str):\n # str is considered a np.ScalarType\n self.writer.add_text(key, value, step)\n else:\n self.writer.add_scalar(key, value, step)\n\n if isinstance(value, (th.Tensor, np.ndarray)):\n # Convert to Torch so it works with numpy<1.24 and torch<2.0\n self.writer.add_histogram(key, th.as_tensor(value), step)\n\n if isinstance(value, Video):\n self.writer.add_video(key, value.frames, step, value.fps)\n\n if isinstance(value, Figure):\n self.writer.add_figure(key, value.figure, step, close=value.close)\n\n if isinstance(value, Image):\n self.writer.add_image(key, value.image, step, dataformats=value.dataformats)\n\n if isinstance(value, HParam):\n # we don't use `self.writer.add_hparams` to have control over the log_dir\n experiment, session_start_info, session_end_info = hparams(value.hparam_dict, metric_dict=value.metric_dict)\n self.writer.file_writer.add_summary(experiment)\n self.writer.file_writer.add_summary(session_start_info)\n self.writer.file_writer.add_summary(session_end_info)\n\n # Flush the output to the file\n self.writer.flush()\n\n def close(self) -> None:\n \"\"\"\n closes the file\n \"\"\"\n if self.writer:\n self.writer.close()\n self._is_closed = True\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 11330}, "tests/test_envs.py::57": {"resolved_imports": ["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["check_env", "pytest", "warnings"], "enclosing_function": "test_custom_envs", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an 
environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. 
https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 3498}, "tests/test_vec_extract_dict_obs.py::79": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["VecExtractDictObs"], "enclosing_function": "test_extract_dict_obs", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\n\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",", "n_imports_parsed": 4, "n_files_resolved": 2, "n_chars_extracted": 1003}, "tests/test_her.py::116": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "GoalSelectionStrategy", "HerReplayBuffer", "NormalActionNoise", "SAC", "make_vec_env", "pytest"], "enclosing_function": "test_goal_selection_strategy", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as 
file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/noise.py\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n A Gaussian action noise.\n\n :param mean: Mean value of the noise\n :param sigma: Scale of the noise (std here)\n :param dtype: Type of the output noise\n \"\"\"\n\n def __init__(self, mean: np.ndarray, sigma: np.ndarray, dtype: DTypeLike = np.float32) -> None:\n self._mu = mean\n self._sigma = sigma\n self._dtype = dtype\n super().__init__()\n\n def __call__(self) -> np.ndarray:\n return np.random.normal(self._mu, self._sigma).astype(self._dtype)\n\n def __repr__(self) -> str:\n return f\"NormalActionNoise(mu={self._mu}, sigma={self._sigma})\"\n\n\n# Source: stable_baselines3/her/goal_selection_strategy.py\nclass GoalSelectionStrategy(Enum):\n \"\"\"\n The strategies for selecting new goals when\n creating 
artificial transitions.\n \"\"\"\n\n # Select a goal that was achieved\n # after the current step, in the same episode\n FUTURE = 0\n # Select the goal that was achieved\n # at the end of the episode\n FINAL = 1\n # Select a goal that was achieved in the episode\n EPISODE = 2", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 7462}, "tests/test_run.py::201": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/noise.py"], "used_names": ["DDPG", "DQN", "NormalActionNoise", "SAC", "TD3", "make_vec_env", "pytest"], "enclosing_function": "test_offpolicy_multi_env", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param 
start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/noise.py\nclass NormalActionNoise(ActionNoise):\n \"\"\"\n A Gaussian action noise.\n\n :param mean: Mean value of the noise\n :param sigma: Scale of the noise (std here)\n :param dtype: Type of the output noise\n \"\"\"\n\n def __init__(self, mean: np.ndarray, sigma: np.ndarray, dtype: DTypeLike = np.float32) -> None:\n self._mu = mean\n self._sigma = sigma\n self._dtype = dtype\n super().__init__()\n\n def __call__(self) -> np.ndarray:\n return np.random.normal(self._mu, self._sigma).astype(self._dtype)\n\n def __repr__(self) -> str:\n return 
f\"NormalActionNoise(mu={self._mu}, sigma={self._sigma})\"", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 6966}, "tests/test_predict.py::83": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "DummyVecEnv", "SAC", "TD3", "get_device", "pytest"], "enclosing_function": "test_predict", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/utils.py\ndef get_device(device: th.device | str = \"auto\") -> th.device:\n \"\"\"\n Retrieve PyTorch device.\n It checks that the requested device is available first.\n For now, it supports only cpu and cuda.\n By default, it tries to use the gpu.\n\n :param device: One for 'auto', 'cuda', 'cpu'\n :return: Supported Pytorch device\n \"\"\"\n # Cuda by default\n if device == \"auto\":\n device = \"cuda\"\n # Force conversion to th.device\n device = th.device(device)\n\n # Cuda not available\n if device.type == th.device(\"cuda\").type and not th.cuda.is_available():\n return th.device(\"cpu\")\n\n return device\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import 
VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 10, "n_files_resolved": 5, "n_chars_extracted": 3334}, "tests/test_envs.py::45": {"resolved_imports": ["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["check_env", "pytest", "warnings"], "enclosing_function": "test_env", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. 
https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 3498}, "tests/test_vec_envs.py::652": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["VecFrameStack", "VecNormalize", "make_vec_env", "os", "pytest", "warnings"], "enclosing_function": "test_render", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 7008}, "tests/test_save_load.py::688": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["io", "open_path", "pytest"], "enclosing_function": "test_open_file", "extracted_code": "# Source: stable_baselines3/common/save_util.py\ndef open_path(\n path: str | pathlib.Path | io.BufferedIOBase, mode: str, verbose: int = 0, suffix: str | None = None\n) -> io.BufferedWriter | io.BufferedReader | io.BytesIO | io.BufferedRandom:\n \"\"\"\n Opens a path for reading or writing with a preferred suffix and raises debug information.\n If the provided path is a derivative of io.BufferedIOBase it ensures that the file\n matches the provided mode, i.e. If the mode is read (\"r\", \"read\") it checks that the path is readable.\n If the mode is write (\"w\", \"write\") it checks that the file is writable.\n\n If the provided path is a string or a pathlib.Path, it ensures that it exists. If the mode is \"read\"\n it checks that it exists, if it doesn't exist it attempts to read path.suffix if a suffix is provided.\n If the mode is \"write\" and the path does not exist, it creates all the parent folders. If the path\n points to a folder, it changes the path to path_2. If the path already exists and verbose >= 2,\n it raises a warning.\n\n :param path: the path to open.\n if save_path is a str or pathlib.Path and mode is \"w\", single dispatch ensures that the\n path actually exists. If path is a io.BufferedIOBase the path exists.\n :param mode: how to open the file. \"w\"|\"write\" for writing, \"r\"|\"read\" for reading.\n :param verbose: Verbosity level: 0 for no output, 1 for info messages, 2 for debug messages\n :param suffix: The preferred suffix. If mode is \"w\" then the opened file has the suffix.\n If mode is \"r\" then we attempt to open the path. 
If an error is raised and the suffix\n is not None, we attempt to open the path with the suffix.\n :return:\n \"\"\"\n # Note(antonin): the true annotation should be IO[bytes]\n # but there is not easy way to check that\n allowed_types = (io.BufferedWriter, io.BufferedReader, io.BytesIO, io.BufferedRandom)\n if not isinstance(path, allowed_types):\n raise TypeError(f\"Path {path} parameter has invalid type: expected one of {allowed_types}.\")\n if path.closed:\n raise ValueError(f\"File stream {path} is closed.\")\n mode = mode.lower()\n try:\n mode = {\"write\": \"w\", \"read\": \"r\", \"w\": \"w\", \"r\": \"r\"}[mode]\n except KeyError as e:\n raise ValueError(\"Expected mode to be either 'w' or 'r'.\") from e\n if ((\"w\" == mode) and not path.writable()) or ((\"r\" == mode) and not path.readable()):\n error_msg = \"writable\" if \"w\" == mode else \"readable\"\n raise ValueError(f\"Expected a {error_msg} file.\")\n return path", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 2576}, "tests/test_buffers.py::238": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/type_aliases.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DictRolloutBuffer", "RolloutBuffer", "pytest"], "enclosing_function": "test_custom_rollout_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/buffers.py\nclass RolloutBuffer(BaseBuffer):\n \"\"\"\n Rollout buffer used in on-policy algorithms like A2C/PPO.\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to classic advantage when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observations: np.ndarray\n actions: np.ndarray\n rewards: np.ndarray\n advantages: np.ndarray\n returns: np.ndarray\n episode_starts: np.ndarray\n log_probs: np.ndarray\n values: np.ndarray\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Space,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n 
n_envs: int = 1,\n ):\n super().__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = np.zeros((self.buffer_size, self.n_envs, *self.obs_shape), dtype=self.observation_space.dtype)\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super().reset()\n\n def compute_returns_and_advantage(self, last_values: th.Tensor, dones: np.ndarray) -> None:\n \"\"\"\n Post-processing step: compute the lambda-return (TD(lambda) estimate)\n and GAE(lambda) advantage.\n\n Uses Generalized Advantage Estimation (https://arxiv.org/abs/1506.02438)\n to compute the advantage. To obtain Monte-Carlo advantage estimate (A(s) = R - V(S))\n where R is the sum of discounted reward with value bootstrap\n (because we don't always have full episode), set ``gae_lambda=1.0`` during initialization.\n\n The TD(lambda) estimator has also two special cases:\n - TD(1) is Monte-Carlo estimate (sum of discounted rewards)\n - TD(0) is one-step estimate with bootstrapping (r_t + gamma * v(s_{t+1}))\n\n For more information, see discussion in https://github.com/DLR-RM/stable-baselines3/pull/375.\n\n :param last_values: state value estimation for the last step (one for each env)\n :param dones: if the last step was a terminal step (one bool for each env).\n \"\"\"\n # Convert to numpy\n last_values = last_values.clone().cpu().numpy().flatten() # type: ignore[assignment]\n\n last_gae_lam = 0\n for step in reversed(range(self.buffer_size)):\n if step == self.buffer_size - 1:\n next_non_terminal = 1.0 - dones.astype(np.float32)\n next_values = last_values\n else:\n next_non_terminal = 1.0 - self.episode_starts[step + 1]\n next_values = self.values[step + 1]\n delta = self.rewards[step] + self.gamma * next_values * next_non_terminal - self.values[step]\n last_gae_lam = delta + self.gamma * self.gae_lambda * next_non_terminal * last_gae_lam\n self.advantages[step] = last_gae_lam\n # TD(lambda) estimator, see Github PR #375 or \"Telescoping in TD(lambda)\"\n # in David Silver Lecture 4: https://www.youtube.com/watch?v=PnHCvfgC_ZA\n self.returns = self.advantages + self.values\n\n def add(\n self,\n obs: np.ndarray,\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space, 
spaces.Discrete):\n obs = obs.reshape((self.n_envs, *self.obs_shape))\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.observations[self.pos] = np.array(obs)\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get(self, batch_size: int | None = None) -> Generator[RolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n _tensor_names = [\n \"observations\",\n \"actions\",\n \"values\",\n \"log_probs\",\n \"advantages\",\n \"returns\",\n ]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples(\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = None,\n ) -> RolloutBufferSamples:\n data = (\n self.observations[batch_inds],\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n self.actions[batch_inds].astype(np.float32, copy=False),\n self.values[batch_inds].flatten(),\n self.log_probs[batch_inds].flatten(),\n self.advantages[batch_inds].flatten(),\n self.returns[batch_inds].flatten(),\n )\n return RolloutBufferSamples(*tuple(map(self.to_torch, data)))\n\nclass DictRolloutBuffer(RolloutBuffer):\n \"\"\"\n Dict Rollout buffer used in on-policy algorithms like A2C/PPO.\n Extends the RolloutBuffer to use dictionary observations\n\n It corresponds to ``buffer_size`` transitions collected\n using the current policy.\n This experience will be discarded after the policy update.\n In order to use PPO objective, we also store the current value of each state\n and the log probability of each taken action.\n\n The term rollout here refers to the model-free notion and should not\n be used with the concept of rollout used in model-based RL or planning.\n Hence, it is only involved in policy and value function training but not action selection.\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param gae_lambda: Factor for trade-off of bias vs variance for Generalized Advantage Estimator\n Equivalent to Monte-Carlo advantage estimate when set to 1.\n :param gamma: Discount factor\n :param n_envs: Number of parallel environments\n \"\"\"\n\n observation_space: spaces.Dict\n obs_shape: dict[str, tuple[int, ...]] # type: ignore[assignment]\n observations: dict[str, np.ndarray] # type: ignore[assignment]\n\n def __init__(\n self,\n buffer_size: int,\n observation_space: spaces.Dict,\n action_space: spaces.Space,\n device: th.device | str = \"auto\",\n gae_lambda: float = 1,\n gamma: float = 0.99,\n n_envs: int = 1,\n ):\n super(RolloutBuffer, self).__init__(buffer_size, observation_space, action_space, device, n_envs=n_envs)\n\n assert 
isinstance(self.obs_shape, dict), \"DictRolloutBuffer must be used with Dict obs space only\"\n\n self.gae_lambda = gae_lambda\n self.gamma = gamma\n\n self.generator_ready = False\n self.reset()\n\n def reset(self) -> None:\n self.observations = {}\n for key, obs_input_shape in self.obs_shape.items():\n self.observations[key] = np.zeros(\n (self.buffer_size, self.n_envs, *obs_input_shape), dtype=self.observation_space[key].dtype\n )\n self.actions = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=self.action_space.dtype)\n self.rewards = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.returns = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.episode_starts = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.values = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.log_probs = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.advantages = np.zeros((self.buffer_size, self.n_envs), dtype=np.float32)\n self.generator_ready = False\n super(RolloutBuffer, self).reset()\n\n def add( # type: ignore[override]\n self,\n obs: dict[str, np.ndarray],\n action: np.ndarray,\n reward: np.ndarray,\n episode_start: np.ndarray,\n value: th.Tensor,\n log_prob: th.Tensor,\n ) -> None:\n \"\"\"\n :param obs: Observation\n :param action: Action\n :param reward:\n :param episode_start: Start of episode signal.\n :param value: estimated value of the current state\n following the current policy.\n :param log_prob: log probability of the action\n following the current policy.\n \"\"\"\n if len(log_prob.shape) == 0:\n # Reshape 0-d tensor to avoid error\n log_prob = log_prob.reshape(-1, 1)\n\n for key in self.observations.keys():\n obs_ = np.array(obs[key])\n # Reshape needed when using multiple envs with discrete observations\n # as numpy cannot broadcast (n_discrete,) to (n_discrete, 1)\n if isinstance(self.observation_space.spaces[key], spaces.Discrete):\n obs_ = obs_.reshape((self.n_envs,) + self.obs_shape[key])\n self.observations[key][self.pos] = obs_\n\n # Reshape to handle multi-dim and discrete action spaces, see GH #970 #1392\n action = action.reshape((self.n_envs, self.action_dim))\n\n self.actions[self.pos] = np.array(action)\n self.rewards[self.pos] = np.array(reward)\n self.episode_starts[self.pos] = np.array(episode_start)\n self.values[self.pos] = value.clone().cpu().numpy().flatten()\n self.log_probs[self.pos] = log_prob.clone().cpu().numpy()\n self.pos += 1\n if self.pos == self.buffer_size:\n self.full = True\n\n def get( # type: ignore[override]\n self,\n batch_size: int | None = None,\n ) -> Generator[DictRolloutBufferSamples, None, None]:\n assert self.full, \"\"\n indices = np.random.permutation(self.buffer_size * self.n_envs)\n # Prepare the data\n if not self.generator_ready:\n for key, obs in self.observations.items():\n self.observations[key] = self.swap_and_flatten(obs)\n\n _tensor_names = [\"actions\", \"values\", \"log_probs\", \"advantages\", \"returns\"]\n\n for tensor in _tensor_names:\n self.__dict__[tensor] = self.swap_and_flatten(self.__dict__[tensor])\n self.generator_ready = True\n\n # Return everything, don't create minibatches\n if batch_size is None:\n batch_size = self.buffer_size * self.n_envs\n\n start_idx = 0\n while start_idx < self.buffer_size * self.n_envs:\n yield self._get_samples(indices[start_idx : start_idx + batch_size])\n start_idx += batch_size\n\n def _get_samples( # type: ignore[override]\n self,\n batch_inds: np.ndarray,\n env: VecNormalize | None = 
None,\n ) -> DictRolloutBufferSamples:\n return DictRolloutBufferSamples(\n observations={key: self.to_torch(obs[batch_inds]) for (key, obs) in self.observations.items()},\n # Cast to float32 (backward compatible), this would lead to RuntimeError for MultiBinary space\n actions=self.to_torch(self.actions[batch_inds].astype(np.float32, copy=False)),\n old_values=self.to_torch(self.values[batch_inds].flatten()),\n old_log_prob=self.to_torch(self.log_probs[batch_inds].flatten()),\n advantages=self.to_torch(self.advantages[batch_inds].flatten()),\n returns=self.to_torch(self.returns[batch_inds].flatten()),\n )", "n_imports_parsed": 12, "n_files_resolved": 7, "n_chars_extracted": 14971}, "tests/test_utils.py::121": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["make_vec_env"], "enclosing_function": "test_vec_env_kwargs", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
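A standalone NumPy re-derivation of the backward recursion used in ``compute_returns_and_advantage`` above; the rewards and values are toy numbers made up for illustration:

```python
import numpy as np

gamma, gae_lambda = 0.99, 0.95
rewards = np.array([1.0, 1.0, 1.0])         # r_t for a single env
values = np.array([0.5, 0.6, 0.7])          # V(s_t) from the critic
episode_starts = np.array([1.0, 0.0, 0.0])  # 1.0 where a new episode begins
last_value, done = 0.8, False               # bootstrap value after the buffer

advantages = np.zeros_like(rewards)
last_gae_lam = 0.0
for step in reversed(range(len(rewards))):
    if step == len(rewards) - 1:
        next_non_terminal = 1.0 - float(done)
        next_values = last_value
    else:
        next_non_terminal = 1.0 - episode_starts[step + 1]
        next_values = values[step + 1]
    # TD error, then the GAE(lambda) accumulator
    delta = rewards[step] + gamma * next_values * next_non_terminal - values[step]
    last_gae_lam = delta + gamma * gae_lambda * next_non_terminal * last_gae_lam
    advantages[step] = last_gae_lam

returns = advantages + values  # TD(lambda) estimate, as in the buffer code
```

Setting ``gae_lambda=1.0`` reduces this to the Monte-Carlo advantage estimate mentioned in the docstring above.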
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 4368}, "tests/test_vec_envs.py::640": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["VecFrameStack", "VecNormalize", "make_vec_env", "os", "pytest", "warnings"], "enclosing_function": "test_render", "extracted_code": "# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a 
folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", 
bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n Retrieve a ``VecEnvWrapper`` object by recursively searching.\n\n\ndef unwrap_vec_normalize(env: VecEnv) -> VecNormalize | None:\n \"\"\"\n Retrieve a ``VecNormalize`` object by recursively searching.\n\n :param env: The VecEnv that is going to be unwrapped\n :return: The ``VecNormalize`` object if the ``VecEnv`` is wrapped with ``VecNormalize``, None otherwise\n \"\"\"\n return unwrap_vec_wrapper(env, VecNormalize)\n\n\ndef is_vecenv_wrapped(env: VecEnv, vec_wrapper_class: type[VecEnvWrapper]) -> bool:\n \"\"\"\n Check if an environment is already wrapped in a given ``VecEnvWrapper``.\n\n :param env: The VecEnv that is going to be checked", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 7008}, "tests/test_logger.py::376": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["make_output_format", "pytest"], "enclosing_function": "test_key_length", "extracted_code": "# Source: stable_baselines3/common/logger.py\ndef make_output_format(_format: str, log_dir: str, log_suffix: str = \"\") -> KVWriter:\n \"\"\"\n return a logger for the requested format\n\n :param _format: the requested format to log to ('stdout', 'log', 'json' or 'csv' or 'tensorboard')\n :param log_dir: the logging directory\n :param log_suffix: the suffix for the log file\n :return: the logger\n \"\"\"\n os.makedirs(log_dir, exist_ok=True)\n if _format == \"stdout\":\n return HumanOutputFormat(sys.stdout)\n elif _format == \"log\":\n return HumanOutputFormat(os.path.join(log_dir, f\"log{log_suffix}.txt\"))\n elif _format == \"json\":\n return JSONOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.json\"))\n elif _format == \"csv\":\n return CSVOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.csv\"))\n elif _format == \"tensorboard\":\n return TensorBoardOutputFormat(log_dir)\n else:\n raise ValueError(f\"Unknown format specified: {_format}\")", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1013}, "tests/test_envs.py::203": {"resolved_imports": 
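``make_output_format`` (shown in the test_key_length context above) can be exercised directly; a hedged sketch, using a temporary directory instead of a real log folder:

```python
import tempfile

from stable_baselines3.common.logger import make_output_format

log_dir = tempfile.mkdtemp()
csv_writer = make_output_format("csv", log_dir)  # creates progress.csv in log_dir
csv_writer.close()

try:
    make_output_format("dummy_format", log_dir)
except ValueError as exc:
    print(exc)  # Unknown format specified: dummy_format
```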
["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["FakeImageEnv", "check_env", "pytest", "spaces", "warnings"], "enclosing_function": "test_non_default_action_spaces", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. 
https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 4120}, "tests/test_n_step_replay.py::27": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["DQN", "NStepReplayBuffer", "SAC", "TD3", "make_vec_env", "pytest"], "enclosing_function": "test_run", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]
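A sketch of the warning path exercised by test_non_default_action_spaces: ``check_env`` recommends a symmetric, normalized ``Box`` action space. ``FakeImageEnv`` comes from the envs module above; the replaced bounds are illustrative:

```python
import warnings

import numpy as np
from gymnasium import spaces

from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.envs import FakeImageEnv

env = FakeImageEnv(discrete=False)
# Finite bounds, but not normalized to [-1, 1] -> triggers the recommendation
env.action_space = spaces.Box(low=0.0, high=10.0, shape=(6,), dtype=np.float32)

with warnings.catch_warnings(record=True) as records:
    warnings.simplefilter("always")
    check_env(env, warn=True)
assert any("symmetric and normalized" in str(r.message) for r in records)
```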
\"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/buffers.py\nclass NStepReplayBuffer(ReplayBuffer):\n \"\"\"\n Replay buffer used for computing n-step returns in off-policy algorithms like SAC/DQN.\n\n The n-step return combines multiple steps of future rewards,\n discounted by the discount factor gamma.\n This can help improve sample efficiency and credit assignment.\n\n This implementation uses the same storage space as a normal replay buffer,\n and NumPy vectorized operations at sampling time to efficiently compute the\n n-step return, without requiring extra memory.\n\n This implementation is inspired by:\n - https://github.com/younggyoseo/FastTD3\n - https://github.com/DLR-RM/stable-baselines3/pull/81\n\n It avoids potential issues such as:\n - https://github.com/younggyoseo/FastTD3/issues/6\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Not supported\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n :param n_steps: Number of steps to accumulate rewards for n-step returns\n :param gamma: Discount factor for future rewards\n \"\"\"\n\n def __init__(self, *args, n_steps: int = 3, gamma: float = 0.99, **kwargs):\n super().__init__(*args, **kwargs)\n self.n_steps = n_steps\n self.gamma = gamma\n if self.optimize_memory_usage:\n raise NotImplementedError(\"NStepReplayBuffer doesn't support optimize_memory_usage=True\")\n\n def _get_samples(self, batch_inds: np.ndarray, env: VecNormalize | None = None) -> ReplayBufferSamples:\n \"\"\"\n Sample a batch of transitions and compute n-step returns.\n\n For each sampled transition, the method computes the cumulative discounted reward over\n the next `n_steps`, properly handling episode termination and timeouts.\n The next observation and done flag correspond to the last transition in the computed n-step trajectory.\n\n :param batch_inds: Indices of samples to retrieve\n :param env: Optional VecNormalize environment for normalizing observations/rewards\n :return: A batch of samples with n-step returns and corresponding observations/actions\n \"\"\"\n # Randomly choose env indices for each sample\n env_indices = np.random.randint(0, self.n_envs, size=batch_inds.shape)\n\n # Note: the self.pos index is dangerous (will overlap two different episodes when buffer is full)\n # so we set self.pos-1 to truncated=True (temporarily) if done=False and truncated=False\n last_valid_index = self.pos - 1\n original_timeout_values = self.timeouts[last_valid_index].copy()\n self.timeouts[last_valid_index] = np.logical_or(original_timeout_values, np.logical_not(self.dones[last_valid_index]))\n\n # Compute n-step indices with wrap-around\n steps = np.arange(self.n_steps).reshape(1, -1) # shape: [1, n_steps]\n indices = (batch_inds[:, None] + steps) % self.buffer_size # shape: [batch, n_steps]\n\n # Retrieve sequences of transitions\n rewards_seq = self._normalize_reward(self.rewards[indices, env_indices[:, None]], env) # [batch, n_steps]\n dones_seq = self.dones[indices, env_indices[:, None]] # [batch, n_steps]\n truncated_seq = self.timeouts[indices, env_indices[:, None]] # [batch, n_steps]\n\n # Compute masks: 1 until first 
done/truncation (inclusive)\n done_or_truncated = np.logical_or(dones_seq, truncated_seq)\n done_idx = done_or_truncated.argmax(axis=1)\n # If no done/truncation, keep full sequence\n has_done_or_truncated = done_or_truncated.any(axis=1)\n done_idx = np.where(has_done_or_truncated, done_idx, self.n_steps - 1)\n\n mask = np.arange(self.n_steps).reshape(1, -1) <= done_idx[:, None] # shape: [batch, n_steps]\n # Compute discount factors for bootstrapping (using target Q-Value)\n # It is gamma ** n_steps by default but should be adjusted in case of early termination/truncation.\n target_q_discounts = self.gamma ** mask.sum(axis=1, keepdims=True).astype(np.float32) # [batch, 1]\n\n # Apply discount\n discounts = self.gamma ** np.arange(self.n_steps, dtype=np.float32).reshape(1, -1) # [1, n_steps]\n discounted_rewards = rewards_seq * discounts * mask\n n_step_returns = discounted_rewards.sum(axis=1, keepdims=True) # [batch, 1]\n\n # Compute indices of next_obs/done at the final point of the n-step transition\n last_indices = (batch_inds + done_idx) % self.buffer_size\n next_obs = self._normalize_obs(self.next_observations[last_indices, env_indices], env)\n next_dones = self.dones[last_indices, env_indices][:, None].astype(np.float32)\n next_timeouts = self.timeouts[last_indices, env_indices][:, None].astype(np.float32)\n final_dones = next_dones * (1.0 - next_timeouts)\n\n # Revert back tmp changes to avoid sampling across episodes\n self.timeouts[last_valid_index] = original_timeout_values\n\n # Gather observations and actions\n obs = self._normalize_obs(self.observations[batch_inds, env_indices], env)\n actions = self.actions[batch_inds, env_indices]\n\n return ReplayBufferSamples(\n observations=self.to_torch(obs), # type: ignore[arg-type]\n actions=self.to_torch(actions),\n next_observations=self.to_torch(next_obs), # type: ignore[arg-type]\n dones=self.to_torch(final_dones),\n rewards=self.to_torch(n_step_returns),\n discounts=self.to_torch(target_q_discounts),\n )\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. 
with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 11747}, "tests/test_vec_check_nan.py::45": {"resolved_imports": ["stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecCheckNan", "pytest"], "enclosing_function": "test_check_nan", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import 
VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",", "n_imports_parsed": 5, "n_files_resolved": 1, "n_chars_extracted": 2176}, "tests/test_logger.py::212": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["make_output_format", "pytest"], "enclosing_function": "test_make_output_fail", "extracted_code": "# Source: stable_baselines3/common/logger.py\ndef make_output_format(_format: str, log_dir: str, log_suffix: str = \"\") -> KVWriter:\n \"\"\"\n return a logger for the requested format\n\n :param _format: the requested format to log to ('stdout', 'log', 'json' or 'csv' or 'tensorboard')\n :param log_dir: the logging directory\n :param log_suffix: the suffix for the log file\n :return: the logger\n \"\"\"\n os.makedirs(log_dir, exist_ok=True)\n if _format == \"stdout\":\n return HumanOutputFormat(sys.stdout)\n elif _format == \"log\":\n return HumanOutputFormat(os.path.join(log_dir, f\"log{log_suffix}.txt\"))\n elif _format == \"json\":\n return JSONOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.json\"))\n elif _format == \"csv\":\n return CSVOutputFormat(os.path.join(log_dir, f\"progress{log_suffix}.csv\"))\n elif _format == \"tensorboard\":\n return TensorBoardOutputFormat(log_dir)\n else:\n raise ValueError(f\"Unknown format specified: {_format}\")", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1013}, "tests/test_her.py::271": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "HerReplayBuffer", "SAC", "deepcopy", "make_vec_env", "pathlib", "pytest", "warnings"], "enclosing_function": "test_save_load_replay_buffer", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom 
stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. 
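The test_save_load_replay_buffer context above combines these pieces; a hedged round-trip sketch (the hyperparameters and temporary path are illustrative, not taken from the test):

```python
import pathlib
import tempfile

from stable_baselines3 import SAC, HerReplayBuffer
from stable_baselines3.common.envs import BitFlippingEnv

env = BitFlippingEnv(n_bits=4, continuous=True)
model = SAC(
    "MultiInputPolicy",
    env,
    replay_buffer_class=HerReplayBuffer,
    buffer_size=1000,
    learning_starts=10,
)
model.learn(total_timesteps=50)

path = pathlib.Path(tempfile.mkdtemp()) / "replay_buffer.pkl"
model.save_replay_buffer(path)
model.load_replay_buffer(path)  # restores the transitions into a fresh buffer
```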
Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 6318}, "tests/test_callbacks.py::161": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "SAC", "pytest"], "enclosing_function": "test_callbacks_can_cancel_runs", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac 
import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 1049}, "tests/test_callbacks.py::106": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "CallbackList", "CheckpointCallback", "DDPG", "DQN", "EvalCallback", "EveryNTimesteps", "LogEveryNTimesteps", "PPO", "SAC", "StopTrainingOnMaxEpisodes", "StopTrainingOnNoModelImprovement", "StopTrainingOnRewardThreshold", "TD3", "make_vec_env", "os", "pytest", "shutil"], "enclosing_function": "test_callbacks", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = 
os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass CallbackList(BaseCallback):\n \"\"\"\n Class for chaining callbacks.\n\n :param callbacks: A list of callbacks that will be called\n sequentially.\n \"\"\"\n\n def __init__(self, callbacks: list[BaseCallback]):\n super().__init__()\n assert isinstance(callbacks, list)\n self.callbacks = callbacks\n\n def _init_callback(self) -> None:\n for callback in self.callbacks:\n callback.init_callback(self.model)\n\n # Fix for https://github.com/DLR-RM/stable-baselines3/issues/1791\n # pass through the parent callback to all children\n callback.parent = self.parent\n\n def _on_training_start(self) -> None:\n for callback in self.callbacks:\n callback.on_training_start(self.locals, self.globals)\n\n def _on_rollout_start(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_start()\n\n def _on_step(self) -> bool:\n continue_training = True\n for callback in self.callbacks:\n # Return False (stop training) if at least one callback returns False\n continue_training = callback.on_step() and continue_training\n return continue_training\n\n def _on_rollout_end(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_end()\n\n def _on_training_end(self) -> None:\n for callback in self.callbacks:\n callback.on_training_end()\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n for callback in self.callbacks:\n callback.update_locals(locals_)\n\nclass CheckpointCallback(BaseCallback):\n \"\"\"\n Callback for saving a model every ``save_freq`` calls\n to ``env.step()``.\n By default, it only saves model checkpoints,\n you need to pass ``save_replay_buffer=True``,\n and ``save_vecnormalize=True`` to also save replay buffer checkpoints\n and normalization statistics checkpoints.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``save_freq = max(save_freq // n_envs, 1)``\n\n :param save_freq: Save checkpoints every ``save_freq`` call of the callback.\n :param save_path: Path to the folder where the model will be saved.\n :param name_prefix: Common prefix to the saved models\n :param save_replay_buffer: Save the model replay buffer\n :param save_vecnormalize: Save the ``VecNormalize`` statistics\n :param verbose: Verbosity level: 0 for no output, 2 for indicating when saving model checkpoint\n \"\"\"\n\n def __init__(\n self,\n save_freq: int,\n save_path: str,\n name_prefix: str = \"rl_model\",\n save_replay_buffer: bool = False,\n save_vecnormalize: bool = False,\n verbose: int = 0,\n ):\n super().__init__(verbose)\n self.save_freq = save_freq\n self.save_path = save_path\n self.name_prefix = name_prefix\n self.save_replay_buffer = save_replay_buffer\n self.save_vecnormalize = save_vecnormalize\n\n def _init_callback(self) -> None:\n # Create folder if needed\n if self.save_path is not None:\n os.makedirs(self.save_path, exist_ok=True)\n\n def _checkpoint_path(self, checkpoint_type: str = \"\", extension: str = \"\") -> str:\n \"\"\"\n Helper to get checkpoint path for each type of checkpoint.\n\n :param checkpoint_type: empty for the model, \"replay_buffer_\"\n or \"vecnormalize_\" for the other checkpoints.\n :param extension: Checkpoint file extension (zip for model, pkl for others)\n :return: Path to the checkpoint\n \"\"\"\n return os.path.join(self.save_path, f\"{self.name_prefix}_{checkpoint_type}{self.num_timesteps}_steps.{extension}\")\n\n def _on_step(self) -> bool:\n if self.n_calls % self.save_freq == 0:\n model_path = self._checkpoint_path(extension=\"zip\")\n self.model.save(model_path)\n if self.verbose >= 2:\n print(f\"Saving model checkpoint to {model_path}\")\n\n if self.save_replay_buffer and hasattr(self.model, \"replay_buffer\") and self.model.replay_buffer is not None:\n # If model has a replay buffer, save it too\n replay_buffer_path = self._checkpoint_path(\"replay_buffer_\", extension=\"pkl\")\n self.model.save_replay_buffer(replay_buffer_path) # type: ignore[attr-defined]\n if self.verbose > 1:\n print(f\"Saving model replay buffer checkpoint to {replay_buffer_path}\")\n\n if self.save_vecnormalize and self.model.get_vec_normalize_env() is not None:\n # Save the VecNormalize statistics\n vec_normalize_path = self._checkpoint_path(\"vecnormalize_\", extension=\"pkl\")\n self.model.get_vec_normalize_env().save(vec_normalize_path) # type: ignore[union-attr]\n if self.verbose >= 2:\n print(f\"Saving model VecNormalize to {vec_normalize_path}\")\n\n return True\n\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type\" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to 
the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, 
locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\nclass StopTrainingOnRewardThreshold(BaseCallback):\n \"\"\"\n Stop the training once a threshold in episodic reward\n has been reached (i.e. when the model is good enough).\n\n It must be used with the ``EvalCallback``.\n\n :param reward_threshold: Minimum expected reward per episode\n to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because episodic reward\n threshold reached\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, reward_threshold: float, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.reward_threshold = reward_threshold\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnMinimumReward`` callback must be used with an ``EvalCallback``\"\n continue_training = bool(self.parent.best_mean_reward < self.reward_threshold)\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because the mean reward {self.parent.best_mean_reward:.2f} \"\n f\"is above the threshold {self.reward_threshold}\"\n )\n return continue_training\n\nclass EveryNTimesteps(EventCallback):\n \"\"\"\n Trigger a callback every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two trigger.\n :param callback: Callback that will be called\n when the event is triggered.\n \"\"\"\n\n def __init__(self, n_steps: int, callback: BaseCallback):\n super().__init__(callback)\n self.n_steps = n_steps\n self.last_time_trigger = 0\n\n def _on_step(self) -> bool:\n if (self.num_timesteps - self.last_time_trigger) >= self.n_steps:\n self.last_time_trigger = self.num_timesteps\n return self._on_event()\n return True\n\nclass LogEveryNTimesteps(EveryNTimesteps):\n \"\"\"\n Log data every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two trigger.\n \"\"\"\n\n def __init__(self, n_steps: int):\n super().__init__(n_steps, callback=ConvertCallback(self._log_data))\n\n def _log_data(self, _locals: dict[str, Any], _globals: dict[str, Any]) -> bool:\n self.model.dump_logs()\n return True\n\nclass StopTrainingOnMaxEpisodes(BaseCallback):\n \"\"\"\n Stop the training once a maximum number of episodes are played.\n\n For multiple environments presumes that, the desired behavior is that the agent trains on each env for ``max_episodes``\n and in total for ``max_episodes * n_envs`` episodes.\n\n :param max_episodes: Maximum number of episodes to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about when training ended by\n reaching ``max_episodes``\n \"\"\"\n\n def __init__(self, max_episodes: int, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_episodes = max_episodes\n self._total_max_episodes = max_episodes\n self.n_episodes = 0\n\n def _init_callback(self) -> None:\n # At start set total max according to number of environments\n self._total_max_episodes = self.max_episodes * self.training_env.num_envs\n\n def _on_step(self) -> bool:\n # Check that the `dones` local variable is defined\n assert \"dones\" in self.locals, \"`dones` variable is not defined, please check your code next to `callback.on_step()`\"\n self.n_episodes += np.sum(self.locals[\"dones\"]).item()\n\n continue_training = self.n_episodes < self._total_max_episodes\n\n if self.verbose >= 1 and not 
continue_training:\n mean_episodes_per_env = self.n_episodes / self.training_env.num_envs\n mean_ep_str = (\n f\"with an average of {mean_episodes_per_env:.2f} episodes per env\" if self.training_env.num_envs > 1 else \"\"\n )\n\n print(\n f\"Stopping training with a total of {self.num_timesteps} steps because the \"\n f\"{self.locals.get('tb_log_name')} model reached max_episodes={self.max_episodes}, \"\n f\"by playing for {self.n_episodes} episodes \"\n f\"{mean_ep_str}\"\n )\n return continue_training\n\nclass StopTrainingOnNoModelImprovement(BaseCallback):\n \"\"\"\n Stop the training early if there is no new best model (new best mean reward) after more than N consecutive evaluations.\n\n It is possible to define a minimum number of evaluations before start to count evaluations without improvement.\n\n It must be used with the ``EvalCallback``.\n\n :param max_no_improvement_evals: Maximum number of consecutive evaluations without a new best model.\n :param min_evals: Number of evaluations before start to count evaluations without improvements.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because no new best model\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, max_no_improvement_evals: int, min_evals: int = 0, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_no_improvement_evals = max_no_improvement_evals\n self.min_evals = min_evals\n self.last_best_mean_reward = -np.inf\n self.no_improvement_evals = 0\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnNoModelImprovement`` callback must be used with an ``EvalCallback``\"\n\n continue_training = True\n\n if self.n_calls > self.min_evals:\n if self.parent.best_mean_reward > self.last_best_mean_reward:\n self.no_improvement_evals = 0\n else:\n self.no_improvement_evals += 1\n if self.no_improvement_evals > self.max_no_improvement_evals:\n continue_training = False\n\n self.last_best_mean_reward = self.parent.best_mean_reward\n\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because there was no new best model in the last {self.no_improvement_evals:d} evaluations\"\n )\n\n return continue_training\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many 
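A hedged sketch of how the stopping callbacks above plug into EvalCallback as their parent; the reward threshold, eval_freq, episode counts, and env id are assumptions, not values from the extracted sources.

# Hedged sketch -- threshold, frequencies, and env id assumed.
import gymnasium as gym
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import (
    EvalCallback,
    StopTrainingOnNoModelImprovement,
    StopTrainingOnRewardThreshold,
)

stop_on_reward = StopTrainingOnRewardThreshold(reward_threshold=475.0, verbose=1)
stop_on_plateau = StopTrainingOnNoModelImprovement(max_no_improvement_evals=3, min_evals=5)
eval_cb = EvalCallback(
    gym.make("CartPole-v1"),
    callback_on_new_best=stop_on_reward,  # triggered only when best_mean_reward improves
    callback_after_eval=stop_on_plateau,  # triggered after every evaluation
    eval_freq=1_000,
)
PPO("MlpPolicy", "CartPole-v1").learn(50_000, callback=eval_cb)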
things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 26817}, "tests/test_monitor.py::44": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_monitor", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files 
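A sketch of calling the make_vec_env helper defined above; the env id, seed, and monitor_dir are assumptions. With monitor_dir set, each of the n_envs workers writes its own rank-indexed Monitor file into that folder.

# Hedged sketch -- env id, seed, and monitor_dir assumed.
from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.vec_env import SubprocVecEnv

vec_env = make_vec_env(
    "CartPole-v1",
    n_envs=4,
    seed=0,                         # seeds each action space with seed + rank; envs are seeded at reset
    monitor_dir="./monitor_logs/",  # one Monitor file per rank is written here
    vec_env_cls=SubprocVecEnv,      # default is DummyVecEnv when omitted
)
obs = vec_env.reset()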
(default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n 
Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 5236}, "tests/test_vec_monitor.py::47": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_vec_monitor", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 2041}, "tests/test_callbacks.py::183": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "DQN", "DummyVecEnv", "EvalCallback", "HerReplayBuffer"], "enclosing_function": "test_eval_success_logging", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import 
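A round-trip sketch for the Monitor wrapper above, mirroring what the test_monitor.py contexts exercise: run one episode, then parse the CSV, whose first line is a '#'-prefixed JSON header followed by r/l/t rows. The filename and env id are assumptions.

# Hedged sketch -- filename and env id assumed; column meanings from Monitor.step above.
import gymnasium as gym
import pandas
from stable_baselines3.common.monitor import Monitor

env = Monitor(gym.make("CartPole-v1"), filename="./run.monitor.csv")
obs, info = env.reset()
done = False
while not done:
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    done = terminated or truncated
env.close()

with open("./run.monitor.csv") as file_handler:
    assert file_handler.readline().startswith("#")  # JSON header with t_start and env_id
    df = pandas.read_csv(file_handler)              # columns: r (return), l (length), t (time)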
HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n \"Since Stable Baselines 2.1.0, `HER` is now a replay buffer class `HerReplayBuffer`.\\n \"\n \"Please check the documentation for more information: https://stable-baselines3.readthedocs.io/\"\n )\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for initialization\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` call of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. 
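The evaluations.npz file named in the log_path parameter above is a plain np.savez archive; a sketch of reading it back, with the path assumed from an illustrative log_path:

# Hedged sketch -- log_path assumed; keys match the np.savez call in EvalCallback._on_step.
import numpy as np

data = np.load("./eval_logs/evaluations.npz")
print(data["timesteps"])             # shape (n_evals,)
print(data["results"].mean(axis=1))  # mean reward per evaluation
print(data["ep_lengths"])            # shape (n_evals, n_eval_episodes)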
It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use a stochastic or deterministic actions.\n :param render: Whether to render or not the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type\" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise 
AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n 
\"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 12142}, "tests/test_predict.py::126": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["PPO", "pytest"], "enclosing_function": "test_mixing_gym_vecenv_api", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 10, "n_files_resolved": 5, "n_chars_extracted": 549}, "tests/test_logger.py::625": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "PPO"], "enclosing_function": "test_rollout_success_rate_onpolicy_algo", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n 
:param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 5788}, "tests/test_env_checker.py::142": {"resolved_imports": ["stable_baselines3/common/env_checker.py"], "used_names": ["check_env", "pytest", "spaces"], "enclosing_function": "test_check_env_detailed_error", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. 
https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 3498}, "tests/test_distributions.py::79": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/distributions.py", "stable_baselines3/common/utils.py"], "used_names": [], "enclosing_function": "test_get_distribution", "extracted_code": "", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/test_vec_monitor.py::77": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["BitFlippingEnv", "DummyVecEnv", "VecMonitor", "csv", "os", "uuid"], "enclosing_function": "test_vec_monitor_info_keywords", "extracted_code": "# Source: stable_baselines3/common/envs/bit_flipping_env.py\nclass BitFlippingEnv(Env):\n \"\"\"\n Simple bit flipping env, useful to test HER.\n The goal is to flip all the bits to get a vector of ones.\n In the continuous variant, if the ith action component has a value > 0,\n then the ith bit will be flipped. 
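A minimal sketch of the check_env helper above; the env id is an assumption. The call warns (or raises) when an environment deviates from the Gymnasium API and stays silent otherwise.

# Hedged sketch -- env id assumed.
import gymnasium as gym
from stable_baselines3.common.env_checker import check_env

check_env(gym.make("CartPole-v1"), warn=True)  # silent when the env follows the API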
Uses a ``MultiBinary`` observation space\n by default.\n\n :param n_bits: Number of bits to flip\n :param continuous: Whether to use the continuous actions version or not,\n by default, it uses the discrete one\n :param max_steps: Max number of steps, by default, equal to n_bits\n :param discrete_obs_space: Whether to use the discrete observation\n version or not, ie a one-hot encoding of all possible states\n :param image_obs_space: Whether to use an image observation version\n or not, ie a greyscale image of the state\n :param channel_first: Whether to use channel-first or last image.\n \"\"\"\n\n spec = EnvSpec(\"BitFlippingEnv-v0\", \"no-entry-point\")\n state: np.ndarray\n\n def __init__(\n self,\n n_bits: int = 10,\n continuous: bool = False,\n max_steps: int | None = None,\n discrete_obs_space: bool = False,\n image_obs_space: bool = False,\n channel_first: bool = True,\n render_mode: str = \"human\",\n ):\n super().__init__()\n self.render_mode = render_mode\n # Shape of the observation when using image space\n self.image_shape = (1, 36, 36) if channel_first else (36, 36, 1)\n # The achieved goal is determined by the current state\n # here, it is a special where they are equal\n\n # observation space for observations given to the model\n self.observation_space = self._make_observation_space(discrete_obs_space, image_obs_space, n_bits)\n # observation space used to update internal state\n self._obs_space = spaces.MultiBinary(n_bits)\n\n if continuous:\n self.action_space = spaces.Box(-1, 1, shape=(n_bits,), dtype=np.float32)\n else:\n self.action_space = spaces.Discrete(n_bits)\n self.continuous = continuous\n self.discrete_obs_space = discrete_obs_space\n self.image_obs_space = image_obs_space\n self.desired_goal = np.ones((n_bits,), dtype=self.observation_space[\"desired_goal\"].dtype)\n if max_steps is None:\n max_steps = n_bits\n self.max_steps = max_steps\n self.current_step = 0\n\n def seed(self, seed: int) -> None:\n self._obs_space.seed(seed)\n\n def convert_if_needed(self, state: np.ndarray) -> int | np.ndarray:\n \"\"\"\n Convert to discrete space if needed.\n\n :param state:\n :return:\n \"\"\"\n\n if self.discrete_obs_space:\n # Convert from int8 to int32 for NumPy 2.0\n state = state.astype(np.int32)\n # The internal state is the binary representation of the\n # observed one\n return int(sum(state[i] * 2**i for i in range(len(state))))\n\n if self.image_obs_space:\n size = np.prod(self.image_shape)\n image = np.concatenate((state.astype(np.uint8) * 255, np.zeros(size - len(state), dtype=np.uint8)))\n return image.reshape(self.image_shape).astype(np.uint8)\n return state\n\n def convert_to_bit_vector(self, state: int | np.ndarray, batch_size: int) -> np.ndarray:\n \"\"\"\n Convert to bit vector if needed.\n\n :param state: The state to be converted, which can be either an integer or a numpy array.\n :param batch_size: The batch size.\n :return: The state converted into a bit vector.\n \"\"\"\n # Convert back to bit vector\n if isinstance(state, int):\n bit_vector = np.array(state).reshape(batch_size, -1)\n # Convert to binary representation\n bit_vector = ((bit_vector[:, :] & (1 << np.arange(len(self.state)))) > 0).astype(int)\n elif self.image_obs_space:\n bit_vector = state.reshape(batch_size, -1)[:, : len(self.state)] / 255 # type: ignore[assignment]\n else:\n bit_vector = np.array(state).reshape(batch_size, -1)\n return bit_vector\n\n def _make_observation_space(self, discrete_obs_space: bool, image_obs_space: bool, n_bits: int) -> spaces.Dict:\n \"\"\"\n Helper to 
create observation space\n\n :param discrete_obs_space: Whether to use the discrete observation version\n :param image_obs_space: Whether to use the image observation version\n :param n_bits: The number of bits used to represent the state\n :return: the environment observation space\n \"\"\"\n if discrete_obs_space and image_obs_space:\n raise ValueError(\"Cannot use both discrete and image observation spaces\")\n\n if discrete_obs_space:\n # In the discrete case, the agent act on the binary\n # representation of the observation\n return spaces.Dict(\n {\n \"observation\": spaces.Discrete(2**n_bits),\n \"achieved_goal\": spaces.Discrete(2**n_bits),\n \"desired_goal\": spaces.Discrete(2**n_bits),\n }\n )\n\n if image_obs_space:\n # When using image as input,\n # one image contains the bits 0 -> 0, 1 -> 255\n # and the rest is filled with zeros\n return spaces.Dict(\n {\n \"observation\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"achieved_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n \"desired_goal\": spaces.Box(\n low=0,\n high=255,\n shape=self.image_shape,\n dtype=np.uint8,\n ),\n }\n )\n\n return spaces.Dict(\n {\n \"observation\": spaces.MultiBinary(n_bits),\n \"achieved_goal\": spaces.MultiBinary(n_bits),\n \"desired_goal\": spaces.MultiBinary(n_bits),\n }\n )\n\n def _get_obs(self) -> dict[str, int | np.ndarray]:\n \"\"\"\n Helper to create the observation.\n\n :return: The current observation.\n \"\"\"\n return OrderedDict(\n [\n (\"observation\", self.convert_if_needed(self.state.copy())),\n (\"achieved_goal\", self.convert_if_needed(self.state.copy())),\n (\"desired_goal\", self.convert_if_needed(self.desired_goal.copy())),\n ]\n )\n\n def reset(self, *, seed: int | None = None, options: dict | None = None) -> tuple[dict[str, int | np.ndarray], dict]:\n if seed is not None:\n self._obs_space.seed(seed)\n self.current_step = 0\n self.state = self._obs_space.sample()\n return self._get_obs(), {}\n\n def step(self, action: np.ndarray | int) -> GymStepReturn:\n \"\"\"\n Step into the env.\n\n :param action:\n :return:\n \"\"\"\n if self.continuous:\n self.state[action > 0] = 1 - self.state[action > 0]\n else:\n self.state[action] = 1 - self.state[action]\n obs = self._get_obs()\n reward = float(self.compute_reward(obs[\"achieved_goal\"], obs[\"desired_goal\"], None).item())\n terminated = reward == 0\n self.current_step += 1\n # Episode terminate when we reached the goal or the max number of steps\n info = {\"is_success\": terminated}\n truncated = self.current_step >= self.max_steps\n return obs, reward, terminated, truncated, info\n\n def compute_reward(\n self, achieved_goal: int | np.ndarray, desired_goal: int | np.ndarray, _info: dict[str, Any] | None\n ) -> np.float32:\n # As we are using a vectorized version, we need to keep track of the `batch_size`\n if isinstance(achieved_goal, int):\n batch_size = 1\n elif self.image_obs_space:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 3 else 1\n else:\n batch_size = achieved_goal.shape[0] if len(achieved_goal.shape) > 1 else 1\n\n desired_goal = self.convert_to_bit_vector(desired_goal, batch_size)\n achieved_goal = self.convert_to_bit_vector(achieved_goal, batch_size)\n\n # Deceptive reward: it is positive only when the goal is achieved\n # Here we are using a vectorized version\n distance = np.linalg.norm(achieved_goal - desired_goal, axis=-1)\n return -(distance > 0).astype(np.float32)\n\n def render(self) -> np.ndarray | 
None: # type: ignore[override]\n if self.render_mode == \"rgb_array\":\n return self.state.copy()\n print(self.state)\n return None\n\n def close(self) -> None:\n pass\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 11130}, "tests/test_n_step_replay.py::105": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["pytest"], "enclosing_function": "test_nstep_early_termination", "extracted_code": "", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/test_her.py::196": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/vec_env/__init__.py", "stable_baselines3/her/goal_selection_strategy.py"], "used_names": ["BitFlippingEnv", "DDPG", "DQN", "HerReplayBuffer", "SAC", "TD3", "deepcopy", "make_vec_env", "os", "pytest"], "enclosing_function": "test_save_load", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo 
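A sketch tying BitFlippingEnv to VecMonitor for the test_vec_monitor.py::77 context; the filename is an assumption, and info_keywords picks up the is_success flag that BitFlippingEnv.step above places in info.

# Hedged sketch -- filename assumed; is_success comes from BitFlippingEnv.step above.
from stable_baselines3.common.envs import BitFlippingEnv
from stable_baselines3.common.vec_env import DummyVecEnv, VecMonitor

venv = VecMonitor(
    DummyVecEnv([lambda: BitFlippingEnv(n_bits=4)]),
    filename="./vec_run",
    info_keywords=("is_success",),
)
obs = venv.reset()
obs, rewards, dones, infos = venv.step([venv.action_space.sample()])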
import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. 
with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n \"SimpleMultiObsEnv\",\n \"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 15, "n_files_resolved": 8, "n_chars_extracted": 7184}, "tests/test_n_step_replay.py::158": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["pytest"], "enclosing_function": "test_nstep_no_terminations", "extracted_code": "", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 0}, "tests/test_distributions.py::35": {"resolved_imports": ["stable_baselines3/__init__.py", 
"stable_baselines3/common/distributions.py", "stable_baselines3/common/utils.py"], "used_names": ["TanhBijector"], "enclosing_function": "test_bijector", "extracted_code": "# Source: stable_baselines3/common/distributions.py\nclass TanhBijector:\n \"\"\"\n Bijective transformation of a probability distribution\n using a squashing function (tanh)\n\n :param epsilon: small value to avoid NaN due to numerical imprecision.\n \"\"\"\n\n def __init__(self, epsilon: float = 1e-6):\n super().__init__()\n self.epsilon = epsilon\n\n @staticmethod\n def forward(x: th.Tensor) -> th.Tensor:\n return th.tanh(x)\n\n @staticmethod\n def atanh(x: th.Tensor) -> th.Tensor:\n \"\"\"\n Inverse of Tanh\n\n Taken from Pyro: https://github.com/pyro-ppl/pyro\n 0.5 * torch.log((1 + x ) / (1 - x))\n \"\"\"\n return 0.5 * (x.log1p() - (-x).log1p())\n\n @staticmethod\n def inverse(y: th.Tensor) -> th.Tensor:\n \"\"\"\n Inverse tanh.\n\n :param y:\n :return:\n \"\"\"\n eps = th.finfo(y.dtype).eps\n # Clip the action to avoid NaN\n return TanhBijector.atanh(y.clamp(min=-1.0 + eps, max=1.0 - eps))\n\n def log_prob_correction(self, x: th.Tensor) -> th.Tensor:\n # Squash correction (from original SAC implementation)\n return th.log(1.0 - th.tanh(x) ** 2 + self.epsilon)", "n_imports_parsed": 8, "n_files_resolved": 3, "n_chars_extracted": 1195}, "tests/test_logger.py::627": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "PPO"], "enclosing_function": "test_rollout_success_rate_onpolicy_algo", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 5788}, "tests/test_monitor.py::65": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_monitor", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 5236}, "tests/test_monitor.py::85": {"resolved_imports": ["stable_baselines3/common/monitor.py"], "used_names": ["LoadMonitorResultsError", "Monitor", "get_monitor_files", "load_results", "os", "pytest", "uuid", "warnings"], "enclosing_function": "test_monitor_load_results", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] 
= (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n 
\"\"\"\n return self.episode_times\n\nclass LoadMonitorResultsError(Exception):\n \"\"\"\n Raised when loading the monitor log fails.\n \"\"\"\n\n pass\n\ndef get_monitor_files(path: str) -> list[str]:\n \"\"\"\n get all the monitor files in the given path\n\n :param path: the logging folder\n :return: the log files\n \"\"\"\n return glob(os.path.join(path, \"*\" + Monitor.EXT))\n\ndef load_results(path: str) -> pandas.DataFrame:\n \"\"\"\n Load all Monitor logs from a given directory path matching ``*monitor.csv``\n\n :param path: the directory path containing the log file(s)\n :return: the logged data\n \"\"\"\n monitor_files = get_monitor_files(path)\n if len(monitor_files) == 0:\n raise LoadMonitorResultsError(f\"No monitor files of the form *{Monitor.EXT} found in {path}\")\n data_frames, headers = [], []\n for file_name in monitor_files:\n with open(file_name) as file_handler:\n first_line = file_handler.readline()\n assert first_line[0] == \"#\"\n header = json.loads(first_line[1:])\n data_frame = pandas.read_csv(file_handler, index_col=None)\n headers.append(header)\n data_frame[\"t\"] += header[\"t_start\"]\n data_frames.append(data_frame)\n data_frames = [df for df in data_frames if not df.empty]\n if not data_frames:\n # Only empty monitor files, return empty df\n empty_df = pandas.DataFrame(columns=[\"r\", \"l\", \"t\"])\n # Create index to have the same columns\n empty_df.reset_index(inplace=True)\n return empty_df\n data_frame = pandas.concat(data_frames)\n data_frame.sort_values(\"t\", inplace=True)\n data_frame.reset_index(inplace=True)\n data_frame[\"t\"] -= min(header[\"t_start\"] for header in headers)\n return data_frame", "n_imports_parsed": 8, "n_files_resolved": 1, "n_chars_extracted": 6981}, "tests/test_sde.py::101": {"resolved_imports": ["stable_baselines3/__init__.py"], "used_names": ["A2C", "PPO", "SAC", "pytest"], "enclosing_function": "test_state_dependent_noise", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n__all__ = [\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 1467}, 
"tests/test_vec_envs.py::172": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "Monitor", "VecFrameStack", "pytest", "spaces"], "enclosing_function": "test_vecenv_custom_calls", "extracted_code": "# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import 
VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 14, "n_files_resolved": 4, "n_chars_extracted": 7364}, "tests/test_utils.py::200": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/atari_wrappers.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/noise.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "evaluate_policy", "pytest"], "enclosing_function": "test_evaluate_policy", "extracted_code": "# Source: stable_baselines3/common/evaluation.py\ndef evaluate_policy(\n model: \"type_aliases.PolicyPredictor\",\n env: gym.Env | VecEnv,\n n_eval_episodes: int = 10,\n deterministic: bool = True,\n render: bool = False,\n callback: Callable[[dict[str, Any], dict[str, Any]], None] | None = None,\n reward_threshold: float | None = None,\n return_episode_rewards: bool = False,\n warn: bool = True,\n) -> tuple[float, float] | tuple[list[float], list[int]]:\n \"\"\"\n Runs the policy for ``n_eval_episodes`` episodes and outputs the average return\n per episode (sum of undiscounted rewards).\n If a vector env is passed in, this divides the episodes to evaluate onto the\n different elements of the vector env. This static division of work is done to\n remove bias. See https://github.com/DLR-RM/stable-baselines3/issues/402 for more\n details and discussion.\n\n .. note::\n If environment has not been wrapped with ``Monitor`` wrapper, reward and\n episode lengths are counted as it appears with ``env.step`` calls. If\n the environment contains wrappers that modify rewards or episode lengths\n (e.g. reward scaling, early episode reset), these will affect the evaluation\n results as well. You can avoid this by wrapping environment with ``Monitor``\n wrapper before anything else.\n\n :param model: The RL agent you want to evaluate. 
This can be any object\n that implements a ``predict`` method, such as an RL algorithm (``BaseAlgorithm``)\n or policy (``BasePolicy``).\n :param env: The gym environment or ``VecEnv`` environment.\n :param n_eval_episodes: Number of episode to evaluate the agent\n :param deterministic: Whether to use deterministic or stochastic actions\n :param render: Whether to render the environment or not\n :param callback: callback function to perform additional checks,\n called ``n_envs`` times after each step.\n Gets locals() and globals() passed as parameters.\n See https://github.com/DLR-RM/stable-baselines3/issues/1912 for more details.\n :param reward_threshold: Minimum expected reward per episode,\n this will raise an error if the performance is not met\n :param return_episode_rewards: If True, a list of rewards and episode lengths\n per episode will be returned instead of the mean.\n :param warn: If True (default), warns user about lack of a Monitor wrapper in the\n evaluation environment.\n :return: Mean return per episode (sum of rewards), std of reward per episode.\n Returns (list[float], list[int]) when ``return_episode_rewards`` is True, first\n list containing per-episode return and second containing per-episode lengths\n (in number of steps).\n \"\"\"\n is_monitor_wrapped = False\n # Avoid circular import\n from stable_baselines3.common.monitor import Monitor\n\n if not isinstance(env, VecEnv):\n env = DummyVecEnv([lambda: env]) # type: ignore[list-item, return-value]\n\n is_monitor_wrapped = is_vecenv_wrapped(env, VecMonitor) or env.env_is_wrapped(Monitor)[0]\n\n if not is_monitor_wrapped and warn:\n warnings.warn(\n \"Evaluation environment is not wrapped with a ``Monitor`` wrapper. \"\n \"This may result in reporting modified episode lengths and rewards, if other wrappers happen to modify these. \"\n \"Consider wrapping environment first with ``Monitor`` wrapper.\",\n UserWarning,\n )\n\n n_envs = env.num_envs\n episode_rewards = []\n episode_lengths = []\n\n episode_counts = np.zeros(n_envs, dtype=\"int\")\n # Divides episodes among different sub environments in the vector as evenly as possible\n episode_count_targets = np.array([(n_eval_episodes + i) // n_envs for i in range(n_envs)], dtype=\"int\")\n\n current_rewards = np.zeros(n_envs)\n current_lengths = np.zeros(n_envs, dtype=\"int\")\n observations = env.reset()\n states = None\n episode_starts = np.ones((env.num_envs,), dtype=bool)\n while (episode_counts < episode_count_targets).any():\n actions, states = model.predict(\n observations, # type: ignore[arg-type]\n state=states,\n episode_start=episode_starts,\n deterministic=deterministic,\n )\n new_observations, rewards, dones, infos = env.step(actions)\n current_rewards += rewards\n current_lengths += 1\n for i in range(n_envs):\n if episode_counts[i] < episode_count_targets[i]:\n # unpack values so that the callback can access the local variables\n reward = rewards[i]\n done = dones[i]\n info = infos[i]\n episode_starts[i] = done\n\n if callback is not None:\n callback(locals(), globals())\n\n if dones[i]:\n if is_monitor_wrapped:\n # Atari wrapper can send a \"done\" signal when\n # the agent loses a life, but it does not correspond\n # to the true end of episode\n if \"episode\" in info.keys():\n # Do not trust \"done\" with episode endings.\n # Monitor wrapper includes \"episode\" key in info if environment\n # has been wrapped with it. 
Use those rewards instead.\n episode_rewards.append(info[\"episode\"][\"r\"])\n episode_lengths.append(info[\"episode\"][\"l\"])\n # Only increment at the real end of an episode\n episode_counts[i] += 1\n else:\n episode_rewards.append(current_rewards[i])\n episode_lengths.append(current_lengths[i])\n episode_counts[i] += 1\n current_rewards[i] = 0\n current_lengths[i] = 0\n\n observations = new_observations\n\n if render:\n env.render()\n\n mean_reward = np.mean(episode_rewards)\n std_reward = np.std(episode_rewards)\n if reward_threshold is not None:\n assert mean_reward > reward_threshold, \"Mean reward below threshold: \" f\"{mean_reward:.2f} < {reward_threshold:.2f}\"\n if return_episode_rewards:\n return episode_rewards, episode_lengths\n return mean_reward, std_reward", "n_imports_parsed": 17, "n_files_resolved": 8, "n_chars_extracted": 6399}, "tests/test_logger.py::166": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["DEBUG", "INFO", "configure"], "enclosing_function": "test_main", "extracted_code": "# Source: stable_baselines3/common/logger.py\nDEBUG = 10\n\nINFO = 20\n\ndef configure(folder: str | None = None, format_strings: list[str] | None = None) -> Logger:\n \"\"\"\n Configure the current logger.\n\n :param folder: the save location\n (if None, $SB3_LOGDIR, if still None, tempdir/SB3-[date & time])\n :param format_strings: the output logging format\n (if None, $SB3_LOG_FORMAT, if still None, ['stdout', 'log', 'csv'])\n :return: The logger object.\n \"\"\"\n if folder is None:\n folder = os.getenv(\"SB3_LOGDIR\")\n if folder is None:\n folder = os.path.join(tempfile.gettempdir(), datetime.datetime.now().strftime(\"SB3-%Y-%m-%d-%H-%M-%S-%f\"))\n assert isinstance(folder, str)\n os.makedirs(folder, exist_ok=True)\n\n log_suffix = \"\"\n if format_strings is None:\n format_strings = os.getenv(\"SB3_LOG_FORMAT\", \"stdout,log,csv\").split(\",\")\n\n format_strings = list(filter(None, format_strings))\n output_formats = [make_output_format(f, folder, log_suffix) for f in format_strings]\n\n logger = Logger(folder=folder, output_formats=output_formats)\n # Only print when some files will be saved\n if len(format_strings) > 0 and format_strings != [\"stdout\"]:\n logger.log(f\"Logging to {folder}\")\n return logger", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 1278}, "tests/test_run.py::239": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/noise.py"], "used_names": ["PPO", "pytest"], "enclosing_function": "test_ppo_warnings", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 7, "n_files_resolved": 3, "n_chars_extracted": 549}, "tests/test_vec_monitor.py::35": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/envs/bit_flipping_env.py", 
"stable_baselines3/common/evaluation.py", "stable_baselines3/common/monitor.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DummyVecEnv", "VecMonitor", "json", "os", "pandas", "uuid"], "enclosing_function": "test_vec_monitor", "extracted_code": "# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\nVecEnvWrapperT = TypeVar(\"VecEnvWrapperT\", bound=VecEnvWrapper)\n\n\ndef unwrap_vec_wrapper(env: VecEnv, vec_wrapper_class: type[VecEnvWrapperT]) -> VecEnvWrapperT | None:\n \"\"\"\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",\n \"VecTransposeImage\",\n \"VecVideoRecorder\",\n \"is_vecenv_wrapped\",\n \"sync_envs_normalization\",\n \"unwrap_vec_normalize\",\n \"unwrap_vec_wrapper\",\n]", "n_imports_parsed": 13, "n_files_resolved": 5, "n_chars_extracted": 2041}, "tests/test_n_step_replay.py::28": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/buffers.py", "stable_baselines3/common/env_util.py"], "used_names": ["DQN", "NStepReplayBuffer", "SAC", "TD3", "make_vec_env", "pytest"], "enclosing_function": "test_run", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), 
\"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/buffers.py\nclass NStepReplayBuffer(ReplayBuffer):\n \"\"\"\n Replay buffer used for computing n-step returns in off-policy algorithms like SAC/DQN.\n\n The n-step return combines multiple steps of future rewards,\n discounted by the discount factor gamma.\n This can help improve sample efficiency and credit assignment.\n\n This implementation uses the same storage space as a normal replay buffer,\n and NumPy vectorized operations at sampling time to efficiently compute the\n n-step return, without requiring extra memory.\n\n This implementation is inspired by:\n - https://github.com/younggyoseo/FastTD3\n - https://github.com/DLR-RM/stable-baselines3/pull/81\n\n It avoids potential issues such as:\n - https://github.com/younggyoseo/FastTD3/issues/6\n\n :param buffer_size: Max number of element in the buffer\n :param observation_space: Observation space\n :param action_space: Action space\n :param device: PyTorch device\n :param n_envs: Number of parallel environments\n :param optimize_memory_usage: Not supported\n :param handle_timeout_termination: Handle timeout termination (due to timelimit)\n separately and treat the task as infinite horizon task.\n https://github.com/DLR-RM/stable-baselines3/issues/284\n :param n_steps: Number of steps to accumulate rewards for n-step returns\n :param gamma: Discount factor for future rewards\n \"\"\"\n\n def __init__(self, *args, n_steps: int = 3, gamma: float = 0.99, **kwargs):\n super().__init__(*args, **kwargs)\n self.n_steps = n_steps\n self.gamma = gamma\n if self.optimize_memory_usage:\n raise NotImplementedError(\"NStepReplayBuffer doesn't support optimize_memory_usage=True\")\n\n def _get_samples(self, batch_inds: np.ndarray, env: VecNormalize | None = None) -> ReplayBufferSamples:\n \"\"\"\n Sample a batch of transitions and compute n-step returns.\n\n For each sampled transition, the method computes the cumulative discounted reward over\n the next `n_steps`, properly handling episode termination and timeouts.\n The next observation and done flag correspond to the last transition in the computed n-step trajectory.\n\n :param batch_inds: Indices of samples to retrieve\n :param env: Optional VecNormalize environment for normalizing observations/rewards\n :return: A batch of samples with n-step returns and corresponding observations/actions\n \"\"\"\n # Randomly choose env indices for each sample\n env_indices = np.random.randint(0, self.n_envs, size=batch_inds.shape)\n\n # Note: the self.pos index is dangerous (will overlap two different episodes when buffer is full)\n # so we set self.pos-1 to truncated=True (temporarily) if done=False and truncated=False\n last_valid_index = self.pos - 1\n original_timeout_values = self.timeouts[last_valid_index].copy()\n self.timeouts[last_valid_index] = 
np.logical_or(original_timeout_values, np.logical_not(self.dones[last_valid_index]))\n\n # Compute n-step indices with wrap-around\n steps = np.arange(self.n_steps).reshape(1, -1) # shape: [1, n_steps]\n indices = (batch_inds[:, None] + steps) % self.buffer_size # shape: [batch, n_steps]\n\n # Retrieve sequences of transitions\n rewards_seq = self._normalize_reward(self.rewards[indices, env_indices[:, None]], env) # [batch, n_steps]\n dones_seq = self.dones[indices, env_indices[:, None]] # [batch, n_steps]\n truncated_seq = self.timeouts[indices, env_indices[:, None]] # [batch, n_steps]\n\n # Compute masks: 1 until first done/truncation (inclusive)\n done_or_truncated = np.logical_or(dones_seq, truncated_seq)\n done_idx = done_or_truncated.argmax(axis=1)\n # If no done/truncation, keep full sequence\n has_done_or_truncated = done_or_truncated.any(axis=1)\n done_idx = np.where(has_done_or_truncated, done_idx, self.n_steps - 1)\n\n mask = np.arange(self.n_steps).reshape(1, -1) <= done_idx[:, None] # shape: [batch, n_steps]\n # Compute discount factors for bootstrapping (using target Q-Value)\n # It is gamma ** n_steps by default but should be adjusted in case of early termination/truncation.\n target_q_discounts = self.gamma ** mask.sum(axis=1, keepdims=True).astype(np.float32) # [batch, 1]\n\n # Apply discount\n discounts = self.gamma ** np.arange(self.n_steps, dtype=np.float32).reshape(1, -1) # [1, n_steps]\n discounted_rewards = rewards_seq * discounts * mask\n n_step_returns = discounted_rewards.sum(axis=1, keepdims=True) # [batch, 1]\n\n # Compute indices of next_obs/done at the final point of the n-step transition\n last_indices = (batch_inds + done_idx) % self.buffer_size\n next_obs = self._normalize_obs(self.next_observations[last_indices, env_indices], env)\n next_dones = self.dones[last_indices, env_indices][:, None].astype(np.float32)\n next_timeouts = self.timeouts[last_indices, env_indices][:, None].astype(np.float32)\n final_dones = next_dones * (1.0 - next_timeouts)\n\n # Revert back tmp changes to avoid sampling across episodes\n self.timeouts[last_valid_index] = original_timeout_values\n\n # Gather observations and actions\n obs = self._normalize_obs(self.observations[batch_inds, env_indices], env)\n actions = self.actions[batch_inds, env_indices]\n\n return ReplayBufferSamples(\n observations=self.to_torch(obs), # type: ignore[arg-type]\n actions=self.to_torch(actions),\n next_observations=self.to_torch(next_obs), # type: ignore[arg-type]\n dones=self.to_torch(final_dones),\n rewards=self.to_torch(n_step_returns),\n discounts=self.to_torch(target_q_discounts),\n )\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: 
start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with single argument that wraps the environment in many things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n if some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword argument to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 6, "n_files_resolved": 3, "n_chars_extracted": 11747}, "tests/test_save_load.py::208": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/base_class.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/save_util.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "DDPG", "DQN", "DummyVecEnv", "PPO", "SAC", "TD3", "os", "pytest"], "enclosing_function": "test_set_env", "extracted_code": "# Source: 
stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 21, "n_files_resolved": 7, "n_chars_extracted": 3393}, 
"tests/test_logger.py::623": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/logger.py", "stable_baselines3/common/monitor.py"], "used_names": ["Monitor", "PPO"], "enclosing_function": "test_rollout_success_rate_onpolicy_algo", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/monitor.py\nclass Monitor(gym.Wrapper[ObsType, ActType, ObsType, ActType]):\n \"\"\"\n A monitor wrapper for Gym environments, it is used to know the episode reward, length, time and other data.\n\n :param env: The environment\n :param filename: the location to save a log file, can be None for no log\n :param allow_early_resets: allows the reset of the environment before it is done\n :param reset_keywords: extra keywords for the reset call,\n if extra parameters are needed at reset\n :param info_keywords: extra information to log, from the information return of env.step()\n :param override_existing: appends to file if ``filename`` exists, otherwise\n override existing files (default)\n \"\"\"\n\n EXT = \"monitor.csv\"\n\n def __init__(\n self,\n env: gym.Env,\n filename: str | None = None,\n allow_early_resets: bool = True,\n reset_keywords: tuple[str, ...] = (),\n info_keywords: tuple[str, ...] = (),\n override_existing: bool = True,\n ):\n super().__init__(env=env)\n self.t_start = time.time()\n self.results_writer = None\n if filename is not None:\n env_id = env.spec.id if env.spec is not None else None\n self.results_writer = ResultsWriter(\n filename,\n header={\"t_start\": self.t_start, \"env_id\": str(env_id)},\n extra_keys=reset_keywords + info_keywords,\n override_existing=override_existing,\n )\n\n self.reset_keywords = reset_keywords\n self.info_keywords = info_keywords\n self.allow_early_resets = allow_early_resets\n self.rewards: list[float] = []\n self.needs_reset = True\n self.episode_returns: list[float] = []\n self.episode_lengths: list[int] = []\n self.episode_times: list[float] = []\n self.total_steps = 0\n # extra info about the current episode, that was passed in during reset()\n self.current_reset_info: dict[str, Any] = {}\n\n def reset(self, **kwargs) -> tuple[ObsType, dict[str, Any]]:\n \"\"\"\n Calls the Gym environment reset. Can only be called if the environment is over, or if allow_early_resets is True\n\n :param kwargs: Extra keywords saved for the next episode. only if defined by reset_keywords\n :return: the first observation of the environment\n \"\"\"\n if not self.allow_early_resets and not self.needs_reset:\n raise RuntimeError(\n \"Tried to reset an environment before done. 
If you want to allow early resets, \"\n \"wrap your env with Monitor(env, path, allow_early_resets=True)\"\n )\n self.rewards = []\n self.needs_reset = False\n for key in self.reset_keywords:\n value = kwargs.get(key)\n if value is None:\n raise ValueError(f\"Expected you to pass keyword argument {key} into reset\")\n self.current_reset_info[key] = value\n return self.env.reset(**kwargs)\n\n def step(self, action: ActType) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:\n \"\"\"\n Step the environment with the given action\n\n :param action: the action\n :return: observation, reward, terminated, truncated, information\n \"\"\"\n if self.needs_reset:\n raise RuntimeError(\"Tried to step environment that needs reset\")\n observation, reward, terminated, truncated, info = self.env.step(action)\n self.rewards.append(float(reward))\n if terminated or truncated:\n self.needs_reset = True\n ep_rew = sum(self.rewards)\n ep_len = len(self.rewards)\n ep_info = {\"r\": round(ep_rew, 6), \"l\": ep_len, \"t\": round(time.time() - self.t_start, 6)}\n for key in self.info_keywords:\n ep_info[key] = info[key]\n self.episode_returns.append(ep_rew)\n self.episode_lengths.append(ep_len)\n self.episode_times.append(time.time() - self.t_start)\n ep_info.update(self.current_reset_info)\n if self.results_writer:\n self.results_writer.write_row(ep_info)\n info[\"episode\"] = ep_info\n self.total_steps += 1\n return observation, reward, terminated, truncated, info\n\n def close(self) -> None:\n \"\"\"\n Closes the environment\n \"\"\"\n super().close()\n if self.results_writer is not None:\n self.results_writer.close()\n\n def get_total_steps(self) -> int:\n \"\"\"\n Returns the total number of timesteps\n\n :return:\n \"\"\"\n return self.total_steps\n\n def get_episode_rewards(self) -> list[float]:\n \"\"\"\n Returns the rewards of all the episodes\n\n :return:\n \"\"\"\n return self.episode_returns\n\n def get_episode_lengths(self) -> list[int]:\n \"\"\"\n Returns the number of timesteps of all the episodes\n\n :return:\n \"\"\"\n return self.episode_lengths\n\n def get_episode_times(self) -> list[float]:\n \"\"\"\n Returns the runtime in seconds of all the episodes\n\n :return:\n \"\"\"\n return self.episode_times", "n_imports_parsed": 18, "n_files_resolved": 4, "n_chars_extracted": 5788}, "tests/test_predict.py::75": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/utils.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["DQN", "DummyVecEnv", "SAC", "TD3", "get_device", "pytest"], "enclosing_function": "test_predict", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), 
\"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n raise ImportError(\n\n \"A2C\",\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]\n\n\n# Source: stable_baselines3/common/utils.py\ndef get_device(device: th.device | str = \"auto\") -> th.device:\n \"\"\"\n Retrieve PyTorch device.\n It checks that the requested device is available first.\n For now, it supports only cpu and cuda.\n By default, it tries to use the gpu.\n\n :param device: One for 'auto', 'cuda', 'cpu'\n :return: Supported Pytorch device\n \"\"\"\n # Cuda by default\n if device == \"auto\":\n device = \"cuda\"\n # Force conversion to th.device\n device = th.device(device)\n\n # Cuda not available\n if device.type == th.device(\"cuda\").type and not th.cuda.is_available():\n return th.device(\"cpu\")\n\n return device\n\n\n# Source: stable_baselines3/common/vec_env/__init__.py\n\nfrom stable_baselines3.common.vec_env.base_vec_env import CloudpickleWrapper, VecEnv, VecEnvWrapper\nfrom stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv\nfrom stable_baselines3.common.vec_env.stacked_observations import StackedObservations\nfrom stable_baselines3.common.vec_env.subproc_vec_env import SubprocVecEnv\nfrom stable_baselines3.common.vec_env.vec_check_nan import VecCheckNan\nfrom stable_baselines3.common.vec_env.vec_extract_dict_obs import VecExtractDictObs\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.vec_env.vec_monitor import VecMonitor\nfrom stable_baselines3.common.vec_env.vec_normalize import VecNormalize\nfrom stable_baselines3.common.vec_env.vec_transpose import VecTransposeImage\nfrom stable_baselines3.common.vec_env.vec_video_recorder import VecVideoRecorder\n\n__all__ = [\n \"CloudpickleWrapper\",\n \"DummyVecEnv\",\n \"StackedObservations\",\n \"SubprocVecEnv\",\n \"VecCheckNan\",\n \"VecEnv\",\n \"VecEnvWrapper\",\n \"VecExtractDictObs\",\n \"VecFrameStack\",\n \"VecMonitor\",\n \"VecNormalize\",", "n_imports_parsed": 10, "n_files_resolved": 5, "n_chars_extracted": 3334}, "tests/test_envs.py::249": {"resolved_imports": ["stable_baselines3/common/env_checker.py", "stable_baselines3/common/envs/__init__.py"], "used_names": ["IdentityEnvBox", "SimpleMultiObsEnv", "check_env", "pytest", "types"], "enclosing_function": "test_common_failures_reset", "extracted_code": "# Source: stable_baselines3/common/env_checker.py\ndef check_env(env: gym.Env, warn: bool = True, skip_render_check: bool = True) -> None:\n \"\"\"\n Check that an environment follows Gym API.\n This is particularly useful when using a custom environment.\n Please take a look at https://gymnasium.farama.org/api/env/\n for more information about the API.\n\n It also optionally check that the environment is compatible with Stable-Baselines.\n\n :param env: The Gym environment that will be checked\n :param warn: Whether to output additional warnings\n mainly related to the interaction with Stable Baselines\n :param 
skip_render_check: Whether to skip the checks for the render method.\n True by default (useful for the CI)\n \"\"\"\n assert isinstance(\n env, gym.Env\n ), \"Your environment must inherit from the gymnasium.Env class cf. https://gymnasium.farama.org/api/env/\"\n\n # ============= Check the spaces (observation and action) ================\n _check_spaces(env)\n\n # Define aliases for convenience\n observation_space = env.observation_space\n action_space = env.action_space\n\n try:\n env.reset(seed=0)\n except TypeError as e:\n raise TypeError(\"The reset() method must accept a `seed` parameter\") from e\n\n # Warn the user if needed.\n # A warning means that the environment may run but not work properly with Stable Baselines algorithms\n should_skip = False\n if warn:\n should_skip = _check_unsupported_spaces(env, observation_space, action_space)\n\n obs_spaces = observation_space.spaces if isinstance(observation_space, spaces.Dict) else {\"\": observation_space}\n for key, space in obs_spaces.items():\n if isinstance(space, spaces.Box):\n _check_box_obs(space, key)\n\n # Check for the action space, it may lead to hard-to-debug issues\n if isinstance(action_space, spaces.Box) and (\n np.any(np.abs(action_space.low) != np.abs(action_space.high))\n or np.any(action_space.low != -1)\n or np.any(action_space.high != 1)\n ):\n warnings.warn(\n \"We recommend you to use a symmetric and normalized Box action space (range=[-1, 1]) \"\n \"cf. https://stable-baselines3.readthedocs.io/en/master/guide/rl_tips.html\"\n )\n\n if isinstance(action_space, spaces.Box):\n assert np.all(\n np.isfinite(np.array([action_space.low, action_space.high]))\n ), \"Continuous action space must have a finite lower and upper bound\"\n\n if isinstance(action_space, spaces.Box) and action_space.dtype != np.dtype(np.float32):\n warnings.warn(\n f\"Your action space has dtype {action_space.dtype}, we recommend using np.float32 to avoid cast errors.\"\n )\n\n # If Sequence or Graph observation space, do not check the observation any further\n if should_skip:\n return\n\n # ============ Check the returned values ===============\n _check_returned_values(env, observation_space, action_space)\n\n # ==== Check the render method and the declared render modes ====\n if not skip_render_check:\n _check_render(env, warn) # pragma: no cover\n\n try:\n check_for_nested_spaces(env.observation_space)\n # The check doesn't support nested observations/dict actions\n # A warning about it has already been emitted\n _check_nan(env)\n except NotImplementedError:\n pass\n\n\n# Source: stable_baselines3/common/envs/__init__.py\nfrom stable_baselines3.common.envs.bit_flipping_env import BitFlippingEnv\nfrom stable_baselines3.common.envs.identity_env import (\n FakeImageEnv,\n IdentityEnv,\n IdentityEnvBox,\n IdentityEnvMultiBinary,\n IdentityEnvMultiDiscrete,\n)\nfrom stable_baselines3.common.envs.multi_input_envs import SimpleMultiObsEnv\n\n__all__ = [\n \"BitFlippingEnv\",\n \"FakeImageEnv\",\n \"IdentityEnv\",\n \"IdentityEnvBox\",\n \"IdentityEnvMultiBinary\",\n \"IdentityEnvMultiDiscrete\",\n 
\"SimpleMultiObsEnv\",\n]", "n_imports_parsed": 8, "n_files_resolved": 2, "n_chars_extracted": 4487}, "tests/test_sde.py::65": {"resolved_imports": ["stable_baselines3/__init__.py"], "used_names": ["PPO", "pytest"], "enclosing_function": "test_only_sde_squashed", "extracted_code": "# Source: stable_baselines3/__init__.py\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\n \"DDPG\",\n \"DQN\",\n \"PPO\",\n \"SAC\",\n \"TD3\",\n \"HerReplayBuffer\",\n \"get_system_info\",\n]", "n_imports_parsed": 6, "n_files_resolved": 1, "n_chars_extracted": 549}, "tests/test_callbacks.py::77": {"resolved_imports": ["stable_baselines3/__init__.py", "stable_baselines3/common/callbacks.py", "stable_baselines3/common/env_util.py", "stable_baselines3/common/envs/__init__.py", "stable_baselines3/common/vec_env/__init__.py"], "used_names": ["A2C", "CallbackList", "CheckpointCallback", "DDPG", "DQN", "EvalCallback", "EveryNTimesteps", "LogEveryNTimesteps", "PPO", "SAC", "StopTrainingOnMaxEpisodes", "StopTrainingOnNoModelImprovement", "StopTrainingOnRewardThreshold", "TD3", "make_vec_env", "os", "pytest", "shutil"], "enclosing_function": "test_callbacks", "extracted_code": "# Source: stable_baselines3/__init__.py\nimport os\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\n\nfrom stable_baselines3.a2c import A2C\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n\nfrom stable_baselines3.common.utils import get_system_info\nfrom stable_baselines3.ddpg import DDPG\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\nfrom stable_baselines3.dqn import DQN\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\n\nfrom stable_baselines3.her.her_replay_buffer import HerReplayBuffer\nfrom stable_baselines3.ppo import PPO\nfrom 
stable_baselines3.sac import SAC\nfrom stable_baselines3.td3 import TD3\n\n# Read version from file\nversion_file = os.path.join(os.path.dirname(__file__), \"version.txt\")\nwith open(version_file) as file_handler:\n __version__ = file_handler.read().strip()\n\n\ndef HER(*args, **kwargs):\n\n\n# Source: stable_baselines3/common/callbacks.py\nclass CallbackList(BaseCallback):\n \"\"\"\n Class for chaining callbacks.\n\n :param callbacks: A list of callbacks that will be called\n sequentially.\n \"\"\"\n\n def __init__(self, callbacks: list[BaseCallback]):\n super().__init__()\n assert isinstance(callbacks, list)\n self.callbacks = callbacks\n\n def _init_callback(self) -> None:\n for callback in self.callbacks:\n callback.init_callback(self.model)\n\n # Fix for https://github.com/DLR-RM/stable-baselines3/issues/1791\n # pass through the parent callback to all children\n callback.parent = self.parent\n\n def _on_training_start(self) -> None:\n for callback in self.callbacks:\n callback.on_training_start(self.locals, self.globals)\n\n def _on_rollout_start(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_start()\n\n def _on_step(self) -> bool:\n continue_training = True\n for callback in self.callbacks:\n # Return False (stop training) if at least one callback returns False\n continue_training = callback.on_step() and continue_training\n return continue_training\n\n def _on_rollout_end(self) -> None:\n for callback in self.callbacks:\n callback.on_rollout_end()\n\n def _on_training_end(self) -> None:\n for callback in self.callbacks:\n callback.on_training_end()\n\n def update_child_locals(self, locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n for callback in self.callbacks:\n callback.update_locals(locals_)\n\nclass CheckpointCallback(BaseCallback):\n \"\"\"\n Callback for saving a model every ``save_freq`` calls\n to ``env.step()``.\n By default, it only saves model checkpoints,\n you need to pass ``save_replay_buffer=True``,\n and ``save_vecnormalize=True`` to also save replay buffer checkpoints\n and normalization statistics checkpoints.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``save_freq = max(save_freq // n_envs, 1)``\n\n :param save_freq: Save checkpoints every ``save_freq`` call of the callback.\n :param save_path: Path to the folder where the model will be saved.\n :param name_prefix: Common prefix to the saved models\n :param save_replay_buffer: Save the model replay buffer\n :param save_vecnormalize: Save the ``VecNormalize`` statistics\n :param verbose: Verbosity level: 0 for no output, 2 for indicating when saving model checkpoint\n \"\"\"\n\n def __init__(\n self,\n save_freq: int,\n save_path: str,\n name_prefix: str = \"rl_model\",\n save_replay_buffer: bool = False,\n save_vecnormalize: bool = False,\n verbose: int = 0,\n ):\n super().__init__(verbose)\n self.save_freq = save_freq\n self.save_path = save_path\n self.name_prefix = name_prefix\n self.save_replay_buffer = save_replay_buffer\n self.save_vecnormalize = save_vecnormalize\n\n def _init_callback(self) -> None:\n # Create folder if needed\n if self.save_path is not None:\n os.makedirs(self.save_path, exist_ok=True)\n\n def _checkpoint_path(self, checkpoint_type: str = \"\", extension: str = \"\") -> str:\n \"\"\"\n Helper to get checkpoint path for each type of checkpoint.\n\n :param checkpoint_type: empty for the model, \"replay_buffer_\"\n or \"vecnormalize_\" for the other checkpoints.\n :param extension: Checkpoint file extension (zip for model, pkl for others)\n :return: Path to the checkpoint\n \"\"\"\n return os.path.join(self.save_path, f\"{self.name_prefix}_{checkpoint_type}{self.num_timesteps}_steps.{extension}\")\n\n def _on_step(self) -> bool:\n if self.n_calls % self.save_freq == 0:\n model_path = self._checkpoint_path(extension=\"zip\")\n self.model.save(model_path)\n if self.verbose >= 2:\n print(f\"Saving model checkpoint to {model_path}\")\n\n if self.save_replay_buffer and hasattr(self.model, \"replay_buffer\") and self.model.replay_buffer is not None:\n # If model has a replay buffer, save it too\n replay_buffer_path = self._checkpoint_path(\"replay_buffer_\", extension=\"pkl\")\n self.model.save_replay_buffer(replay_buffer_path) # type: ignore[attr-defined]\n if self.verbose > 1:\n print(f\"Saving model replay buffer checkpoint to {replay_buffer_path}\")\n\n if self.save_vecnormalize and self.model.get_vec_normalize_env() is not None:\n # Save the VecNormalize statistics\n vec_normalize_path = self._checkpoint_path(\"vecnormalize_\", extension=\"pkl\")\n self.model.get_vec_normalize_env().save(vec_normalize_path) # type: ignore[union-attr]\n if self.verbose >= 2:\n print(f\"Saving model VecNormalize to {vec_normalize_path}\")\n\n return True\n\nclass EvalCallback(EventCallback):\n \"\"\"\n Callback for evaluating an agent.\n\n .. 
warning::\n\n When using multiple environments, each call to ``env.step()``\n will effectively correspond to ``n_envs`` steps.\n To account for that, you can use ``eval_freq = max(eval_freq // n_envs, 1)``\n\n :param eval_env: The environment used for evaluation\n :param callback_on_new_best: Callback to trigger\n when there is a new best model according to the ``mean_reward``\n :param callback_after_eval: Callback to trigger after every evaluation\n :param n_eval_episodes: The number of episodes to test the agent\n :param eval_freq: Evaluate the agent every ``eval_freq`` calls of the callback.\n :param log_path: Path to a folder where the evaluations (``evaluations.npz``)\n will be saved. It will be updated at each evaluation.\n :param best_model_save_path: Path to a folder where the best model\n according to performance on the eval env will be saved.\n :param deterministic: Whether the evaluation should\n use stochastic or deterministic actions.\n :param render: Whether to render the environment during evaluation\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about evaluation results\n :param warn: Passed to ``evaluate_policy`` (warns if ``eval_env`` has not been\n wrapped with a Monitor wrapper)\n \"\"\"\n\n def __init__(\n self,\n eval_env: gym.Env | VecEnv,\n callback_on_new_best: BaseCallback | None = None,\n callback_after_eval: BaseCallback | None = None,\n n_eval_episodes: int = 5,\n eval_freq: int = 10000,\n log_path: str | None = None,\n best_model_save_path: str | None = None,\n deterministic: bool = True,\n render: bool = False,\n verbose: int = 1,\n warn: bool = True,\n ):\n super().__init__(callback_after_eval, verbose=verbose)\n\n self.callback_on_new_best = callback_on_new_best\n if self.callback_on_new_best is not None:\n # Give access to the parent\n self.callback_on_new_best.parent = self\n\n self.n_eval_episodes = n_eval_episodes\n self.eval_freq = eval_freq\n self.best_mean_reward = -np.inf\n self.last_mean_reward = -np.inf\n self.deterministic = deterministic\n self.render = render\n self.warn = warn\n\n # Convert to VecEnv for consistency\n if not isinstance(eval_env, VecEnv):\n eval_env = DummyVecEnv([lambda: eval_env]) # type: ignore[list-item, return-value]\n\n self.eval_env = eval_env\n self.best_model_save_path = best_model_save_path\n # Logs will be written in ``evaluations.npz``\n if log_path is not None:\n log_path = os.path.join(log_path, \"evaluations\")\n self.log_path = log_path\n self.evaluations_results: list[list[float]] = []\n self.evaluations_timesteps: list[int] = []\n self.evaluations_length: list[list[int]] = []\n # For computing success rate\n self._is_success_buffer: list[bool] = []\n self.evaluations_successes: list[list[bool]] = []\n\n def _init_callback(self) -> None:\n # Does not work in some corner cases, where the wrapper is not the same\n if not isinstance(self.training_env, type(self.eval_env)):\n warnings.warn(\"Training and eval env are not of the same type: \" f\"{self.training_env} != {self.eval_env}\")\n\n # Create folders if needed\n if self.best_model_save_path is not None:\n os.makedirs(self.best_model_save_path, exist_ok=True)\n if self.log_path is not None:\n os.makedirs(os.path.dirname(self.log_path), exist_ok=True)\n\n # Init callback called on new best model\n if self.callback_on_new_best is not None:\n self.callback_on_new_best.init_callback(self.model)\n\n def _log_success_callback(self, locals_: dict[str, Any], globals_: dict[str, Any]) -> None:\n \"\"\"\n Callback passed to 
the ``evaluate_policy`` function\n in order to log the success rate (when applicable),\n for instance when using HER.\n\n :param locals_:\n :param globals_:\n \"\"\"\n info = locals_[\"info\"]\n\n if locals_[\"done\"]:\n maybe_is_success = info.get(\"is_success\")\n if maybe_is_success is not None:\n self._is_success_buffer.append(maybe_is_success)\n\n def _on_step(self) -> bool:\n continue_training = True\n\n if self.eval_freq > 0 and self.n_calls % self.eval_freq == 0:\n # Sync training and eval env if there is VecNormalize\n if self.model.get_vec_normalize_env() is not None:\n try:\n sync_envs_normalization(self.training_env, self.eval_env)\n except AttributeError as e:\n raise AssertionError(\n \"Training and eval env are not wrapped the same way, \"\n \"see https://stable-baselines3.readthedocs.io/en/master/guide/callbacks.html#evalcallback \"\n \"and warning above.\"\n ) from e\n\n # Reset success rate buffer\n self._is_success_buffer = []\n\n episode_rewards, episode_lengths = evaluate_policy(\n self.model,\n self.eval_env,\n n_eval_episodes=self.n_eval_episodes,\n render=self.render,\n deterministic=self.deterministic,\n return_episode_rewards=True,\n warn=self.warn,\n callback=self._log_success_callback,\n )\n\n if self.log_path is not None:\n assert isinstance(episode_rewards, list)\n assert isinstance(episode_lengths, list)\n self.evaluations_timesteps.append(self.num_timesteps)\n self.evaluations_results.append(episode_rewards)\n self.evaluations_length.append(episode_lengths)\n\n kwargs = {}\n # Save success log if present\n if len(self._is_success_buffer) > 0:\n self.evaluations_successes.append(self._is_success_buffer)\n kwargs = dict(successes=self.evaluations_successes)\n\n np.savez(\n self.log_path,\n timesteps=self.evaluations_timesteps,\n results=self.evaluations_results,\n ep_lengths=self.evaluations_length,\n **kwargs, # type: ignore[arg-type]\n )\n\n mean_reward, std_reward = np.mean(episode_rewards), np.std(episode_rewards)\n mean_ep_length, std_ep_length = np.mean(episode_lengths), np.std(episode_lengths)\n self.last_mean_reward = float(mean_reward)\n\n if self.verbose >= 1:\n print(f\"Eval num_timesteps={self.num_timesteps}, \" f\"episode_reward={mean_reward:.2f} +/- {std_reward:.2f}\")\n print(f\"Episode length: {mean_ep_length:.2f} +/- {std_ep_length:.2f}\")\n # Add to current Logger\n self.logger.record(\"eval/mean_reward\", float(mean_reward))\n self.logger.record(\"eval/mean_ep_length\", mean_ep_length)\n\n if len(self._is_success_buffer) > 0:\n success_rate = np.mean(self._is_success_buffer)\n if self.verbose >= 1:\n print(f\"Success rate: {100 * success_rate:.2f}%\")\n self.logger.record(\"eval/success_rate\", success_rate)\n\n # Dump log so the evaluation results are printed with the correct timestep\n self.logger.record(\"time/total_timesteps\", self.num_timesteps, exclude=\"tensorboard\")\n self.logger.dump(self.num_timesteps)\n\n if mean_reward > self.best_mean_reward:\n if self.verbose >= 1:\n print(\"New best mean reward!\")\n if self.best_model_save_path is not None:\n self.model.save(os.path.join(self.best_model_save_path, \"best_model\"))\n self.best_mean_reward = float(mean_reward)\n # Trigger callback on new best model, if needed\n if self.callback_on_new_best is not None:\n continue_training = self.callback_on_new_best.on_step()\n\n # Trigger callback after every evaluation, if needed\n if self.callback is not None:\n continue_training = continue_training and self._on_event()\n\n return continue_training\n\n def update_child_locals(self, 
locals_: dict[str, Any]) -> None:\n \"\"\"\n Update the references to the local variables.\n\n :param locals_: the local variables during rollout collection\n \"\"\"\n if self.callback:\n self.callback.update_locals(locals_)\n\nclass StopTrainingOnRewardThreshold(BaseCallback):\n \"\"\"\n Stop the training once a threshold in episodic reward\n has been reached (i.e. when the model is good enough).\n\n It must be used with the ``EvalCallback``.\n\n :param reward_threshold: Minimum expected reward per episode\n to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because episodic reward\n threshold reached\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, reward_threshold: float, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.reward_threshold = reward_threshold\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnRewardThreshold`` callback must be used with an ``EvalCallback``\"\n continue_training = bool(self.parent.best_mean_reward < self.reward_threshold)\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because the mean reward {self.parent.best_mean_reward:.2f} \"\n f\"is above the threshold {self.reward_threshold}\"\n )\n return continue_training\n\nclass EveryNTimesteps(EventCallback):\n \"\"\"\n Trigger a callback every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two triggers.\n :param callback: Callback that will be called\n when the event is triggered.\n \"\"\"\n\n def __init__(self, n_steps: int, callback: BaseCallback):\n super().__init__(callback)\n self.n_steps = n_steps\n self.last_time_trigger = 0\n\n def _on_step(self) -> bool:\n if (self.num_timesteps - self.last_time_trigger) >= self.n_steps:\n self.last_time_trigger = self.num_timesteps\n return self._on_event()\n return True\n\nclass LogEveryNTimesteps(EveryNTimesteps):\n \"\"\"\n Log data every ``n_steps`` timesteps\n\n :param n_steps: Number of timesteps between two triggers.\n \"\"\"\n\n def __init__(self, n_steps: int):\n super().__init__(n_steps, callback=ConvertCallback(self._log_data))\n\n def _log_data(self, _locals: dict[str, Any], _globals: dict[str, Any]) -> bool:\n self.model.dump_logs()\n return True\n\nclass StopTrainingOnMaxEpisodes(BaseCallback):\n \"\"\"\n Stop the training once a maximum number of episodes have been played.\n\n For multiple environments, it presumes that the desired behavior is that the agent trains on each env for ``max_episodes``\n and in total for ``max_episodes * n_envs`` episodes.\n\n :param max_episodes: Maximum number of episodes after which to stop training.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating information about when training ended by\n reaching ``max_episodes``\n \"\"\"\n\n def __init__(self, max_episodes: int, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_episodes = max_episodes\n self._total_max_episodes = max_episodes\n self.n_episodes = 0\n\n def _init_callback(self) -> None:\n # At start set total max according to number of environments\n self._total_max_episodes = self.max_episodes * self.training_env.num_envs\n\n def _on_step(self) -> bool:\n # Check that the `dones` local variable is defined\n assert \"dones\" in self.locals, \"`dones` variable is not defined, please check your code next to `callback.on_step()`\"\n self.n_episodes += np.sum(self.locals[\"dones\"]).item()\n\n continue_training = self.n_episodes < self._total_max_episodes\n\n if self.verbose >= 1 and not 
continue_training:\n mean_episodes_per_env = self.n_episodes / self.training_env.num_envs\n mean_ep_str = (\n f\"with an average of {mean_episodes_per_env:.2f} episodes per env\" if self.training_env.num_envs > 1 else \"\"\n )\n\n print(\n f\"Stopping training with a total of {self.num_timesteps} steps because the \"\n f\"{self.locals.get('tb_log_name')} model reached max_episodes={self.max_episodes}, \"\n f\"by playing for {self.n_episodes} episodes \"\n f\"{mean_ep_str}\"\n )\n return continue_training\n\nclass StopTrainingOnNoModelImprovement(BaseCallback):\n \"\"\"\n Stop the training early if there is no new best model (new best mean reward) after more than N consecutive evaluations.\n\n It is possible to define a minimum number of evaluations before starting to count evaluations without improvement.\n\n It must be used with the ``EvalCallback``.\n\n :param max_no_improvement_evals: Maximum number of consecutive evaluations without a new best model.\n :param min_evals: Number of evaluations before starting to count evaluations without improvement.\n :param verbose: Verbosity level: 0 for no output, 1 for indicating when training ended because no new best model\n \"\"\"\n\n parent: EvalCallback\n\n def __init__(self, max_no_improvement_evals: int, min_evals: int = 0, verbose: int = 0):\n super().__init__(verbose=verbose)\n self.max_no_improvement_evals = max_no_improvement_evals\n self.min_evals = min_evals\n self.last_best_mean_reward = -np.inf\n self.no_improvement_evals = 0\n\n def _on_step(self) -> bool:\n assert self.parent is not None, \"``StopTrainingOnNoModelImprovement`` callback must be used with an ``EvalCallback``\"\n\n continue_training = True\n\n if self.n_calls > self.min_evals:\n if self.parent.best_mean_reward > self.last_best_mean_reward:\n self.no_improvement_evals = 0\n else:\n self.no_improvement_evals += 1\n if self.no_improvement_evals > self.max_no_improvement_evals:\n continue_training = False\n\n self.last_best_mean_reward = self.parent.best_mean_reward\n\n if self.verbose >= 1 and not continue_training:\n print(\n f\"Stopping training because there was no new best model in the last {self.no_improvement_evals:d} evaluations\"\n )\n\n return continue_training\n\n\n# Source: stable_baselines3/common/env_util.py\ndef make_vec_env(\n env_id: str | Callable[..., gym.Env],\n n_envs: int = 1,\n seed: int | None = None,\n start_index: int = 0,\n monitor_dir: str | None = None,\n wrapper_class: Callable[[gym.Env], gym.Env] | None = None,\n env_kwargs: dict[str, Any] | None = None,\n vec_env_cls: type[DummyVecEnv | SubprocVecEnv] | None = None,\n vec_env_kwargs: dict[str, Any] | None = None,\n monitor_kwargs: dict[str, Any] | None = None,\n wrapper_kwargs: dict[str, Any] | None = None,\n) -> VecEnv:\n \"\"\"\n Create a wrapped, monitored ``VecEnv``.\n By default it uses a ``DummyVecEnv`` which is usually faster\n than a ``SubprocVecEnv``.\n\n :param env_id: either the env ID, the env class or a callable returning an env\n :param n_envs: the number of environments you wish to have in parallel\n :param seed: the initial seed for the random number generator\n :param start_index: start rank index\n :param monitor_dir: Path to a folder where the monitor files will be saved.\n If None, no file will be written, however, the env will still be wrapped\n in a Monitor wrapper to provide additional information about training.\n :param wrapper_class: Additional wrapper to use on the environment.\n This can also be a function with a single argument that wraps the environment in many 
things.\n Note: the wrapper specified by this parameter will be applied after the ``Monitor`` wrapper.\n In some cases (e.g. with TimeLimit wrapper) this can lead to undesired behavior.\n See here for more details: https://github.com/DLR-RM/stable-baselines3/issues/894\n :param env_kwargs: Optional keyword arguments to pass to the env constructor\n :param vec_env_cls: A custom ``VecEnv`` class constructor. Default: None.\n :param vec_env_kwargs: Keyword arguments to pass to the ``VecEnv`` class constructor.\n :param monitor_kwargs: Keyword arguments to pass to the ``Monitor`` class constructor.\n :param wrapper_kwargs: Keyword arguments to pass to the ``Wrapper`` class constructor.\n :return: The wrapped environment\n \"\"\"\n env_kwargs = env_kwargs or {}\n vec_env_kwargs = vec_env_kwargs or {}\n monitor_kwargs = monitor_kwargs or {}\n wrapper_kwargs = wrapper_kwargs or {}\n assert vec_env_kwargs is not None # for mypy\n\n def make_env(rank: int) -> Callable[[], gym.Env]:\n def _init() -> gym.Env:\n # For type checker:\n assert monitor_kwargs is not None\n assert wrapper_kwargs is not None\n assert env_kwargs is not None\n\n if isinstance(env_id, str):\n # if the render mode was not specified, we set it to `rgb_array` as default.\n kwargs = {\"render_mode\": \"rgb_array\"}\n kwargs.update(env_kwargs)\n try:\n env = gym.make(env_id, **kwargs) # type: ignore[arg-type]\n except TypeError:\n env = gym.make(env_id, **env_kwargs)\n else:\n env = env_id(**env_kwargs)\n # Patch to support gym 0.21/0.26 and gymnasium\n env = _patch_env(env)\n\n if seed is not None:\n # Note: here we only seed the action space\n # We will seed the env at the next reset\n env.action_space.seed(seed + rank)\n # Wrap the env in a Monitor wrapper\n # to have additional training information\n monitor_path = os.path.join(monitor_dir, str(rank)) if monitor_dir is not None else None\n # Create the monitor folder if needed\n if monitor_path is not None and monitor_dir is not None:\n os.makedirs(monitor_dir, exist_ok=True)\n env = Monitor(env, filename=monitor_path, **monitor_kwargs)\n # Optionally, wrap the environment with the provided wrapper\n if wrapper_class is not None:\n env = wrapper_class(env, **wrapper_kwargs)\n return env\n\n return _init\n\n # No custom VecEnv is passed\n if vec_env_cls is None:\n # Default: use a DummyVecEnv\n vec_env_cls = DummyVecEnv\n\n vec_env = vec_env_cls([make_env(i + start_index) for i in range(n_envs)], **vec_env_kwargs)\n # Prepare the seeds for the first reset\n vec_env.seed(seed)\n return vec_env", "n_imports_parsed": 11, "n_files_resolved": 5, "n_chars_extracted": 26817}}}
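
The masked n-step return computed in the replay-buffer sampling extracted above can be sanity-checked in isolation. A minimal NumPy sketch, assuming scalar gamma and n_steps and toy reward/done arrays (all values here are illustrative, not part of the buffer API):

import numpy as np

# Illustrative values, not taken from the library.
gamma, n_steps = 0.99, 4
rewards_seq = np.array([[1.0, 1.0, 1.0, 1.0]])       # [batch=1, n_steps]
dones_seq = np.array([[False, True, False, False]])  # episode ends at step 1
truncated_seq = np.zeros_like(dones_seq)

# Same logic as the buffer: keep rewards up to and including the first
# done/truncation, then sum with per-step discounts.
done_or_truncated = np.logical_or(dones_seq, truncated_seq)
done_idx = done_or_truncated.argmax(axis=1)
done_idx = np.where(done_or_truncated.any(axis=1), done_idx, n_steps - 1)
mask = np.arange(n_steps).reshape(1, -1) <= done_idx[:, None]
discounts = gamma ** np.arange(n_steps)
n_step_return = (rewards_seq * discounts * mask).sum(axis=1)
# Discount applied to the bootstrapped target Q-value: gamma ** effective_n
target_q_discount = gamma ** mask.sum(axis=1)

assert np.isclose(n_step_return[0], 1.0 + gamma)   # only the first two rewards count
assert np.isclose(target_q_discount[0], gamma**2)  # effective horizon is 2 steps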
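
make_vec_env ties Monitor and the VecEnv classes together and is the usual entry point. A short usage sketch, assuming gymnasium's "CartPole-v1" is registered; the temporary log directory is an illustrative choice:

import tempfile

from stable_baselines3.common.env_util import make_vec_env
from stable_baselines3.common.monitor import load_results

log_dir = tempfile.mkdtemp()  # hypothetical log folder, any writable path works
# Four monitored CartPole workers in a DummyVecEnv (the default), seeded for reproducibility
vec_env = make_vec_env("CartPole-v1", n_envs=4, seed=0, monitor_dir=log_dir)
obs = vec_env.reset()
for _ in range(100):
    actions = [vec_env.action_space.sample() for _ in range(vec_env.num_envs)]
    obs, rewards, dones, infos = vec_env.step(actions)  # workers auto-reset on done
vec_env.close()
# Each worker wrote <rank>.monitor.csv; load_results aggregates them into one DataFrame
print(load_results(log_dir))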
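
get_device resolves "auto" to CUDA only when CUDA is actually available; a two-line check of that fallback behavior:

import torch as th
from stable_baselines3.common.utils import get_device

assert get_device("cpu") == th.device("cpu")
# "auto" prefers CUDA but silently falls back to CPU when unavailable
assert get_device("auto").type in ("cuda", "cpu")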
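
check_env is meant to be run once on a custom environment before training. A minimal sketch using the IdentityEnvBox test env referenced in this section (the constructor arguments shown are its defaults):

from stable_baselines3.common.env_checker import check_env
from stable_baselines3.common.envs import IdentityEnvBox

# Raises on API violations and warns about likely problems
# (e.g. a non-normalized Box action space) before training starts.
env = IdentityEnvBox(low=-1, high=1, ep_length=100)
check_env(env, warn=True)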
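
The evaluation and stopping callbacks above are designed to compose: StopTrainingOnRewardThreshold hangs off callback_on_new_best, StopTrainingOnNoModelImprovement off callback_after_eval. A sketch with illustrative thresholds and frequencies:

from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import (
    EvalCallback,
    StopTrainingOnNoModelImprovement,
    StopTrainingOnRewardThreshold,
)
from stable_baselines3.common.env_util import make_vec_env

train_env = make_vec_env("CartPole-v1", n_envs=4, seed=0)
eval_env = make_vec_env("CartPole-v1", n_envs=1, seed=1)

# Stop once the best mean eval reward crosses an (illustrative) threshold...
stop_on_reward = StopTrainingOnRewardThreshold(reward_threshold=475.0, verbose=1)
# ...or after 3 consecutive evaluations without a new best model.
stop_on_plateau = StopTrainingOnNoModelImprovement(max_no_improvement_evals=3, min_evals=5, verbose=1)

eval_callback = EvalCallback(
    eval_env,
    callback_on_new_best=stop_on_reward,
    callback_after_eval=stop_on_plateau,
    eval_freq=max(10_000 // train_env.num_envs, 1),  # account for n_envs steps per call
    n_eval_episodes=5,
)
model = PPO("MlpPolicy", train_env)
model.learn(total_timesteps=100_000, callback=eval_callback)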
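
CheckpointCallback and StopTrainingOnMaxEpisodes can be chained with CallbackList; note the save_freq adjustment for vectorized envs called out in the docstring warning. Paths and frequencies below are illustrative:

from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import CallbackList, CheckpointCallback, StopTrainingOnMaxEpisodes
from stable_baselines3.common.env_util import make_vec_env

train_env = make_vec_env("CartPole-v1", n_envs=4)
checkpoint = CheckpointCallback(
    save_freq=max(5_000 // train_env.num_envs, 1),  # save_freq counts calls to env.step()
    save_path="./checkpoints/",  # hypothetical output folder
    save_replay_buffer=False,  # PPO has no replay buffer to save
)
max_episodes = StopTrainingOnMaxEpisodes(max_episodes=100, verbose=1)

model = PPO("MlpPolicy", train_env)
model.learn(total_timesteps=20_000, callback=CallbackList([checkpoint, max_episodes]))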