| """Abstract base classes for RL algorithms.""" |
|
|
| import io |
| import pathlib |
| import time |
| from abc import ABC, abstractmethod |
| from collections import deque |
| from typing import Any, Dict, Iterable, List, Optional, Tuple, Type, Union |
|
|
| import gym |
| import numpy as np |
| import torch as th |
|
|
| from stable_baselines3.common import logger, utils |
| from stable_baselines3.common.callbacks import BaseCallback, CallbackList, ConvertCallback, EvalCallback |
| from stable_baselines3.common.env_util import is_wrapped |
| from stable_baselines3.common.monitor import Monitor |
| from stable_baselines3.common.noise import ActionNoise |
| from stable_baselines3.common.policies import BasePolicy, get_policy_from_name |
| from stable_baselines3.common.preprocessing import is_image_space, is_image_space_channels_first |
| from stable_baselines3.common.save_util import load_from_zip_file, recursive_getattr, recursive_setattr, save_to_zip_file |
| from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule |
| from stable_baselines3.common.utils import ( |
| check_for_correct_spaces, |
| get_device, |
| get_schedule_fn, |
| set_random_seed, |
| update_learning_rate, |
| ) |
| from stable_baselines3.common.vec_env import ( |
| DummyVecEnv, |
| VecEnv, |
| VecNormalize, |
| VecTransposeImage, |
| is_vecenv_wrapped, |
| unwrap_vec_normalize, |
| ) |
| from stable_baselines3.common.vec_env.obs_dict_wrapper import ObsDictWrapper |
|
|
|
|
| def maybe_make_env(env: Union[GymEnv, str, None], verbose: int) -> Optional[GymEnv]: |
| """If env is a string, make the environment; otherwise, return env. |
| |
| :param env: The environment to learn from. |
| :param verbose: logging verbosity |
| :return: A Gym (vector) environment. |
| """ |
| if isinstance(env, str): |
| if verbose >= 1: |
| print(f"Creating environment from the given name '{env}'") |
| env = gym.make(env) |
| return env |
|
|
|
|
| class BaseAlgorithm(ABC): |
| """ |
| The base class for RL algorithms. |
| |
| :param policy: The policy class to use (or its registered name, resolved via ``policy_base``) |
| :param env: The environment to learn from |
| (if registered in Gym, can be str. Can be None for loading trained models) |
| :param policy_base: The base policy used by this method |
| :param learning_rate: learning rate for the optimizer, |
| it can be a function of the current progress remaining (from 1 to 0) |
| :param policy_kwargs: Additional arguments to be passed to the policy on creation |
| :param tensorboard_log: the log location for tensorboard (if None, no logging) |
| :param verbose: The verbosity level: 0 none, 1 training information, 2 debug |
| :param device: Device on which the code should run. |
| By default, it will try to use a CUDA-compatible device and fall back to CPU |
| if that is not possible. |
| :param support_multi_env: Whether the algorithm supports training |
| with multiple environments (as in A2C) |
| :param create_eval_env: Whether to create a second environment that will be |
| used for evaluating the agent periodically. (Only available when passing string for the environment) |
| :param monitor_wrapper: When creating an environment, whether to wrap it |
| or not in a Monitor wrapper. |
| :param seed: Seed for the pseudo random generators |
| :param use_sde: Whether to use generalized State Dependent Exploration (gSDE) |
| instead of action noise exploration (default: False) |
| :param sde_sample_freq: Sample a new noise matrix every n steps when using gSDE |
| Default: -1 (only sample at the beginning of the rollout) |
| :param supported_action_spaces: The action spaces supported by the algorithm. |
| """ |
|
|
| def __init__( |
| self, |
| policy: Union[str, Type[BasePolicy]], |
| env: Union[GymEnv, str, None], |
| policy_base: Type[BasePolicy], |
| learning_rate: Union[float, Schedule], |
| policy_kwargs: Optional[Dict[str, Any]] = None, |
| tensorboard_log: Optional[str] = None, |
| verbose: int = 0, |
| device: Union[th.device, str] = "auto", |
| support_multi_env: bool = False, |
| create_eval_env: bool = False, |
| monitor_wrapper: bool = True, |
| seed: Optional[int] = None, |
| use_sde: bool = False, |
| sde_sample_freq: int = -1, |
| supported_action_spaces: Optional[Tuple[gym.spaces.Space, ...]] = None, |
| ): |
|
|
| if isinstance(policy, str) and policy_base is not None: |
| self.policy_class = get_policy_from_name(policy_base, policy) |
| else: |
| self.policy_class = policy |
|
|
| self.device = get_device(device) |
| if verbose > 0: |
| print(f"Using {self.device} device") |
|
|
| self.env = None |
| |
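| # Get the VecNormalize wrapper of the env, if it exists |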
| self._vec_normalize_env = unwrap_vec_normalize(env) |
| self.verbose = verbose |
| self.policy_kwargs = {} if policy_kwargs is None else policy_kwargs |
| self.observation_space = None |
| self.action_space = None |
| self.n_envs = None |
| self.num_timesteps = 0 |
| |
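| # Total timesteps of the current run, used to compute the remaining progress |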
| self._total_timesteps = 0 |
| self.eval_env = None |
| self.seed = seed |
| self.action_noise = None |
| self.start_time = None |
| self.policy = None |
| self.learning_rate = learning_rate |
| self.tensorboard_log = tensorboard_log |
| self.lr_schedule = None |
| self._last_obs = None |
| self._last_dones = None |
| |
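| # When using VecNormalize, store the last unnormalized observation |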
| self._last_original_obs = None |
| self._episode_num = 0 |
| |
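| # gSDE settings (only used when ``use_sde=True``) |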
| self.use_sde = use_sde |
| self.sde_sample_freq = sde_sample_freq |
| |
| |
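| # Track the training progress remaining (from 1 to 0), used by the learning rate schedule |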
| self._current_progress_remaining = 1 |
| |
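| # Buffers for episode infos and successes (for logging) |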
| self.ep_info_buffer = None |
| self.ep_success_buffer = None |
| |
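| # Number of gradient updates performed so far (for logging) |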
| self._n_updates = 0 |
|
|
| |
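| # Create and wrap the env if needed |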
| if env is not None: |
| if isinstance(env, str): |
| if create_eval_env: |
| self.eval_env = maybe_make_env(env, self.verbose) |
|
|
| env = maybe_make_env(env, self.verbose) |
| env = self._wrap_env(env, self.verbose, monitor_wrapper) |
|
|
| self.observation_space = env.observation_space |
| self.action_space = env.action_space |
| self.n_envs = env.num_envs |
| self.env = env |
|
|
| if supported_action_spaces is not None: |
| assert isinstance(self.action_space, supported_action_spaces), ( |
| f"The algorithm only supports {supported_action_spaces} as action spaces " |
| f"but {self.action_space} was provided" |
| ) |
|
|
| if not support_multi_env and self.n_envs > 1: |
| raise ValueError( |
| "Error: the model does not support multiple envs; it requires " "a single vectorized environment." |
| ) |
|
|
| if self.use_sde and not isinstance(self.action_space, gym.spaces.Box): |
| raise ValueError("generalized State-Dependent Exploration (gSDE) can only be used with continuous actions.") |
|
|
| @staticmethod |
| def _wrap_env(env: GymEnv, verbose: int = 0, monitor_wrapper: bool = True) -> VecEnv: |
| """ " |
| Wrap environment with the appropriate wrappers if needed. |
| For instance, to have a vectorized environment |
| or to re-order the image channels. |
| |
| :param env: The environment to wrap. |
| :param verbose: Verbosity level: 0 for no output, 1 to print wrapping messages. |
| :param monitor_wrapper: Whether to wrap the env in a ``Monitor`` when possible. |
| :return: The wrapped environment. |
| """ |
| if not isinstance(env, VecEnv): |
| if not is_wrapped(env, Monitor) and monitor_wrapper: |
| if verbose >= 1: |
| print("Wrapping the env with a `Monitor` wrapper") |
| env = Monitor(env) |
| if verbose >= 1: |
| print("Wrapping the env in a DummyVecEnv.") |
| env = DummyVecEnv([lambda: env]) |
|
|
| if ( |
| is_image_space(env.observation_space) |
| and not is_vecenv_wrapped(env, VecTransposeImage) |
| and not is_image_space_channels_first(env.observation_space) |
| ): |
| if verbose >= 1: |
| print("Wrapping the env in a VecTransposeImage.") |
| env = VecTransposeImage(env) |
|
|
| |
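| # Wrap dict observation spaces (e.g. goal-based envs used with HER) |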
| if isinstance(env.observation_space, gym.spaces.dict.Dict): |
| env = ObsDictWrapper(env) |
|
|
| return env |
|
|
| @abstractmethod |
| def _setup_model(self) -> None: |
| """Create networks, buffer and optimizers.""" |
|
|
| def _get_eval_env(self, eval_env: Optional[GymEnv]) -> Optional[GymEnv]: |
| """ |
| Return the environment that will be used for evaluation. |
| |
| :param eval_env: The environment used for evaluation (``self.eval_env`` is used if None). |
| :return: The wrapped evaluation environment, or None if none is available. |
| """ |
| if eval_env is None: |
| eval_env = self.eval_env |
|
|
| if eval_env is not None: |
| eval_env = self._wrap_env(eval_env, self.verbose) |
| assert eval_env.num_envs == 1, "The evaluation env must be a single (vectorized) environment" |
| return eval_env |
|
|
| def _setup_lr_schedule(self) -> None: |
| """Transform to callable if needed.""" |
| self.lr_schedule = get_schedule_fn(self.learning_rate) |
|
|
| def _update_current_progress_remaining(self, num_timesteps: int, total_timesteps: int) -> None: |
| """ |
| Compute current progress remaining (starts at 1 and ends at 0) |
| |
| :param num_timesteps: current number of timesteps |
| :param total_timesteps: total number of timesteps for the training run |
| """ |
| self._current_progress_remaining = 1.0 - float(num_timesteps) / float(total_timesteps) |
|
|
| def _update_learning_rate(self, optimizers: Union[List[th.optim.Optimizer], th.optim.Optimizer]) -> None: |
| """ |
| Update the optimizers' learning rate using the current learning rate schedule |
| and the current progress remaining (from 1 to 0). |
| |
| :param optimizers: |
| An optimizer or a list of optimizers. |
| """ |
| |
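| # Log the current learning rate |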
| logger.record("train/learning_rate", self.lr_schedule(self._current_progress_remaining)) |
|
|
| if not isinstance(optimizers, list): |
| optimizers = [optimizers] |
| for optimizer in optimizers: |
| update_learning_rate(optimizer, self.lr_schedule(self._current_progress_remaining)) |
|
|
| def _excluded_save_params(self) -> List[str]: |
| """ |
| Returns the names of the parameters that should be excluded from being |
| saved by pickling. E.g. replay buffers are skipped by default |
| as they take up a lot of space. PyTorch variables should be excluded |
| with this so they can be stored with ``th.save``. |
| |
| :return: List of parameters that should be excluded from being saved with pickle. |
| """ |
| return [ |
| "policy", |
| "device", |
| "env", |
| "eval_env", |
| "replay_buffer", |
| "rollout_buffer", |
| "_vec_normalize_env", |
| ] |
|
|
| def _get_torch_save_params(self) -> Tuple[List[str], List[str]]: |
| """ |
| Get the name of the torch variables that will be saved with |
| PyTorch ``th.save``, ``th.load`` and ``state_dicts`` instead of the default |
| pickling strategy. This is to handle device placement correctly. |
| |
| Names can point to specific variables under classes, e.g. |
| "policy.optimizer" would point to the ``optimizer`` attribute of |
| ``self.policy``. |
| |
| :return: |
| List of Torch variables whose state dicts to save (e.g. th.nn.Modules), |
| and list of other Torch variables to store with ``th.save``. |
| """ |
| state_dicts = ["policy"] |
|
|
| return state_dicts, [] |
|
|
| def _init_callback( |
| self, |
| callback: MaybeCallback, |
| eval_env: Optional[VecEnv] = None, |
| eval_freq: int = 10000, |
| n_eval_episodes: int = 5, |
| log_path: Optional[str] = None, |
| ) -> BaseCallback: |
| """ |
| :param callback: Callback(s) called at every step with state of the algorithm. |
| :param eval_env: Environment to use for evaluation; if None, no evaluation callback is created. |
| :param eval_freq: How many steps between evaluations |
| :param n_eval_episodes: Number of episodes to rollout during evaluation. |
| :param log_path: Path to a folder where the evaluations will be saved |
| :return: A hybrid callback calling ``callback`` and performing evaluation. |
| """ |
| |
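| # Convert a list of callbacks into a single ``CallbackList`` |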
| if isinstance(callback, list): |
| callback = CallbackList(callback) |
|
|
| |
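| # Convert a plain callable into a callback object |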
| if not isinstance(callback, BaseCallback): |
| callback = ConvertCallback(callback) |
|
|
| |
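| # Create an eval callback in charge of the periodic evaluation |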
| if eval_env is not None: |
| eval_callback = EvalCallback( |
| eval_env, |
| best_model_save_path=log_path, |
| log_path=log_path, |
| eval_freq=eval_freq, |
| n_eval_episodes=n_eval_episodes, |
| ) |
| callback = CallbackList([callback, eval_callback]) |
|
|
| callback.init_callback(self) |
| return callback |
|
|
| def _setup_learn( |
| self, |
| total_timesteps: int, |
| eval_env: Optional[GymEnv], |
| callback: MaybeCallback = None, |
| eval_freq: int = 10000, |
| n_eval_episodes: int = 5, |
| log_path: Optional[str] = None, |
| reset_num_timesteps: bool = True, |
| tb_log_name: str = "run", |
| ) -> Tuple[int, BaseCallback]: |
| """ |
| Initialize different variables needed for training. |
| |
| :param total_timesteps: The total number of samples (env steps) to train on |
| :param eval_env: Environment to use for evaluation. |
| :param callback: Callback(s) called at every step with state of the algorithm. |
| :param eval_freq: How many steps between evaluations |
| :param n_eval_episodes: How many episodes to play per evaluation |
| :param log_path: Path to a folder where the evaluations will be saved |
| :param reset_num_timesteps: Whether to reset or not the ``num_timesteps`` attribute |
| :param tb_log_name: the name of the run for tensorboard log |
| :return: The (possibly adjusted) total number of timesteps and the callback to use during training |
| """ |
| self.start_time = time.time() |
| if self.ep_info_buffer is None or reset_num_timesteps: |
| |
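| # Initialize buffers if they don't exist, or reinitialize them when resetting counters |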
| self.ep_info_buffer = deque(maxlen=100) |
| self.ep_success_buffer = deque(maxlen=100) |
|
|
| if self.action_noise is not None: |
| self.action_noise.reset() |
|
|
| if reset_num_timesteps: |
| self.num_timesteps = 0 |
| self._episode_num = 0 |
| else: |
| |
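| # Make sure training timesteps are ahead of the internal counter |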
| total_timesteps += self.num_timesteps |
| self._total_timesteps = total_timesteps |
|
|
| |
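| # Avoid resetting the environment when calling ``.learn()`` consecutive times |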
| if reset_num_timesteps or self._last_obs is None: |
| self._last_obs = self.env.reset() |
| self._last_dones = np.zeros((self.env.num_envs,), dtype=bool) |
| |
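| # Retrieve unnormalized observation for saving into the buffer |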
| if self._vec_normalize_env is not None: |
| self._last_original_obs = self._vec_normalize_env.get_original_obs() |
|
|
| if eval_env is not None and self.seed is not None: |
| eval_env.seed(self.seed) |
|
|
| eval_env = self._get_eval_env(eval_env) |
|
|
| |
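| # Configure logger's outputs |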
| utils.configure_logger(self.verbose, self.tensorboard_log, tb_log_name, reset_num_timesteps) |
|
|
| |
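| # Create eval callback if needed |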
| callback = self._init_callback(callback, eval_env, eval_freq, n_eval_episodes, log_path) |
|
|
| return total_timesteps, callback |
|
|
| def _update_info_buffer(self, infos: List[Dict[str, Any]], dones: Optional[np.ndarray] = None) -> None: |
| """ |
| Retrieve reward and episode length and update the buffer |
| if using Monitor wrapper. |
| |
| :param infos: List of additional information about the transition. |
| :param dones: Termination signals; successes are only logged for finished episodes. |
| """ |
| if dones is None: |
| dones = np.array([False] * len(infos)) |
| for idx, info in enumerate(infos): |
| maybe_ep_info = info.get("episode") |
| maybe_is_success = info.get("is_success") |
| if maybe_ep_info is not None: |
| self.ep_info_buffer.extend([maybe_ep_info]) |
| if maybe_is_success is not None and dones[idx]: |
| self.ep_success_buffer.append(maybe_is_success) |
|
|
| def get_env(self) -> Optional[VecEnv]: |
| """ |
| Returns the current environment (can be None if not defined). |
| |
| :return: The current environment |
| """ |
| return self.env |
|
|
| def get_vec_normalize_env(self) -> Optional[VecNormalize]: |
| """ |
| Return the ``VecNormalize`` wrapper of the training env |
| if it exists. |
| |
| :return: The ``VecNormalize`` env. |
| """ |
| return self._vec_normalize_env |
|
|
| def set_env(self, env: GymEnv) -> None: |
| """ |
| Checks the validity of the environment, and if it is coherent, set it as the current environment. |
| Furthermore wrap any non-vectorized env into a vectorized one. |
| Checked parameters: |
| - observation_space |
| - action_space |
| |
| :param env: The environment for learning a policy |
| """ |
| |
| |
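| # Wrap the env if needed (e.g. a non-vectorized env into a ``DummyVecEnv``) |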
| env = self._wrap_env(env, self.verbose) |
| |
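| # Check that the observation and action spaces match the model's |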
| check_for_correct_spaces(env, self.observation_space, self.action_space) |
|
|
| self.n_envs = env.num_envs |
| self.env = env |
|
|
| @abstractmethod |
| def learn( |
| self, |
| total_timesteps: int, |
| callback: MaybeCallback = None, |
| log_interval: int = 100, |
| tb_log_name: str = "run", |
| eval_env: Optional[GymEnv] = None, |
| eval_freq: int = -1, |
| n_eval_episodes: int = 5, |
| eval_log_path: Optional[str] = None, |
| reset_num_timesteps: bool = True, |
| ) -> "BaseAlgorithm": |
| """ |
| Return a trained model. |
| |
| :param total_timesteps: The total number of samples (env steps) to train on |
| :param callback: callback(s) called at every step with state of the algorithm. |
| :param log_interval: The number of timesteps before logging. |
| :param tb_log_name: the name of the run for TensorBoard logging |
| :param eval_env: Environment that will be used to evaluate the agent |
| :param eval_freq: Evaluate the agent every ``eval_freq`` timesteps (this may vary a little) |
| :param n_eval_episodes: Number of episodes to evaluate the agent |
| :param eval_log_path: Path to a folder where the evaluations will be saved |
| :param reset_num_timesteps: whether or not to reset the current timestep number (used in logging) |
| :return: the trained model |
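|
| Example (an illustrative sketch; ``PPO`` stands in for any concrete subclass):: |
|
| model = PPO("MlpPolicy", "CartPole-v1", verbose=1) |
| model.learn(total_timesteps=10_000) |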
| """ |
|
|
| def predict( |
| self, |
| observation: np.ndarray, |
| state: Optional[np.ndarray] = None, |
| mask: Optional[np.ndarray] = None, |
| deterministic: bool = False, |
| ) -> Tuple[np.ndarray, Optional[np.ndarray]]: |
| """ |
| Get the model's action(s) from an observation |
| |
| :param observation: the input observation |
| :param state: The last states (can be None, used in recurrent policies) |
| :param mask: The last masks (can be None, used in recurrent policies) |
| :param deterministic: Whether or not to return deterministic actions. |
| :return: the model's action and the next state |
| (used in recurrent policies) |
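|
| Example (an illustrative sketch; ``obs`` is an observation from the env):: |
|
| action, _state = model.predict(obs, deterministic=True) |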
| """ |
| return self.policy.predict(observation, state, mask, deterministic) |
|
|
| def set_random_seed(self, seed: Optional[int] = None) -> None: |
| """ |
| Set the seed of the pseudo-random generators |
| (python, numpy, pytorch, gym, action_space) |
| |
| :param seed: The random seed; if None, no seeding is performed. |
| """ |
| if seed is None: |
| return |
| set_random_seed(seed, using_cuda=self.device.type == th.device("cuda").type) |
| self.action_space.seed(seed) |
| if self.env is not None: |
| self.env.seed(seed) |
| if self.eval_env is not None: |
| self.eval_env.seed(seed) |
|
|
| def set_parameters( |
| self, |
| load_path_or_dict: Union[str, Dict[str, Dict]], |
| exact_match: bool = True, |
| device: Union[th.device, str] = "auto", |
| ) -> None: |
| """ |
| Load parameters from a given zip-file or a nested dictionary containing parameters for |
| different modules (see ``get_parameters``). |
| |
| :param load_path_or_dict: Location of the saved data (path or file-like, see ``save``), or a nested |
| dictionary containing nn.Module parameters used by the policy. The dictionary maps |
| object names to a state-dictionary returned by ``torch.nn.Module.state_dict()``. |
| :param exact_match: If True, the given parameters should include parameters for each |
| module and each of their parameters, otherwise raises an Exception. If set to False, this |
| can be used to update only specific parameters. |
| :param device: Device on which the code should run. |
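|
| Example (an illustrative sketch, copying parameters between two hypothetical |
| models ``model_a`` and ``model_b`` of the same class):: |
|
| model_b.set_parameters(model_a.get_parameters(), exact_match=True) |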
| """ |
| params = None |
| if isinstance(load_path_or_dict, dict): |
| params = load_path_or_dict |
| else: |
| _, params, _ = load_from_zip_file(load_path_or_dict, device=device) |
|
|
| |
| |
| |
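| # Keep track of which objects were updated. |
| # Only the state-dict names (the first element returned by |
| # ``_get_torch_save_params``) need updating here. |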
| objects_needing_update = set(self._get_torch_save_params()[0]) |
| updated_objects = set() |
|
|
| for name in params: |
| attr = None |
| try: |
| attr = recursive_getattr(self, name) |
| except Exception: |
| |
| |
| |
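| # ``recursive_getattr`` can fail in several ways (e.g. ``KeyError``), |
| # so catch everything and re-raise with a clearer message. |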
| raise ValueError(f"Key {name} is an invalid object name.") |
|
|
| if isinstance(attr, th.optim.Optimizer): |
| |
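| # Optimizers do not support the ``strict`` keyword: they have no notion |
| # of missing/unexpected keys, and ``load_state_dict`` already raises a |
| # ``ValueError`` when the parameter groups do not match. |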
| attr.load_state_dict(params[name]) |
| else: |
| |
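| # ``th.nn.Module`` objects support the ``strict`` keyword directly |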
| attr.load_state_dict(params[name], strict=exact_match) |
| updated_objects.add(name) |
|
|
| if exact_match and updated_objects != objects_needing_update: |
| raise ValueError( |
| "Names of parameters do not match agents' parameters: " |
| f"expected {objects_needing_update}, got {updated_objects}" |
| ) |
|
|
| @classmethod |
| def load( |
| cls, |
| path: Union[str, pathlib.Path, io.BufferedIOBase], |
| env: Optional[GymEnv] = None, |
| device: Union[th.device, str] = "auto", |
| **kwargs, |
| ) -> "BaseAlgorithm": |
| """ |
| Load the model from a zip-file |
| |
| :param path: path to the file (or a file-like) where to |
| load the agent from |
| :param env: the new environment to run the loaded model on |
| (can be None if you only need prediction from a trained model); it has priority over any saved environment |
| :param device: Device on which the code should run. |
| :param kwargs: extra arguments to change the model when loading |
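|
| Example (an illustrative sketch; assumes a concrete subclass such as ``PPO`` |
| and a model previously saved with ``save``):: |
|
| model = PPO.load("ppo_cartpole", env=my_env) |
| action, _ = model.predict(obs, deterministic=True) |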
| """ |
| data, params, pytorch_variables = load_from_zip_file(path, device=device) |
|
|
| |
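| # Remove stored device information and replace with ours |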
| if "policy_kwargs" in data: |
| if "device" in data["policy_kwargs"]: |
| del data["policy_kwargs"]["device"] |
|
|
| if "policy_kwargs" in kwargs and kwargs["policy_kwargs"] != data["policy_kwargs"]: |
| raise ValueError( |
| f"The specified policy kwargs do not equal the stored policy kwargs." |
| f"Stored kwargs: {data['policy_kwargs']}, specified kwargs: {kwargs['policy_kwargs']}" |
| ) |
|
|
| if "observation_space" not in data or "action_space" not in data: |
| raise KeyError("The observation_space and action_space were not given, can't verify new environments") |
|
|
| if env is not None: |
| |
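| # Wrap the new env first if needed |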
| env = cls._wrap_env(env, data["verbose"]) |
| |
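| # Check that the given env is valid (its spaces match the stored ones) |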
| check_for_correct_spaces(env, data["observation_space"], data["action_space"]) |
| else: |
| |
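| # Use stored env, if one exists. If not, continue as is (can be used for predict) |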
| if "env" in data: |
| env = data["env"] |
|
|
| |
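| # Build the model skeleton; ``_init_setup_model=False`` assumes the subclass |
| # constructor accepts this flag and skips network creation until the saved |
| # attributes have been restored |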
| model = cls( |
| policy=data["policy_class"], |
| env=env, |
| device=device, |
| _init_setup_model=False, |
| ) |
|
|
| |
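| # Restore saved attributes, apply user overrides, then create the networks |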
| model.__dict__.update(data) |
| model.__dict__.update(kwargs) |
| model._setup_model() |
|
|
| |
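| # Put the loaded state dicts back in place (policy, optimizers, ...) |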
| model.set_parameters(params, exact_match=True, device=device) |
|
|
| |
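| # Put the other saved pytorch variables back in place |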
| if pytorch_variables is not None: |
| for name in pytorch_variables: |
| recursive_setattr(model, name, pytorch_variables[name]) |
|
|
| |
| |
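| # Sample a new gSDE exploration matrix, so the loaded policy works properly |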
| if model.use_sde: |
| model.policy.reset_noise() |
| return model |
|
|
| def get_parameters(self) -> Dict[str, Dict]: |
| """ |
| Return the parameters of the agent. This includes parameters from different networks, e.g. |
| critics (value functions) and policies (pi functions). |
| |
| :return: Mapping from names of the objects to PyTorch state-dicts. |
| """ |
| state_dicts_names, _ = self._get_torch_save_params() |
| params = {} |
| for name in state_dicts_names: |
| attr = recursive_getattr(self, name) |
| |
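| # Retrieve the state dict of the saved object (e.g. the policy) |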
| params[name] = attr.state_dict() |
| return params |
|
|
| def save( |
| self, |
| path: Union[str, pathlib.Path, io.BufferedIOBase], |
| exclude: Optional[Iterable[str]] = None, |
| include: Optional[Iterable[str]] = None, |
| ) -> None: |
| """ |
| Save all the attributes of the object and the model parameters in a zip-file. |
| |
| :param path: path to the file where the RL agent should be saved |
| :param exclude: names of parameters that should be excluded in addition to the default ones |
| :param include: names of parameters that might be excluded but should be included anyway |
| """ |
| |
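| # Copy parameter list so we don't mutate the original dict |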
| data = self.__dict__.copy() |
|
|
| |
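| # Exclude is the union of the specified parameters (if any) and the standard exclusions |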
| if exclude is None: |
| exclude = [] |
| exclude = set(exclude).union(self._excluded_save_params()) |
|
|
| |
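| # Do not exclude params if they are specifically included |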
| if include is not None: |
| exclude = exclude.difference(include) |
|
|
| state_dicts_names, torch_variable_names = self._get_torch_save_params() |
| all_pytorch_variables = state_dicts_names + torch_variable_names |
| for torch_var in all_pytorch_variables: |
| |
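| # Only the top-level attribute name matters here, as that is the |
| # entry that must be dropped from the pickled data below |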
| var_name = torch_var.split(".")[0] |
| |
| exclude.add(var_name) |
|
|
| |
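| # Remove the excluded entries from the data that will be pickled |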
| for param_name in exclude: |
| data.pop(param_name, None) |
|
|
| |
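| # Build dict of torch variables (saved with ``th.save`` rather than pickle) |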
| pytorch_variables = None |
| if torch_variable_names is not None: |
| pytorch_variables = {} |
| for name in torch_variable_names: |
| attr = recursive_getattr(self, name) |
| pytorch_variables[name] = attr |
|
|
| |
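| # Build dict of state_dicts for all torch modules (e.g. the policy) |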
| params_to_save = self.get_parameters() |
|
|
| save_to_zip_file(path, data=data, params=params_to_save, pytorch_variables=pytorch_variables) |
|
|