| |
| |
| |
| |
|
|
| """ |
| This script demonstrates an interactive demo with the H1 rough terrain environment. |
| |
| .. code-block:: bash |
| |
| # Usage |
| ./isaaclab.sh -p scripts/demos/h1_locomotion.py |
| |
| """ |
|
|
| """Launch Isaac Sim Simulator first.""" |
|
|
| import argparse |
| import os |
| import sys |
|
|
| sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), "../..")) |
| import scripts.reinforcement_learning.rsl_rl.cli_args as cli_args |
|
|
|
|
| from isaaclab.app import AppLauncher |
|
|
| |
# Build the CLI: demo description, RSL-RL agent options, and the standard
# Isaac Sim AppLauncher options (headless mode, device selection, etc.).
parser = argparse.ArgumentParser(
    description="This script demonstrates an interactive demo with the H1 rough terrain environment."
)
# append RSL-RL cli arguments (experiment/run/checkpoint selection)
cli_args.add_rsl_rl_args(parser)
# append AppLauncher cli args
AppLauncher.add_app_launcher_args(parser)
args_cli = parser.parse_args()

# launch the Omniverse app — must happen before any isaacsim/omni imports below
app_launcher = AppLauncher(args_cli)
simulation_app = app_launcher.app
|
|
| """Rest everything follows.""" |
|
|
| import torch |
| from rsl_rl.runners import OnPolicyRunner |
|
|
| import carb |
| import omni |
| from omni.kit.viewport.utility import get_viewport_from_window_name |
| from omni.kit.viewport.utility.camera_state import ViewportCameraState |
| from pxr import Gf, Sdf |
|
|
| from isaaclab.envs import ManagerBasedRLEnv |
| from isaaclab.sim.utils.stage import get_current_stage |
| from isaaclab.utils.math import quat_apply |
|
|
| from isaaclab_rl.rsl_rl import RslRlOnPolicyRunnerCfg, RslRlVecEnvWrapper |
| from isaaclab_rl.utils.pretrained_checkpoint import get_published_pretrained_checkpoint |
|
|
| from isaaclab_tasks.manager_based.locomotion.velocity.config.h1.rough_env_cfg import H1RoughEnvCfg_PLAY |
|
|
# Task name and RL library used to look up the published pre-trained checkpoint.
TASK = "Isaac-Velocity-Rough-H1-v0"
RL_LIBRARY = "rsl_rl"
|
|
|
|
class H1RoughDemo:
    """This class provides an interactive demo for the H1 rough terrain environment.

    It loads a pre-trained checkpoint for the Isaac-Velocity-Rough-H1-v0 task, trained with RSL RL,
    and defines a set of keyboard commands for directing motion of selected robots.

    A robot can be selected from the scene through a mouse click. Once selected, the following
    keyboard controls can be used to control the robot:

    * UP: go forward
    * LEFT: turn left
    * RIGHT: turn right
    * DOWN: stop
    * C: switch between third-person and perspective views
    * ESC: exit current third-person view
    """

    def __init__(self):
        """Initializes the environment config designed for the interactive demo, sets up the
        environment, loads the pre-trained checkpoint, and registers mouse/keyboard handlers."""
        # parse the RSL-RL agent configuration for the chosen task from the CLI arguments
        agent_cfg: RslRlOnPolicyRunnerCfg = cli_args.parse_rsl_rl_cfg(TASK, args_cli)
        # fetch the published pre-trained checkpoint for this (library, task) pair
        checkpoint = get_published_pretrained_checkpoint(RL_LIBRARY, TASK)

        env_cfg = H1RoughEnvCfg_PLAY()
        env_cfg.scene.num_envs = 25
        # effectively disable episode time-outs so robots keep walking indefinitely
        env_cfg.episode_length_s = 1000000
        env_cfg.curriculum = None
        env_cfg.commands.base_velocity.ranges.lin_vel_x = (0.0, 1.0)
        env_cfg.commands.base_velocity.ranges.heading = (-1.0, 1.0)

        # wrap the environment with the RSL-RL vectorized-env interface
        self.env = RslRlVecEnvWrapper(ManagerBasedRLEnv(cfg=env_cfg))
        self.device = self.env.unwrapped.device

        # load the trained policy and build an inference-only callable
        ppo_runner = OnPolicyRunner(self.env, agent_cfg.to_dict(), log_dir=None, device=self.device)
        ppo_runner.load(checkpoint)
        self.policy = ppo_runner.get_inference_policy(device=self.device)

        self.create_camera()
        # per-robot command buffer; columns 0:3 mirror the command manager's base-velocity
        # command, column 3 is driven by the LEFT/RIGHT keys (presumably a heading/yaw term
        # — confirm against the task's observation layout)
        self.commands = torch.zeros(env_cfg.scene.num_envs, 4, device=self.device)
        self.commands[:, 0:3] = self.env.unwrapped.command_manager.get_command("base_velocity")
        self.set_up_keyboard()
        self._prim_selection = omni.usd.get_context().get_selection()
        # index of the robot currently under third-person control (None = no selection)
        self._selected_id = None
        self._previous_selected_id = None
        # camera offset expressed in the robot's base frame: behind and above the torso
        self._camera_local_transform = torch.tensor([-2.5, 0.0, 0.8], device=self.device)

    def create_camera(self):
        """Creates a camera to be used for third-person view."""
        stage = get_current_stage()
        self.viewport = get_viewport_from_window_name("Viewport")
        # create a camera prim dedicated to the third-person view; the default
        # perspective camera is kept for the free-look view
        self.camera_path = "/World/Camera"
        self.perspective_path = "/OmniverseKit_Persp"
        camera_prim = stage.DefinePrim(self.camera_path, "Camera")
        camera_prim.GetAttribute("focalLength").Set(8.5)
        # the kit viewport expects a center-of-interest attribute on the camera prim
        coi_prop = camera_prim.GetProperty("omni:kit:centerOfInterest")
        if not coi_prop or not coi_prop.IsValid():
            camera_prim.CreateAttribute(
                "omni:kit:centerOfInterest", Sdf.ValueTypeNames.Vector3d, True, Sdf.VariabilityUniform
            ).Set(Gf.Vec3d(0, 0, -10))
        # start in the free perspective view
        self.viewport.set_active_camera(self.perspective_path)

    def set_up_keyboard(self):
        """Sets up the keyboard-input interface and registers the control keys."""
        self._input = carb.input.acquire_input_interface()
        self._keyboard = omni.appwindow.get_default_app_window().get_keyboard()
        self._sub_keyboard = self._input.subscribe_to_keyboard_events(self._keyboard, self._on_keyboard_event)
        # forward-velocity and turn-rate magnitudes used by the arrow keys
        T = 1
        R = 0.5
        self._key_to_control = {
            "UP": torch.tensor([T, 0.0, 0.0, 0.0], device=self.device),
            "DOWN": torch.tensor([0.0, 0.0, 0.0, 0.0], device=self.device),
            "LEFT": torch.tensor([T, 0.0, 0.0, -R], device=self.device),
            "RIGHT": torch.tensor([T, 0.0, 0.0, R], device=self.device),
            "ZEROS": torch.tensor([0.0, 0.0, 0.0, 0.0], device=self.device),
        }

    def _on_keyboard_event(self, event):
        """Checks for a keyboard event and assigns the corresponding command to the selected robot.

        Note: selection checks use ``is not None`` rather than truthiness — the robot in
        ``env_0`` has id 0, which is falsy, and a plain ``if self._selected_id:`` would
        silently ignore keyboard commands for it.
        """
        if event.type == carb.input.KeyboardEventType.KEY_PRESS:
            # arrow keys: apply the mapped velocity command to the selected robot
            if event.input.name in self._key_to_control:
                if self._selected_id is not None:
                    self.commands[self._selected_id] = self._key_to_control[event.input.name]
            # ESC: clear the prim selection (update_selected_object then exits third-person view)
            elif event.input.name == "ESCAPE":
                self._prim_selection.clear_selected_prim_paths()
            # C: toggle between the third-person camera and the free perspective camera
            elif event.input.name == "C":
                if self._selected_id is not None:
                    if self.viewport.get_active_camera() == self.camera_path:
                        self.viewport.set_active_camera(self.perspective_path)
                    else:
                        self.viewport.set_active_camera(self.camera_path)
        elif event.type == carb.input.KeyboardEventType.KEY_RELEASE:
            # any key release stops the selected robot
            if self._selected_id is not None:
                self.commands[self._selected_id] = self._key_to_control["ZEROS"]

    def update_selected_object(self):
        """Determines which robot is currently selected and whether it is a valid H1 robot.

        For valid robots, we enter the third-person view for that robot. When a new robot is
        selected, the command of the previously selected robot is reset so it resumes random
        commands from the command manager.
        """
        self._previous_selected_id = self._selected_id
        selected_prim_paths = self._prim_selection.get_selected_prim_paths()
        if len(selected_prim_paths) == 0:
            # nothing selected: leave third-person view
            self._selected_id = None
            self.viewport.set_active_camera(self.perspective_path)
        elif len(selected_prim_paths) > 1:
            print("Multiple prims are selected. Please only select one!")
        else:
            prim_splitted_path = selected_prim_paths[0].split("/")
            # a valid selection looks like /World/envs/env_<i>/... — extract the env index
            if len(prim_splitted_path) >= 4 and prim_splitted_path[3][0:4] == "env_":
                self._selected_id = int(prim_splitted_path[3][4:])
                if self._previous_selected_id != self._selected_id:
                    self.viewport.set_active_camera(self.camera_path)
                self._update_camera()
            else:
                # NOTE(review): an invalid selection keeps the previous _selected_id —
                # confirm this is the intended behavior
                print("The selected prim was not a H1 robot")

        # restore command-manager control for the robot we just deselected
        if self._previous_selected_id is not None and self._previous_selected_id != self._selected_id:
            self.env.unwrapped.command_manager.reset([self._previous_selected_id])
            self.commands[:, 0:3] = self.env.unwrapped.command_manager.get_command("base_velocity")

    def _update_camera(self):
        """Updates the per-frame transform of the third-person camera to follow
        the selected robot's torso transform."""
        base_pos = self.env.unwrapped.scene["robot"].data.root_pos_w[self._selected_id, :]
        base_quat = self.env.unwrapped.scene["robot"].data.root_quat_w[self._selected_id, :]

        # place the camera at the fixed base-frame offset, rotated into world frame
        camera_pos = quat_apply(base_quat, self._camera_local_transform) + base_pos

        camera_state = ViewportCameraState(self.camera_path, self.viewport)
        eye = Gf.Vec3d(camera_pos[0].item(), camera_pos[1].item(), camera_pos[2].item())
        # aim slightly above the base origin, roughly at torso height
        target = Gf.Vec3d(base_pos[0].item(), base_pos[1].item(), base_pos[2].item() + 0.6)
        camera_state.set_position_world(eye, True)
        camera_state.set_target_world(target, True)
|
|
|
|
def main():
    """Run the interactive demo loop until the simulator window is closed."""
    demo = H1RoughDemo()
    obs, _ = demo.env.reset()
    while simulation_app.is_running():
        # refresh which robot (if any) is under third-person control
        demo.update_selected_object()
        with torch.inference_mode():
            obs, _, _, _ = demo.env.step(demo.policy(obs))
            # overwrite the velocity-command slice of the observations with the
            # keyboard-driven commands before the next policy call
            obs[:, 9:13] = demo.commands
|
|
|
|
if __name__ == "__main__":
    # run the main function
    main()
    # close sim app
    simulation_app.close()
|
|