| import dataclasses |
| import enum |
| import logging |
| import os |
| import pickle |
| import socket |
| import sys |
| import time |
| import traceback |
|
|
| import dill |
| import hydra |
| import numpy as np |
| import omegaconf |
| import torch |
| import torch.nn.functional as F |
| import tyro |
|
|
| from omegaconf import open_dict |
| from openpi.policies import policy as _policy |
| from openpi.policies import policy_config as _policy_config |
| from openpi.serving import websocket_policy_server |
| from openpi.training import config as _config |
| from openpi.training.config import get_data_config |
|
|
| from unified_video_action.common.pytorch_util import dict_apply |
| from unified_video_action.policy.base_image_policy import BaseImagePolicy |
| from unified_video_action.workspace.base_workspace import BaseWorkspace |
| from umi.real_world.real_inference_util import get_real_obs_resolution |
|
|
# Pre-computed language embeddings keyed by task keyword ("cup", "towel", "mouse");
# consumed by EvalRealPolicyAdapter.infer for language-conditioned policies.
# NOTE(review): loaded eagerly at import time; the relative path assumes the
# process is launched from the repo root — TODO confirm.
# Use a context manager so the file handle is closed (the previous
# pickle.load(open(...)) leaked the descriptor).
with open("prepared_data/language_latents.pkl", "rb") as _latents_file:
    language_latents = pickle.load(_latents_file)
|
|
|
|
def echo_exception():
    """Return the traceback of the exception currently being handled as one string.

    Must be called from inside an ``except`` block; outside one it returns the
    placeholder string ``"NoneType: None\\n"``.
    """
    # format_exc() is shorthand for "".join(format_exception(*sys.exc_info())).
    return traceback.format_exc()
|
|
|
|
def smooth_action(act_out, window_size=3, pad_size=1):
    """Smooth an action trajectory with a moving average along the time axis.

    Args:
        act_out: Tensor of shape ``(batch, timesteps, action_dim)``.
        window_size: Width of the uniform averaging window. Must equal
            ``2 * pad_size + 1`` so the output keeps the input length.
        pad_size: Number of replicate-padded steps added to each end of the
            time axis before convolving.

    Returns:
        Tensor of shape ``(batch, timesteps, action_dim)`` with each action
        dimension smoothed independently.

    Raises:
        ValueError: If ``window_size`` and ``pad_size`` are inconsistent
            (previously this surfaced as an opaque reshape error).
    """
    if window_size != 2 * pad_size + 1:
        raise ValueError(
            f"window_size ({window_size}) must equal 2 * pad_size + 1 "
            f"({2 * pad_size + 1}) to preserve the number of timesteps"
        )

    # Uniform averaging kernel, applied per channel via a single-channel conv.
    kernel = torch.ones(1, 1, window_size, device=act_out.device) / window_size

    # Replicate-pad the time axis (second-to-last dim) so edge steps average
    # against copies of themselves instead of zeros.
    act_out_padded = F.pad(act_out, (0, 0, pad_size, pad_size), mode="replicate")

    batch_size, timesteps, action_dim = act_out_padded.shape
    # Fold (batch, action_dim) into the conv batch so every action dimension
    # is smoothed independently by the same kernel.
    act_out_padded = act_out_padded.permute(0, 2, 1).reshape(-1, 1, timesteps)

    smoothed = F.conv1d(act_out_padded, kernel, padding=0)

    # The valid convolution consumed the 2*pad_size padding; restore
    # (batch, timesteps, action_dim).
    smoothed = smoothed.reshape(batch_size, action_dim, timesteps - 2 * pad_size)
    return smoothed.permute(0, 2, 1)
|
|
|
|
class EvalRealPolicyAdapter:
    """Adapter to wrap eval_real.py PolicyInferenceNode as a Policy interface.

    Loads a workspace-style checkpoint, restores the policy weights
    (preferring the EMA copy when the run trained with EMA), and exposes an
    ``infer(obs) -> {"actions": ...}`` method plus a ``metadata`` property
    for the websocket policy server.
    """

    def __init__(self, ckpt_path: str, device: str, output_dir: str):
        self.ckpt_path = ckpt_path
        # Accept either a direct .ckpt file or a run directory that contains
        # checkpoints/latest.ckpt.
        if not self.ckpt_path.endswith(".ckpt"):
            self.ckpt_path = os.path.join(self.ckpt_path, "checkpoints", "latest.ckpt")

        # Pass the path directly so torch.load opens AND closes the file
        # (the previous torch.load(open(...)) leaked the file descriptor).
        # dill is required because the payload pickles objects that plain
        # pickle cannot handle.
        payload = torch.load(self.ckpt_path, map_location="cpu", pickle_module=dill)
        self.cfg = payload["cfg"]

        with open_dict(self.cfg):
            if "autoregressive_model_params" in self.cfg.model.policy:
                # Pin the sampling-step count for real-robot inference
                # regardless of what the training config used.
                self.cfg.model.policy.autoregressive_model_params.num_sampling_steps = "100"
                print("-----------------------------------------------")
                print("num_sampling_steps", self.cfg.model.policy.autoregressive_model_params.num_sampling_steps)
                print("-----------------------------------------------")

        # Export the resolved config next to the checkpoint for reproducibility.
        cfg_path = self.ckpt_path.replace(".ckpt", ".yaml")
        with open(cfg_path, "w") as f:
            f.write(omegaconf.OmegaConf.to_yaml(self.cfg))
        print(f"Exported config to {cfg_path}")

        print(f"Loading configure: {self.cfg.task.name}, workspace: {self.cfg.model._target_}, policy: {self.cfg.model.policy._target_}")

        self.obs_res = get_real_obs_resolution(self.cfg.task.shape_meta)
        self.device = torch.device(device)

        # Instantiate the workspace class named in the config and restore
        # model/EMA/optimizer state from the checkpoint payload.
        cls = hydra.utils.get_class(self.cfg.model._target_)
        self.workspace = cls(self.cfg, output_dir=output_dir)
        self.workspace: BaseWorkspace
        self.workspace.load_payload(payload, exclude_keys=None, include_keys=None)

        self.policy: BaseImagePolicy = self.workspace.model

        # Prefer the EMA weights when the run trained with EMA.
        if self.cfg.training.use_ema:
            self.policy = self.workspace.ema_model
            print("Using EMA model")

        self.policy.eval().to(self.device)
        self.policy.reset()

        # Rolling window of the most recent action chunks (capped at 2).
        self.past_action_list = []
        self._metadata = {"obs_resolution": self.obs_res}

    @property
    def metadata(self):
        """Metadata advertised to clients (currently the observation resolution)."""
        return self._metadata

    def infer(self, obs: dict) -> dict:
        """Infer action from observation. Returns dict with 'actions' key."""
        obs_dict_np = obs.copy()
        task_name = None

        # task_name is routing metadata, not a model input — pop it before
        # converting the remaining arrays to tensors.
        if "task_name" in obs_dict_np:
            task_name = obs_dict_np["task_name"]
            print("task_name", obs_dict_np["task_name"])
            del obs_dict_np["task_name"]

        # Map the task name onto a pre-computed language embedding when the
        # model was trained with language conditioning; unknown tasks fall
        # back to unconditioned inference.
        if self.cfg.task.dataset.language_emb_model is not None and task_name:
            if "cup" in task_name:
                language_goal = language_latents["cup"]
            elif "towel" in task_name:
                language_goal = language_latents["towel"]
            elif "mouse" in task_name:
                language_goal = language_latents["mouse"]
            else:
                language_goal = None
            if language_goal is not None:
                language_goal = torch.tensor(language_goal).to(self.device)
                language_goal = language_goal.unsqueeze(0)  # add batch dim
                print("task_name", task_name)
        else:
            language_goal = None

        with torch.no_grad():
            # Add a batch dimension and move every observation array to the
            # inference device.
            obs_dict = dict_apply(
                obs_dict_np, lambda x: torch.from_numpy(x).unsqueeze(0).to(self.device)
            )

            if self.cfg.name == "uva":
                result = self.policy.predict_action(
                    obs_dict=obs_dict, language_goal=language_goal
                )

                self.past_action_list.append(np.array(result["action"][0].cpu()))
                if len(self.past_action_list) > 2:
                    self.past_action_list.pop(0)
                # UVA predictions are temporally smoothed before execution.
                action = smooth_action(result["action_pred"].detach().to("cpu")).numpy()[0]
            else:
                result = self.policy.predict_action(
                    obs_dict, language_goal=language_goal
                )
                action = result["action_pred"][0].detach().to("cpu").numpy()
                print("action")

            # Drop GPU tensor references promptly to keep memory bounded
            # between inference calls.
            del result
            del obs_dict

        return {"actions": action}
|
|
class EnvMode(enum.Enum):
    """Supported environments."""

    # ALOHA hardware rig.
    ALOHA = "aloha"
    # Simulated ALOHA environment.
    ALOHA_SIM = "aloha_sim"
    # DROID robot platform.
    DROID = "droid"
    # LIBERO simulation benchmark.
    LIBERO = "libero"
    # eval_real.py-style real-robot deployment.
    REAL = "real"
|
|
|
|
| @dataclasses.dataclass |
| class Checkpoint: |
| """Load a policy from a trained checkpoint.""" |
|
|
| |
| data_config: str |
| |
| dir: str | None = None |
|
|
|
|
@dataclasses.dataclass
class EvalRealCheckpoint:
    """Load a policy from eval_real.py style checkpoint."""

    # Checkpoint file or run directory (resolved by EvalRealPolicyAdapter).
    dir: str
    # Torch device string to run inference on.
    device: str = "cuda"
    # Directory handed to the workspace constructor for outputs.
    output_dir: str = "."
|
|
|
|
@dataclasses.dataclass
class Default:
    """Use the default policy for the given environment."""
    # Marker type: carries no fields; create_policy dispatches on its type.
|
|
|
|
@dataclasses.dataclass
class Args:
    """Arguments for the serve_policy script."""

    # Environment the served policy targets.
    env: EnvMode = EnvMode.ALOHA_SIM

    # Prompt injected when the client request carries none.
    default_prompt: str | None = None

    # TCP port for the websocket policy server.
    port: int = 8012
    # When True, wrap the policy in a recorder that logs every inference call.
    record: bool = False

    # Which policy loader to use; Default() defers to the environment default.
    policy: Checkpoint | EvalRealCheckpoint | Default = dataclasses.field(default_factory=Default)
| |
| |
|
|
|
|
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
| |
|
|
| |
| |
| |
| |
| |
| |
| |
|
|
|
|
| def create_policy(args: Args): |
| """Create a policy from the given arguments.""" |
| match args.policy: |
| case EvalRealCheckpoint(): |
| return EvalRealPolicyAdapter( |
| ckpt_path=args.policy.dir, |
| device=args.policy.device, |
| output_dir=args.policy.output_dir, |
| ) |
| case Checkpoint(): |
| import pathlib |
| import openpi.shared.normalize as _normalize |
| |
| _data_config: _config.DataConfig = get_data_config(args.policy.data_config) |
| norm_stats = _data_config.norm_stats |
| return _policy_config.create_trained_policy( |
| _data_config, args.policy.dir, default_prompt=args.default_prompt, norm_stats=norm_stats, use_vllm=_data_config.inference_use_vllm |
| ) |
| case Default(): |
| raise NotImplementedError("Default policies are not yet supported.") |
| |
|
|
|
|
|
|
def main(args: Args) -> None:
    """Build the configured policy and serve it over a websocket until killed."""
    policy = create_policy(args)
    # Capture metadata before any wrapping: the recorder wrapper is not
    # guaranteed to forward the underlying policy's metadata.
    policy_metadata = policy.metadata

    # Optionally record every inference call to disk for later replay.
    if args.record:
        policy = _policy.PolicyRecorder(policy, "policy_records")

    server = websocket_policy_server.WebsocketPolicyServer(
        policy=policy,
        host="0.0.0.0",
        port=args.port,
        metadata=policy_metadata,
    )
    server.serve_forever()
|
|
|
|
if __name__ == "__main__":
    # Configure root logging before tyro parses the CLI into an Args instance.
    logging.basicConfig(level=logging.INFO, force=True)
    main(tyro.cli(Args))
|
|